source
stringlengths
3
92
c
stringlengths
26
2.25M
dwt-simple.c
#include "dwt-simple.h"
#include "libdwt.h"
#include "inline.h"

#include <math.h>

/*
 * Single-loop (shift-register) lifting primitives for the forward DWT.
 * op4s_* helpers use 4-tap registers (CDF 9/7: four lifting steps);
 * op2s_* helpers use 2-tap registers (CDF 5/3: two lifting steps).
 * The *_eaw variants implement edge-avoiding weighted lifting.
 */

// shift the c/r shift registers one tap to the left
static void op4s_sdl_shuffle_s_ref(float *c, float *r)
{
	c[0] = c[1];
	c[1] = c[2];
	c[2] = c[3];
	r[0] = r[1];
	r[1] = r[2];
	r[2] = r[3];
}

static void op2s_sdl_shuffle_s_ref(float *c, float *r)
{
	c[0] = c[1];
	r[0] = r[1];
}

// load a pair of samples from strided memory into x[0..1]
static void op4s_sdl_load_stride_s_ref(float *x, const float *addr, int stride)
{
	x[0] = *addr1_const_s(addr, 0, stride);
	x[1] = *addr1_const_s(addr, 1, stride);
}

static void op2s_sdl_load_stride_s_ref(float *x, const float *addr, int stride)
{
	x[0] = *addr1_const_s(addr, 0, stride);
	x[1] = *addr1_const_s(addr, 1, stride);
}

// store the output pair y[0..1] back to strided memory
static void op4s_sdl_save_stride_s_ref(float *y, float *addr, int stride)
{
	*addr1_s(addr, 0, stride) = y[0];
	*addr1_s(addr, 1, stride) = y[1];
}

static void op2s_sdl_save_stride_s_ref(float *y, float *addr, int stride)
{
	*addr1_s(addr, 0, stride) = y[0];
	*addr1_s(addr, 1, stride) = y[1];
}

// feed the freshly loaded pair into the newest taps of the registers
static void op4s_sdl_input_s_ref(const float *x, float *c, float *r)
{
	c[3] = x[0];
	r[3] = x[1];
}

static void op2s_sdl_input_s_ref(const float *x, float *c, float *r)
{
	c[1] = x[0];
	r[1] = x[1];
}

// extract the finished pair from the oldest taps
static void op4s_sdl_output_s_ref(float *y, const float *l, const float *z)
{
	y[0] = l[0];
	y[1] = z[0];
}

static void op2s_sdl_output_s_ref(float *y, const float *l, const float *z)
{
	y[0] = l[0];
	y[1] = z[0];
}

// apply the final scaling factors (v = { 1/zeta, zeta }) to the output pair
static void op4s_sdl_scale_s_ref(float *y, const float *v)
{
	y[0] *= v[0];
	y[1] *= v[1];
}

static void op2s_sdl_scale_s_ref(float *y, const float *v)
{
	y[0] *= v[0];
	y[1] *= v[1];
}

// all four CDF 9/7 lifting steps in one pass, z = c + w*(l+r),
// evaluated from the newest tap [3] down to the oldest [0]
static void op4s_sdl_op_s_ref(float *z, const float *c, const float *w, const float *l, const float *r)
{
	z[3] = c[3] + w[3] * ( l[3] + r[3] );
	z[2] = c[2] + w[2] * ( l[2] + r[2] );
	z[1] = c[1] + w[1] * ( l[1] + r[1] );
	z[0] = c[0] + w[0] * ( l[0] + r[0] );
}

// both CDF 5/3 lifting steps in one pass
static void op2s_sdl_op_s_ref(float *z, const float *c, const float *w, const float *l, const float *r)
{
	z[1] = c[1] + w[1] * ( l[1] + r[1] );
	z[0] = c[0] + w[0] * ( l[0] + r[0] );
}

// edge-avoiding 5/3 lifting steps: the plain sum (l+r) is replaced by the
// weighted mean 2*(wL*l + wR*r)/(wL+wR), which reduces to (l+r) when wL == wR
static void op2s_sdl_op_s_ref_eaw(
	float *z,
	const float *c,
	const float *w, // [beta] [alpha]
	const float *l,
	const float *r,
	const float *eaw_w // [betaL] [betaR] [-] [alphaL] [alphaR]
)
{
	z[1] = c[1] + ( eaw_w[3]*l[1] + eaw_w[4]*r[1] ) / (eaw_w[3]+eaw_w[4]) * (2.f*w[1]); // alpha
	z[0] = c[0] + ( eaw_w[0]*l[0] + eaw_w[1]*r[0] ) / (eaw_w[0]+eaw_w[1]) * (2.f*w[0]); // beta
}

// rotate the pipeline registers: c <- l <- r <- z
static void op4s_sdl_update_s_ref(float *c, float *l, float *r, const float *z)
{
	c[0] = l[0];
	c[1] = l[1];
	c[2] = l[2];
	c[3] = l[3];
	l[0] = r[0];
	l[1] = r[1];
	l[2] = r[2];
	l[3] = r[3];
	r[0] = z[0];
	r[1] = z[1];
	r[2] = z[2];
	r[3] = z[3];
}

static void op2s_sdl_update_s_ref(float *c, float *l, float *r, const float *z)
{
	c[0] = l[0];
	c[1] = l[1];
	l[0] = r[0];
	l[1] = r[1];
	r[0] = z[0];
	r[1] = z[1];
}

// one prolog iteration of the diagonal (SDL) 9/7 transform: fill the
// pipeline (load/input/op/update) without producing output yet
static void fdwt_cdf97_diagonal_prolog_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride)
{
	UNUSED(v);
	UNUSED(y);

	// shuffle
	op4s_sdl_shuffle_s_ref(c, r);
	// load
	op4s_sdl_load_stride_s_ref(x, addr1_s(*addr, +4, stride), stride);
	// (descale)
	// input
	op4s_sdl_input_s_ref(x, c, r);
	// operation
	op4s_sdl_op_s_ref(z, c, w, l, r);
	// (output)
	// (scale)
	// (save)
	// update
	op4s_sdl_update_s_ref(c, l, r, z);
	// pointers
	*addr = addr1_s(*addr, 2, stride);
}

// 5/3 counterpart of the prolog above (2-tap pipeline)
static void fdwt_cdf53_diagonal_prolog_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride)
{
	UNUSED(v);
	UNUSED(y);

	// shuffle
	op2s_sdl_shuffle_s_ref(c, r);
	// load
	op2s_sdl_load_stride_s_ref(x, addr1_s(*addr, +2, stride), stride);
	// (descale)
	// input
	op2s_sdl_input_s_ref(x, c, r);
	// operation
	op2s_sdl_op_s_ref(z, c, w, l, r);
	// (output)
	// (scale)
	// (save)
	// update
	op2s_sdl_update_s_ref(c, l, r, z);
	// pointers
	*addr = addr1_s(*addr, 2, stride);
}

// edge-avoiding 5/3 prolog; eaw_w points at the per-position weights
static void fdwt_eaw53_diagonal_prolog_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride, const float *eaw_w)
{
	UNUSED(v);
	UNUSED(y);

	// shuffle
	op2s_sdl_shuffle_s_ref(c, r);
	// load
	op2s_sdl_load_stride_s_ref(x, addr1_s(*addr, +2, stride), stride);
	// (descale)
	// input
	op2s_sdl_input_s_ref(x, c, r);
	// operation
	op2s_sdl_op_s_ref_eaw(z, c, w, l, r, eaw_w);
	// (output)
	// (scale)
	// (save)
	// update
	op2s_sdl_update_s_ref(c, l, r, z);
	// pointers
	*addr = addr1_s(*addr, 2, stride);
}

// one epilog iteration of the diagonal 9/7 transform: drain the pipeline
// (op/output/scale/save) without loading new input; results land 6 back
static void fdwt_cdf97_diagonal_epilog_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride)
{
	UNUSED(x);

	// shuffle
	op4s_sdl_shuffle_s_ref(c, r);
	// (load)
	// (descale)
	// (input)
	// operation
	op4s_sdl_op_s_ref(z, c, w, l, r);
	// output
	op4s_sdl_output_s_ref(y, l, z);
	// scale
	op4s_sdl_scale_s_ref(y, v);
	// save
	op4s_sdl_save_stride_s_ref(y, addr1_s(*addr, -6, stride), stride);
	// update
	op4s_sdl_update_s_ref(c, l, r, z);
	// pointers
	(*addr) = addr1_s(*addr, 2, stride);
}

// 5/3 counterpart of the epilog above; results land 2 back
static void fdwt_cdf53_diagonal_epilog_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride)
{
	UNUSED(x);

	// shuffle
	op2s_sdl_shuffle_s_ref(c, r);
	// (load)
	// (descale)
	// (input)
	// operation
	op2s_sdl_op_s_ref(z, c, w, l, r);
	// output
	op2s_sdl_output_s_ref(y, l, z);
	// scale
	op2s_sdl_scale_s_ref(y, v);
	// save
	op2s_sdl_save_stride_s_ref(y, addr1_s(*addr, -2, stride), stride);
	// update
	op2s_sdl_update_s_ref(c, l, r, z);
	// pointers
	(*addr) = addr1_s(*addr, 2, stride);
}

// edge-avoiding 5/3 epilog
static void fdwt_eaw53_diagonal_epilog_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride, const float *eaw_w)
{
	UNUSED(x);

	// shuffle
	op2s_sdl_shuffle_s_ref(c, r);
	// (load)
	// (descale)
	// (input)
	// operation
	op2s_sdl_op_s_ref_eaw(z, c, w, l, r, eaw_w);
	// output
	op2s_sdl_output_s_ref(y, l, z);
	// scale
	op2s_sdl_scale_s_ref(y, v);
	// save
	op2s_sdl_save_stride_s_ref(y, addr1_s(*addr, -2, stride), stride);
	// update
	op2s_sdl_update_s_ref(c, l, r, z);
	// pointers
(*addr) = addr1_s(*addr, 2, stride);
}

// Forward CDF 9/7 transform specialised for very short signals (N = 1..4);
// symmetric border extension is folded into the doubled (2*) coefficients.
static void fdwt_cdf97_short_s(float *arr, int N, int stride)
{
	assert( arr );

	float alpha = -dwt_cdf97_p1_s;
	float beta = +dwt_cdf97_u1_s;
	float gamma = -dwt_cdf97_p2_s;
	float delta = +dwt_cdf97_u2_s;
	float zeta = +dwt_cdf97_s1_s;

	if( 1 == N )
	{
		*addr1_s(arr, 0, stride) *= zeta;
	}

	if( 2 == N )
	{
		// alpha
		*addr1_s(arr, 1, stride) += 2*alpha * (*addr1_s(arr, 0, stride));
		// beta
		*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
		// gamma
		*addr1_s(arr, 1, stride) += 2*gamma * (*addr1_s(arr, 0, stride));
		// delta
		*addr1_s(arr, 0, stride) += 2*delta * (*addr1_s(arr, 1, stride));
		// scaling
		*addr1_s(arr, 0, stride) *= zeta;
		*addr1_s(arr, 1, stride) *= 1/zeta;
	}

	if( 3 == N )
	{
		// alpha
		*addr1_s(arr, 1, stride) += alpha * (*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride));
		// beta
		*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
		*addr1_s(arr, 2, stride) += 2*beta * (*addr1_s(arr, 1, stride));
		// gamma
		*addr1_s(arr, 1, stride) += gamma * (*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride));
		// delta
		*addr1_s(arr, 0, stride) += 2*delta * (*addr1_s(arr, 1, stride));
		*addr1_s(arr, 2, stride) += 2*delta * (*addr1_s(arr, 1, stride));
		// scaling
		*addr1_s(arr, 0, stride) *= zeta;
		*addr1_s(arr, 1, stride) *= 1/zeta;
		*addr1_s(arr, 2, stride) *= zeta;
	}

	if( 4 == N )
	{
		// alpha
		*addr1_s(arr, 1, stride) += alpha * (*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride));
		*addr1_s(arr, 3, stride) += 2*alpha * (*addr1_s(arr, 2, stride));
		// beta
		*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
		*addr1_s(arr, 2, stride) += beta * (*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride));
		// gamma
		*addr1_s(arr, 1, stride) += gamma * (*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride));
		*addr1_s(arr, 3, stride) += 2*gamma * (*addr1_s(arr, 2, stride));
		// delta
		*addr1_s(arr, 0, stride) += 2*delta * (*addr1_s(arr, 1, stride));
		*addr1_s(arr, 2, stride) += delta * (*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride));
		// scaling
		*addr1_s(arr, 0, stride) *= zeta;
		*addr1_s(arr, 1, stride) *= 1/zeta;
		*addr1_s(arr, 2, stride) *= zeta;
		*addr1_s(arr, 3, stride) *= 1/zeta;
	}
}

// Forward CDF 5/3 transform for very short signals (N = 1..2).
static void fdwt_cdf53_short_s(float *arr, int N, int stride)
{
	assert( arr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	if( 1 == N )
	{
		*addr1_s(arr, 0, stride) *= zeta;
	}

	if( 2 == N )
	{
		// alpha
		*addr1_s(arr, 1, stride) += 2*alpha * (*addr1_s(arr, 0, stride));
		// beta
		*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
		// scaling
		*addr1_s(arr, 0, stride) *= zeta;
		*addr1_s(arr, 1, stride) *= 1/zeta;
	}
}

// Edge-avoiding 5/3 for N = 1..2; with a single neighbour the weighted mean
// equals the plain value, so the weights are deliberately unused here.
static void fdwt_eaw53_short_s(float *arr, int N, int stride, const float *eaw_w)
{
	assert( arr );

	UNUSED( eaw_w );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	if( 1 == N )
	{
		*addr1_s(arr, 0, stride) *= zeta;
	}

	if( 2 == N )
	{
		// alpha
		*addr1_s(arr, 1, stride) += 2*alpha * (*addr1_s(arr, 0, stride));
		// beta
		*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
		// scaling
		*addr1_s(arr, 0, stride) *= zeta;
		*addr1_s(arr, 1, stride) *= 1/zeta;
	}
}

// Left-border prolog of the 9/7 transform: perform the lifting updates that
// involve only the first five samples (symmetric extension at index 0).
static void fdwt_cdf97_prolog_s(float *arr, int N, int stride)
{
	assert( N >= 5 );
	assert( arr );

	float alpha = -dwt_cdf97_p1_s;
	float beta = +dwt_cdf97_u1_s;
	float gamma = -dwt_cdf97_p2_s;
	float delta = +dwt_cdf97_u2_s;
	float zeta = +dwt_cdf97_s1_s;

	// alpha
	*addr1_s(arr, 1, stride) += alpha * (*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride));
	*addr1_s(arr, 3, stride) += alpha * (*addr1_s(arr, 2, stride) + *addr1_s(arr, 4, stride));
	// beta
	*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
	*addr1_s(arr, 2, stride) += beta * (*addr1_s(arr, 1, stride) + *addr1_s(arr, 3, stride));
	// gamma
	*addr1_s(arr, 1, stride) += gamma * (*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride));
	// delta
	*addr1_s(arr, 0, stride) += 2*delta * (*addr1_s(arr, 1, stride));
	// scaling
	*addr1_s(arr, 0, stride) *= zeta;
}

static void
fdwt_cdf53_prolog_s(float *arr, int N, int stride)
// Left-border prolog of the 5/3 transform (first three samples).
{
	assert( N >= 3 );
	assert( arr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	// alpha
	*addr1_s(arr, 1, stride) += alpha * (*addr1_s(arr, 0, stride) + *addr1_s(arr, 2, stride));
	// beta
	*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
	// scaling
	*addr1_s(arr, 0, stride) *= zeta;
}

// Left-border prolog of the edge-avoiding 5/3 transform; the alpha step uses
// the weighted mean of the two neighbours (weights eaw_w[0], eaw_w[1]).
static void fdwt_eaw53_prolog_s(float *arr, int N, int stride, const float *eaw_w)
{
	assert( N >= 3 );
	assert( arr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	// alpha
	*addr1_s(arr, 1, stride) += ( eaw_w[1-1] * *addr1_s(arr, 0, stride) + eaw_w[1+0] * *addr1_s(arr, 2, stride) ) / (eaw_w[1-1]+eaw_w[1+0]) * (2.f * alpha);
	// beta
	*addr1_s(arr, 0, stride) += 2*beta * (*addr1_s(arr, 1, stride));
	// scaling
	*addr1_s(arr, 0, stride) *= zeta;
}

// One step of the vertical-vectorised 9/7 transform: consume the input pair
// (*ptr0, *ptr1), emit the output pair four positions behind, carry state in l.
static void fdwt_cdf97_vertical_core_s(
	float *ptr0,
	float *ptr1,
	float *out0,
	float *out1,
	float alpha,
	float beta,
	float gamma,
	float delta,
	float zeta,
	float *l // [4]
)
{
	// constants
	const float w[4] = { delta, gamma, beta, alpha };
	const float v[2] = { 1/zeta, zeta };

	// aux. variables
	float x[2];
	float y[2];
	float r[4];
	float c[4];

	// inputs
	x[0] = *ptr0;
	x[1] = *ptr1;

	// shuffles
	y[0] = l[0];
	c[0] = l[1];
	c[1] = l[2];
	c[2] = l[3];
	c[3] = x[0];

	// operation: the four lifting steps, newest to oldest
	r[3] = x[1];
	r[2] = c[3] + w[3] * (l[3] + r[3]);
	r[1] = c[2] + w[2] * (l[2] + r[2]);
	r[0] = c[1] + w[1] * (l[1] + r[1]);
	y[1] = c[0] + w[0] * (l[0] + r[0]);

	// scales
	y[0] *= v[0];
	y[1] *= v[1];

	// outputs
	*out0 = y[0];
	*out1 = y[1];

	// update
	l[0] = r[0];
	l[1] = r[1];
	l[2] = r[2];
	l[3] = r[3];
}

// 5/3 counterpart of the vertical core (two lifting steps, 2-tap state).
static void fdwt_cdf53_vertical_core_s(
	float *ptr0,
	float *ptr1,
	float *out0,
	float *out1,
	float alpha,
	float beta,
	float zeta,
	float *l // [2]
)
{
	// constants
	const float w[2] = { beta, alpha };
	const float v[2] = { 1/zeta, zeta };

	// aux. variables
	float x[2];
	float y[2];
	float r[2];
	float c[2];

	// inputs
	x[0] = *ptr0;
	x[1] = *ptr1;

	// shuffles
	y[0] = l[0];
	c[0] = l[1];
	c[1] = x[0];

	// operation
	r[1] = x[1];
	r[0] = c[1] + w[1] * (l[1] + r[1]);
	y[1] = c[0] + w[0] * (l[0] + r[0]);

	// scales
	y[0] *= v[0];
	y[1] *= v[1];

	// outputs
	*out0 = y[0];
	*out1 = y[1];

	// update
	l[0] = r[0];
	l[1] = r[1];
}

// Edge-avoiding 5/3 vertical core: lifting sums are replaced by weighted means.
static void fdwt_eaw53_vertical_core_s(
	float *ptr0,
	float *ptr1,
	float *out0,
	float *out1,
	float alpha,
	float beta,
	float zeta,
	float *l, // [2]
	const float *eaw_w // [3]: [0] = wL/beta, [1] = wR/beta = wL/alpha, [2] = wR/alpha
)
{
	// constants
	const float w[2] = { beta, alpha };
	const float v[2] = { 1/zeta, zeta };

	// aux. variables
	float x[2];
	float y[2];
	float r[2];
	float c[2];

	// inputs
	x[0] = *ptr0;
	x[1] = *ptr1;

	// shuffles
	y[0] = l[0];
	c[0] = l[1];
	c[1] = x[0];

	// operation
	r[1] = x[1];
	r[0] = c[1] + (eaw_w[1]*l[1]+eaw_w[2]*r[1]) / (eaw_w[1]+eaw_w[2]) * (2.f*w[1]); // alpha
	y[1] = c[0] + (eaw_w[0]*l[0]+eaw_w[1]*r[0]) / (eaw_w[0]+eaw_w[1]) * (2.f*w[0]); // beta

	// scales
	y[0] *= v[0];
	y[1] *= v[1];

	// outputs
	*out0 = y[0];
	*out1 = y[1];

	// update
	l[0] = r[0];
	l[1] = r[1];
}

// Vertically-vectorised forward 9/7 transform over one strided signal:
// a 4-tap carry buffer l is primed from the first four samples, then the
// core is run pairwise with output lagging four positions behind input.
void fdwt_cdf97_vertical_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( ptr );

	float alpha = -dwt_cdf97_p1_s;
	float beta = +dwt_cdf97_u1_s;
	float gamma = -dwt_cdf97_p2_s;
	float delta = +dwt_cdf97_u2_s;
	float zeta = +dwt_cdf97_s1_s;

	int pairs = (to_even(size)-4)/2;
	float *begin = addr1_s(ptr, 0, stride);

	assert( pairs >= 0 );

	// buffer
	float l[4];

	// prolog-vertical
	l[0] = *addr1_s(begin, 0, stride);
	l[1] = *addr1_s(begin, 1, stride);
	l[2] = *addr1_s(begin, 2, stride);
	l[3] = *addr1_s(begin, 3, stride);

	// init
	float *addr = addr1_s(begin, 4, stride);

	// loop by pairs from left to right
	for(int s = 0; s < pairs; s++)
	{
		fdwt_cdf97_vertical_core_s(
			addr1_s(addr, 0, stride),
			addr1_s(addr, 1, stride),
			addr1_s(addr, 0-4, stride),
			addr1_s(addr, 1-4, stride),
			alpha,
			beta,
			gamma,
			delta,
			zeta,
			l
		);
		// pointers
		addr = addr1_s(addr, 2, stride);
	}
// epilog-vertical: flush the four carried coefficients
	*addr1_s(addr, 0-4, stride) = l[0];
	*addr1_s(addr, 1-4, stride) = l[1];
	*addr1_s(addr, 2-4, stride) = l[2];
	*addr1_s(addr, 3-4, stride) = l[3];
}

// Vertically-vectorised forward 5/3 transform over one strided signal.
void fdwt_cdf53_vertical_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( ptr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	int pairs = (to_even(size)-2)/2;
	float *begin = addr1_s(ptr, 0, stride);

	assert( pairs >= 0 );

	// buffer
	float l[2];

	// prolog-vertical
	l[0] = *addr1_s(begin, 0, stride);
	l[1] = *addr1_s(begin, 1, stride);

	// init
	float *addr = addr1_s(begin, 2, stride);

	// loop by pairs from left to right
	for(int s = 0; s < pairs; s++)
	{
		fdwt_cdf53_vertical_core_s(
			addr1_s(addr, 0, stride),
			addr1_s(addr, 1, stride),
			addr1_s(addr, 0-2, stride),
			addr1_s(addr, 1-2, stride),
			alpha,
			beta,
			zeta,
			l
		);
		// pointers
		addr = addr1_s(addr, 2, stride);
	}

	// epilog-vertical
	*addr1_s(addr, 0-2, stride) = l[0];
	*addr1_s(addr, 1-2, stride) = l[1];
}

// Vertically-vectorised forward edge-avoiding 5/3 transform; eaw_w holds the
// precomputed inter-sample weights, advanced by two per processed pair.
void fdwt_eaw53_vertical_s(
	void *ptr,
	int size,
	int stride,
	const float *eaw_w
)
{
	assert( ptr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	int pairs = (to_even(size)-2)/2;
	float *begin = addr1_s(ptr, 0, stride);

	assert( pairs >= 0 );

	// buffer
	float l[2];

	// prolog-vertical
	l[0] = *addr1_s(begin, 0, stride);
	l[1] = *addr1_s(begin, 1, stride);

	// init
	float *addr = addr1_s(begin, 2, stride);

	// loop by pairs from left to right
	for(int s = 0; s < pairs; s++)
	{
		fdwt_eaw53_vertical_core_s(
			addr1_s(addr, 0, stride),
			addr1_s(addr, 1, stride),
			addr1_s(addr, 0-2, stride),
			addr1_s(addr, 1-2, stride),
			alpha,
			beta,
			zeta,
			l,
			&eaw_w[2*s]
		);
		// pointers
		addr = addr1_s(addr, 2, stride);
	}

	// epilog-vertical
	*addr1_s(addr, 0-2, stride) = l[0];
	*addr1_s(addr, 1-2, stride) = l[1];
}

// Horizontal (loop-per-lifting-step) forward 9/7 transform: each of the four
// lifting passes sweeps the whole signal before the next starts.
void fdwt_cdf97_horizontal_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( ptr );

	float alpha = -dwt_cdf97_p1_s;
	float beta = +dwt_cdf97_u1_s;
	float gamma = -dwt_cdf97_p2_s;
	float delta = +dwt_cdf97_u2_s;
	float zeta = +dwt_cdf97_s1_s;

	int pairs = (to_even(size)-4)/2;
	float *begin = addr1_s(ptr, 0, stride);

	assert( pairs >= 0 );

	// constants
	const float w[4] = { delta, gamma, beta, alpha };
	const float v[2] = { 1/zeta, zeta };

	// operations: off = 4 (alpha) down to 1 (delta)
	for(int off = 4; off >= 1; off--)
	{
		float *out = addr1_s(begin, off, stride);
		const float c = w[off-1];

		for(int s = 0; s < pairs; s++)
		{
			*addr1_s(out, 0, stride) += c * (*addr1_s(out, -1, stride) + *addr1_s(out, +1, stride));

			out = addr1_s(out, 2, stride);
		}
	}

	// scale
	float *out = addr1_s(begin, 0, stride);
	for(int s = 0; s < pairs; s++)
	{
		*addr1_s(out, 0, stride) *= v[0];
		*addr1_s(out, 1, stride) *= v[1];

		out = addr1_s(out, 2, stride);
	}
}

// Horizontal forward 5/3 transform (two lifting passes, then scaling).
void fdwt_cdf53_horizontal_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( ptr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	int pairs = (to_even(size)-2)/2;
	float *begin = addr1_s(ptr, 0, stride);

	assert( pairs >= 0 );

	// constants
	const float w[2] = { beta, alpha };
	const float v[2] = { 1/zeta, zeta };

	// operations: off = 2 (alpha), then 1 (beta)
	for(int off = 2; off >= 1; off--)
	{
		float *out = addr1_s(begin, off, stride);
		const float c = w[off-1];

		for(int s = 0; s < pairs; s++)
		{
			*addr1_s(out, 0, stride) += c * (*addr1_s(out, -1, stride) + *addr1_s(out, +1, stride));

			out = addr1_s(out, 2, stride);
		}
	}

	// scale
	float *out = addr1_s(begin, 0, stride);
	for(int s = 0; s < pairs; s++)
	{
		*addr1_s(out, 0, stride) *= v[0];
		*addr1_s(out, 1, stride) *= v[1];

		out = addr1_s(out, 2, stride);
	}
}

// Edge-avoiding weight between neighbouring samples n and m:
// w = 1 / (|n-m|^alpha + eps); large intensity steps get small weights.
static float dwt_eaw_w(float n, float m, float alpha)
{
	const float eps = 1.0e-5f;
	return 1.f / (powf(fabsf(n-m), alpha) + eps);
}

// Fill w[0..N-2] with weights between consecutive strided samples of arr.
static void dwt_calc_eaw_w_stride_s(
	float *w,
	float *arr,
	int N,
	int stride,
	float alpha
)
{
	assert( w );
	assert( arr );

	for(int i = 0; i < N-1; i++)
	{
		w[i] = dwt_eaw_w( *addr1_s(arr, i+0, stride), *addr1_s(arr, i+1, stride), alpha);
	}
	w[N-1] = 0.f; // not necessary
}

// Horizontal forward edge-avoiding 5/3 transform.
void fdwt_eaw53_horizontal_s(
	void *ptr,
	int size,
	int stride,
	const float *eaw_w
)
{
	assert( ptr );

	float
alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	int pairs = (to_even(size)-2)/2;
	float *begin = addr1_s(ptr, 0, stride);

	assert( pairs >= 0 );

	// constants
	const float c[2] = { beta, alpha };
	const float v[2] = { 1/zeta, zeta };

	// operations: each lifting pass uses the weighted mean of the neighbours
	for(int off = 2; off >= 1; off--)
	{
		float *out = addr1_s(begin, off, stride);
		const float coeff = c[off-1];

		for(int s = 0; s < pairs; s++)
		{
			float wL = eaw_w[off+2*s-1];
			float wR = eaw_w[off+2*s+0];

			*addr1_s(out, 0, stride) += ( wL * *addr1_s(out, -1, stride) + wR * *addr1_s(out, +1, stride) ) / (wL+wR) * (2.f * coeff);

			out = addr1_s(out, 2, stride);
		}
	}

	// scale
	float *out = addr1_s(begin, 0, stride);
	for(int s = 0; s < pairs; s++)
	{
		*addr1_s(out, 0, stride) *= v[0];
		*addr1_s(out, 1, stride) *= v[1];

		out = addr1_s(out, 2, stride);
	}
}

// Steady-state iteration of the diagonal (SDL) 9/7 transform: full
// load/op/output/scale/save cycle; output lags the input by 6 positions.
static void fdwt_cdf97_diagonal_core_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride)
{
	// shuffle
	op4s_sdl_shuffle_s_ref(c, r);
	// load
	op4s_sdl_load_stride_s_ref(x, addr1_s(*addr, +4, stride), stride);
	// (descale)
	// input
	op4s_sdl_input_s_ref(x, c, r);
	// operation
	op4s_sdl_op_s_ref(z, c, w, l, r);
	// output
	op4s_sdl_output_s_ref(y, l, z);
	// scale
	op4s_sdl_scale_s_ref(y, v);
	// save
	op4s_sdl_save_stride_s_ref(y, addr1_s(*addr, -6, stride), stride);
	// update
	op4s_sdl_update_s_ref(c, l, r, z);
	// pointers
	*addr = addr1_s(*addr, 2, stride);
}

// Steady-state iteration of the diagonal 5/3 transform (output lags by 2).
static void fdwt_cdf53_diagonal_core_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride)
{
	// shuffle
	op2s_sdl_shuffle_s_ref(c, r);
	// load
	op2s_sdl_load_stride_s_ref(x, addr1_s(*addr, +2, stride), stride);
	// (descale)
	// input
	op2s_sdl_input_s_ref(x, c, r);
	// operation
	op2s_sdl_op_s_ref(z, c, w, l, r);
	// output
	op2s_sdl_output_s_ref(y, l, z);
	// scale
	op2s_sdl_scale_s_ref(y, v);
	// save
	op2s_sdl_save_stride_s_ref(y, addr1_s(*addr, -2, stride), stride);
	// update
	op2s_sdl_update_s_ref(c, l, r, z);
	// pointers
	*addr = addr1_s(*addr, 2, stride);
}

// Steady-state iteration of the diagonal edge-avoiding 5/3 transform.
static void fdwt_eaw53_diagonal_core_s(const float *w, const float *v, float *l, float *c, float *r, float *z, float *x, float *y, float **addr, int stride, const float *eaw_w)
{
	// shuffle
	op2s_sdl_shuffle_s_ref(c, r);
	// load
	op2s_sdl_load_stride_s_ref(x, addr1_s(*addr, +2, stride), stride);
	// (descale)
	// input
	op2s_sdl_input_s_ref(x, c, r);
	// operation
	op2s_sdl_op_s_ref_eaw(z, c, w, l, r, eaw_w);
	// output
	op2s_sdl_output_s_ref(y, l, z);
	// scale
	op2s_sdl_scale_s_ref(y, v);
	// save
	op2s_sdl_save_stride_s_ref(y, addr1_s(*addr, -2, stride), stride);
	// update
	op2s_sdl_update_s_ref(c, l, r, z);
	// pointers
	*addr = addr1_s(*addr, 2, stride);
}

// Diagonally-vectorised forward 9/7 transform: 3-step prolog fills the
// pipeline, core runs pairs-3 iterations, 3-step epilog drains it.
void fdwt_cdf97_diagonal_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( ptr );

	float alpha = -dwt_cdf97_p1_s;
	float beta = +dwt_cdf97_u1_s;
	float gamma = -dwt_cdf97_p2_s;
	float delta = +dwt_cdf97_u2_s;
	float zeta = +dwt_cdf97_s1_s;

	int pairs = (to_even(size)-4)/2;
	float *begin = addr1_s(ptr, 0, stride);
	float *end = addr1_s(ptr, 2*pairs, stride);

	if( pairs < 3 )
	{
		// NOTE: unfornunately, the diagonal vectorisation cannot handle less than 3 pairs of coefficients
		fdwt_cdf97_vertical_s(ptr, size, stride);
	}
	if( pairs >= 3 )
	{
		const float w[4] = { delta, gamma, beta, alpha };
		const float v[4] = { 1/zeta, zeta, 1/zeta, zeta };

		float l[4];
		float c[4];
		float r[4];
		float z[4];
		float x[4];
		float y[4];

		float *addr = begin;

		// prolog-diagonal
		l[3] = *addr1_const_s(begin, 3, stride);
		fdwt_cdf97_diagonal_prolog_s(w, v, l, c, r, z, x, y, &addr, stride);
		l[2] = *addr1_const_s(begin, 2, stride);
		fdwt_cdf97_diagonal_prolog_s(w, v, l, c, r, z, x, y, &addr, stride);
		l[1] = *addr1_const_s(begin, 1, stride);
		fdwt_cdf97_diagonal_prolog_s(w, v, l, c, r, z, x, y, &addr, stride);
		l[0] = *addr1_const_s(begin, 0, stride);

		// core
		for(int s = 0; s < pairs-3; s++)
		{
			fdwt_cdf97_diagonal_core_s(w, v, l, c, r, z, x, y, &addr, stride);
		}

		// epilog-diagonal
		*addr1_s(end,
3, stride) = l[3];
		fdwt_cdf97_diagonal_epilog_s(w, v, l, c, r, z, x, y, &addr, stride);
		*addr1_s(end, 2, stride) = l[2];
		fdwt_cdf97_diagonal_epilog_s(w, v, l, c, r, z, x, y, &addr, stride);
		*addr1_s(end, 1, stride) = l[1];
		fdwt_cdf97_diagonal_epilog_s(w, v, l, c, r, z, x, y, &addr, stride);
		*addr1_s(end, 0, stride) = l[0];
	}
}

// Diagonally-vectorised forward 5/3 transform (1-step prolog/epilog).
void fdwt_cdf53_diagonal_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( ptr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	int pairs = (to_even(size)-2)/2;
	float *begin = addr1_s(ptr, 0, stride);
	float *end = addr1_s(ptr, 2*pairs, stride);

	if( pairs < 1 )
	{
		// NOTE: unfornunately, the diagonal vectorisation cannot handle less than 1 pair of coefficients
		fdwt_cdf53_vertical_s(ptr, size, stride);
	}
	if( pairs >= 1 )
	{
		const float w[2] = { beta, alpha };
		const float v[2] = { 1/zeta, zeta };

		float l[2];
		float c[2];
		float r[2];
		float z[2];
		float x[2];
		float y[2];

		float *addr = begin;

		// prolog-diagonal
		l[1] = *addr1_const_s(begin, 1, stride);
		fdwt_cdf53_diagonal_prolog_s(w, v, l, c, r, z, x, y, &addr, stride);
		l[0] = *addr1_const_s(begin, 0, stride);

		// core
		for(int s = 0; s < pairs-1; s++)
		{
			fdwt_cdf53_diagonal_core_s(w, v, l, c, r, z, x, y, &addr, stride);
		}

		// epilog-diagonal
		*addr1_s(end, 1, stride) = l[1];
		fdwt_cdf53_diagonal_epilog_s(w, v, l, c, r, z, x, y, &addr, stride);
		*addr1_s(end, 0, stride) = l[0];
	}
}

// Diagonally-vectorised forward edge-avoiding 5/3 transform.
// NOTE(review): the prolog is passed &eaw_w[-2] (two weights before the
// array start) so that the op's fixed [0],[1],[3],[4] weight offsets line up
// with the pipeline lag — confirm the caller allocates that headroom.
void fdwt_eaw53_diagonal_s(
	void *ptr,
	int size,
	int stride,
	const float *eaw_w
)
{
	assert( ptr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	int pairs = (to_even(size)-2)/2;
	float *begin = addr1_s(ptr, 0, stride);
	float *end = addr1_s(ptr, 2*pairs, stride);

	if( pairs < 1 )
	{
		// NOTE: unfornunately, the diagonal vectorisation cannot handle less than 1 pair of coefficients
		fdwt_eaw53_vertical_s(ptr, size, stride, eaw_w);
	}
	if( pairs >= 1 )
	{
		const float w[2] = { beta, alpha };
		const float v[2] = { 1/zeta, zeta };

		float l[2];
		float c[2];
		float r[2];
		float z[2];
		float x[2];
		float y[2];

		float *addr = begin;

		// prolog-diagonal
		l[1] = *addr1_const_s(begin, 1, stride);
		fdwt_eaw53_diagonal_prolog_s(w, v, l, c, r, z, x, y, &addr, stride, &eaw_w[-2]);
		l[0] = *addr1_const_s(begin, 0, stride);

		// core
		for(int s = 0; s < pairs-1; s++)
		{
			fdwt_eaw53_diagonal_core_s(w, v, l, c, r, z, x, y, &addr, stride, &eaw_w[2*s]);
		}

		// epilog-diagonal
		*addr1_s(end, 1, stride) = l[1];
		fdwt_eaw53_diagonal_epilog_s(w, v, l, c, r, z, x, y, &addr, stride, &eaw_w[2*pairs-2]);
		*addr1_s(end, 0, stride) = l[0];
	}
}

// Right-border epilog of the 9/7 transform: finish the lifting updates near
// the signal end, with symmetric extension folded into 2* coefficients.
static void fdwt_cdf97_epilog_s(
	float *arr,
	int N,
	int stride
)
{
	assert( N >= 4 );
	assert( arr );

	float alpha = -dwt_cdf97_p1_s;
	float beta = +dwt_cdf97_u1_s;
	float gamma = -dwt_cdf97_p2_s;
	float delta = +dwt_cdf97_u2_s;
	float zeta = +dwt_cdf97_s1_s;

	if( is_even(N) )
	{
		// alpha
		// none
		// beta
		*addr1_s(arr, N-1, stride) += 2*beta*(*addr1_s(arr, N-2, stride));
		// gamma
		*addr1_s(arr, N-2, stride) += gamma*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));
		// delta
		*addr1_s(arr, N-1, stride) += 2*delta*(*addr1_s(arr, N-2, stride));
		*addr1_s(arr, N-3, stride) += delta*(*addr1_s(arr, N-4, stride) + *addr1_s(arr, N-2, stride));
		// scaling
		*addr1_s(arr, N-4, stride) *= 1/zeta;
		*addr1_s(arr, N-3, stride) *= zeta;
		*addr1_s(arr, N-2, stride) *= 1/zeta;
		*addr1_s(arr, N-1, stride) *= zeta;
	}
	else /* is_odd(N) */
	{
		// alpha
		*addr1_s(arr, N-1, stride) += 2*alpha*(*addr1_s(arr, N-2, stride));
		// beta
		*addr1_s(arr, N-2, stride) += beta*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));
		// gamma
		*addr1_s(arr, N-1, stride) += 2*gamma*(*addr1_s(arr, N-2, stride));
		*addr1_s(arr, N-3, stride) += gamma*(*addr1_s(arr, N-2, stride) + *addr1_s(arr, N-4, stride));
		// delta
		*addr1_s(arr, N-2, stride) += delta*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));
		*addr1_s(arr, N-4, stride) += delta*(*addr1_s(arr, N-5, stride) + *addr1_s(arr, N-3, stride));
		// scaling
		*addr1_s(arr, N-5, stride) *= 1/zeta;
		*addr1_s(arr, N-4, stride) *=
zeta;
		*addr1_s(arr, N-3, stride) *= 1/zeta;
		*addr1_s(arr, N-2, stride) *= zeta;
		*addr1_s(arr, N-1, stride) *= 1/zeta;
	}
}

// Right-border epilog of the 5/3 transform.
static void fdwt_cdf53_epilog_s(
	float *arr,
	int N,
	int stride
)
{
	assert( N >= 2 );
	assert( arr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	if( is_even(N) )
	{
		// alpha
		// none
		// beta
		*addr1_s(arr, N-1, stride) += 2*beta*(*addr1_s(arr, N-2, stride));
		// scaling
		*addr1_s(arr, N-2, stride) *= 1/zeta;
		*addr1_s(arr, N-1, stride) *= zeta;
	}
	else /* is_odd(N) */
	{
		// alpha
		*addr1_s(arr, N-1, stride) += 2*alpha*(*addr1_s(arr, N-2, stride));
		// beta
		*addr1_s(arr, N-2, stride) += beta*(*addr1_s(arr, N-1, stride) + *addr1_s(arr, N-3, stride));
		// scaling
		*addr1_s(arr, N-3, stride) *= 1/zeta;
		*addr1_s(arr, N-2, stride) *= zeta;
		*addr1_s(arr, N-1, stride) *= 1/zeta;
	}
}

// Right-border epilog of the edge-avoiding 5/3 transform; only the odd-N
// beta step actually needs the weights.
static void fdwt_eaw53_epilog_s(
	float *arr,
	int N,
	int stride,
	const float *eaw_w
)
{
	assert( N >= 2 );
	assert( arr );

	float alpha = -dwt_cdf53_p1_s;
	float beta = +dwt_cdf53_u1_s;
	float zeta = +dwt_cdf53_s1_s;

	if( is_even(N) )
	{
		// alpha
		// none
		// beta
		*addr1_s(arr, N-1, stride) += 2*beta*(*addr1_s(arr, N-2, stride));
		// scaling
		*addr1_s(arr, N-2, stride) *= 1/zeta;
		*addr1_s(arr, N-1, stride) *= zeta;
	}
	else /* is_odd(N) */
	{
		// alpha
		*addr1_s(arr, N-1, stride) += 2*alpha*(*addr1_s(arr, N-2, stride));
		// beta
		float wL = eaw_w[(N-2)-1];
		float wR = eaw_w[(N-2)+0];
		*addr1_s(arr, N-2, stride) += ( wL * *addr1_s(arr, N-3, stride) + wR * *addr1_s(arr, N-1, stride) ) / (wL+wR) * (2.f * beta);
		// scaling
		*addr1_s(arr, N-3, stride) *= 1/zeta;
		*addr1_s(arr, N-2, stride) *= zeta;
		*addr1_s(arr, N-1, stride) *= 1/zeta;
	}
}

// 2-D multi-scale forward 9/7 transform using the vertical vectorisation:
// per level, run short/prolog/core/epilog passes over rows and columns
// (rows OpenMP-parallel over y, columns over x); halve sizes until j_max.
void fdwt2_cdf97_vertical_s(
	void *ptr,
	int size_x,
	int size_y,
	int stride_x,
	int stride_y,
	int *j_max_ptr,
	int decompose_one
)
{
	const int offset = 1;
#ifdef _OPENMP
	const int threads = dwt_util_get_num_threads();
#endif
	const int size_min = min(size_x, size_y);
	const int size_max = max(size_x, size_y);

	int j = 0;

	const int j_limit = ceil_log2( decompose_one ? size_max : size_min );

	if( *j_max_ptr < 0 || *j_max_ptr > j_limit )
		*j_max_ptr = j_limit;

	for(;;)
	{
		if( *j_max_ptr == j )
			break;

		const int size_x_j = ceil_div_pow2(size_x, j);
		const int size_y_j = ceil_div_pow2(size_y, j);

		const int stride_y_j = stride_y * (1 << j);
		const int stride_x_j = stride_x * (1 << j);

#ifdef _OPENMP
		const int threads_segment_y = ceil_div(size_y_j, threads);
		const int threads_segment_x = ceil_div(size_x_j, threads);
#endif

		// rows too short for the pipelined path: direct short transform
		if( size_x_j > 1 && size_x_j < 5 )
		{
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_short_s(
					addr2_s(ptr, y, 0, stride_x_j, stride_y_j),
					size_x_j,
					stride_y_j);
			}
		}
		if( size_y_j > 1 && size_y_j < 5 )
		{
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_short_s(
					addr2_s(ptr, 0, x, stride_x_j, stride_y_j),
					size_y_j,
					stride_x_j);
			}
		}

		// left-border prolog passes
		if( size_x_j > 1 && size_x_j >= 5 )
		{
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_prolog_s(
					addr2_s(ptr, y, 0, stride_x_j, stride_y_j),
					size_x_j,
					stride_y_j);
			}
		}
		if( size_y_j > 1 && size_y_j >= 5 )
		{
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_prolog_s(
					addr2_s(ptr, 0, x, stride_x_j, stride_y_j),
					size_y_j,
					stride_x_j);
			}
		}

		// vectorised core (skips the first sample handled by the prolog)
		if( size_x_j > 1 && size_x_j >= 5 )
		{
#pragma omp parallel for schedule(static, threads_segment_y)
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_vertical_s(
					addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j),
					size_x_j-offset,
					stride_y_j);
			}
		}
		if( size_y_j > 1 && size_y_j >= 5 )
		{
#pragma omp parallel for schedule(static, threads_segment_x)
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_vertical_s(
					addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j),
					size_y_j-offset,
					stride_x_j);
			}
		}

		// right-border epilog passes
		if( size_x_j > 1 && size_x_j >= 5 )
		{
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_epilog_s(
					addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j),
					size_x_j-offset,
					stride_y_j);
			}
		}
		if( size_y_j > 1 && size_y_j >= 5 )
		{
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_epilog_s(
					addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j),
					size_y_j-offset,
					stride_x_j);
			}
		}

		j++;
	}
}

void
fdwt2h1_cdf97_vertical_s(
// Horizontal-only (rows, 1-D per row) multi-scale 9/7 variant of
// fdwt2_cdf97_vertical_s: columns are never transformed.
	void *ptr,
	int size_x,
	int size_y,
	int stride_x,
	int stride_y,
	int *j_max_ptr,
	int decompose_one
)
{
	const int offset = 1;
#ifdef _OPENMP
	const int threads = dwt_util_get_num_threads();
#endif
	const int size_min = min(size_x, size_y);
	const int size_max = max(size_x, size_y);

	int j = 0;

	const int j_limit = ceil_log2( decompose_one ? size_max : size_min );

	if( *j_max_ptr < 0 || *j_max_ptr > j_limit )
		*j_max_ptr = j_limit;

	for(;;)
	{
		if( *j_max_ptr == j )
			break;

		const int size_x_j = ceil_div_pow2(size_x, j);
		const int size_y_j = ceil_div_pow2(size_y, j);

		const int stride_y_j = stride_y * (1 << j);
		const int stride_x_j = stride_x * (1 << j);

#ifdef _OPENMP
		const int threads_segment_y = ceil_div(size_y_j, threads);
#endif

		if( size_x_j > 1 && size_x_j < 5 )
		{
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_short_s(
					addr2_s(ptr, y, 0, stride_x_j, stride_y_j),
					size_x_j,
					stride_y_j);
			}
		}
		if( size_x_j > 1 && size_x_j >= 5 )
		{
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_prolog_s(
					addr2_s(ptr, y, 0, stride_x_j, stride_y_j),
					size_x_j,
					stride_y_j);
			}
		}
		if( size_x_j > 1 && size_x_j >= 5 )
		{
#pragma omp parallel for schedule(static, threads_segment_y)
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_vertical_s(
					addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j),
					size_x_j-offset,
					stride_y_j);
			}
		}
		if( size_x_j > 1 && size_x_j >= 5 )
		{
			for(int y = 0; y < size_y_j; y++)
			{
				fdwt_cdf97_epilog_s(
					addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j),
					size_x_j-offset,
					stride_y_j);
			}
		}

		j++;
	}
}

// Vertical-only (columns, 1-D per column) multi-scale 9/7 variant:
// rows are never transformed.
void fdwt2v1_cdf97_vertical_s(
	void *ptr,
	int size_x,
	int size_y,
	int stride_x,
	int stride_y,
	int *j_max_ptr,
	int decompose_one
)
{
	const int offset = 1;
#ifdef _OPENMP
	const int threads = dwt_util_get_num_threads();
#endif
	const int size_min = min(size_x, size_y);
	const int size_max = max(size_x, size_y);

	int j = 0;

	const int j_limit = ceil_log2( decompose_one ? size_max : size_min );

	if( *j_max_ptr < 0 || *j_max_ptr > j_limit )
		*j_max_ptr = j_limit;

	for(;;)
	{
		if( *j_max_ptr == j )
			break;

		const int size_x_j = ceil_div_pow2(size_x, j);
		const int size_y_j = ceil_div_pow2(size_y, j);

		const int stride_y_j = stride_y * (1 << j);
		const int stride_x_j = stride_x * (1 << j);

#ifdef _OPENMP
		const int threads_segment_x = ceil_div(size_x_j, threads);
#endif

		if( size_y_j > 1 && size_y_j < 5 )
		{
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_short_s(
					addr2_s(ptr, 0, x, stride_x_j, stride_y_j),
					size_y_j,
					stride_x_j);
			}
		}
		if( size_y_j > 1 && size_y_j >= 5 )
		{
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_prolog_s(
					addr2_s(ptr, 0, x, stride_x_j, stride_y_j),
					size_y_j,
					stride_x_j);
			}
		}
		if( size_y_j > 1 && size_y_j >= 5 )
		{
#pragma omp parallel for schedule(static, threads_segment_x)
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_vertical_s(
					addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j),
					size_y_j-offset,
					stride_x_j);
			}
		}
		if( size_y_j > 1 && size_y_j >= 5 )
		{
			for(int x = 0; x < size_x_j; x++)
			{
				fdwt_cdf97_epilog_s(
					addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j),
					size_y_j-offset,
					stride_x_j);
			}
		}

		j++;
	}
}

// 2-D multi-scale forward 5/3 transform (vertical vectorisation); as the
// 9/7 version above but with the 5/3 size threshold of 3 samples.
void fdwt2_cdf53_vertical_s(
	void *ptr,
	int size_x,
	int size_y,
	int stride_x,
	int stride_y,
	int *j_max_ptr,
	int decompose_one
)
{
	const int offset = 1;
#ifdef _OPENMP
	const int threads = dwt_util_get_num_threads();
#endif
	const int size_min = min(size_x, size_y);
	const int size_max = max(size_x, size_y);

	int j = 0;

	const int j_limit = ceil_log2( decompose_one ?
size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif if( size_x_j > 1 && size_x_j < 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j < 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_short_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_vertical_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_vertical_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } j++; } } void fdwt1_cdf97_horizontal_s( 
	void *ptr,
	int size,
	int stride,
	int *j_max_ptr
)
{
	const int offset = 1;

	int j = 0;

	/* Maximum feasible number of decomposition levels for this length. */
	const int j_limit = ceil_log2(size);

	if( *j_max_ptr < 0 || *j_max_ptr > j_limit )
		*j_max_ptr = j_limit;

	/* One iteration per level: the signal seen at level j has its size
	 * halved (rounded up) and its stride doubled. */
	for(;;)
	{
		if( *j_max_ptr == j )
			break;

		const int size_x_j = ceil_div_pow2(size, j);
		const int stride_y_j = stride * (1 << j);

		/* Fewer than 5 samples: short-signal code path (the CDF 9/7
		 * lifting core needs at least 5). */
		if( size_x_j > 1 && size_x_j < 5 )
		{
			fdwt_cdf97_short_s( ptr, size_x_j, stride_y_j );
		}
		/* Otherwise: prolog for the leading border, core starting at
		 * 'offset', epilog for the trailing border. */
		if( size_x_j >= 5 )
		{
			fdwt_cdf97_prolog_s( ptr, size_x_j, stride_y_j );
			fdwt_cdf97_horizontal_s( addr1_s(ptr, 0+offset, stride_y_j), size_x_j-offset, stride_y_j );
			fdwt_cdf97_epilog_s( addr1_s(ptr, 0+offset, stride_y_j), size_x_j-offset, stride_y_j );
		}

		j++;
	}
}

/**
 * Single-level 1-D forward CDF 9/7 transform (horizontal core).
 * No-op for signals that cannot be decomposed (j_limit < 1).
 */
void fdwt1_single_cdf97_horizontal_s(
	void *ptr,
	int size,
	int stride
)
{
	const int offset = 1;

	const int j_limit = ceil_log2(size);

	if( j_limit < 1 )
		return;

	const int size_x_j = size;
	const int stride_y_j = stride;

	if( size_x_j > 1 && size_x_j < 5 )
	{
		fdwt_cdf97_short_s( ptr, size_x_j, stride_y_j );
	}
	if( size_x_j >= 5 )
	{
		fdwt_cdf97_prolog_s( ptr, size_x_j, stride_y_j );
		fdwt_cdf97_horizontal_s( addr1_s(ptr, 0+offset, stride_y_j), size_x_j-offset, stride_y_j );
		fdwt_cdf97_epilog_s( addr1_s(ptr, 0+offset, stride_y_j), size_x_j-offset, stride_y_j );
	}
}

/**
 * Single-level 1-D forward CDF 9/7 (horizontal core), no length dispatch:
 * the caller guarantees size >= 5, so only prolog/core/epilog are needed.
 */
void fdwt1_single_cdf97_horizontal_min5_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( size >= 5 );

	const int offset = 1;

	fdwt_cdf97_prolog_s( ptr, size, stride );
	fdwt_cdf97_horizontal_s( addr1_s(ptr, 0+offset, stride), size-offset, stride );
	fdwt_cdf97_epilog_s( addr1_s(ptr, 0+offset, stride), size-offset, stride );
}

/**
 * Same as fdwt1_single_cdf97_horizontal_min5_s but using the "vertical"
 * (SIMD-friendly) core kernel for the middle pass.
 */
void fdwt1_single_cdf97_vertical_min5_s(
	void *ptr,
	int size,
	int stride
)
{
	assert( size >= 5 );

	const int offset = 1;

	fdwt_cdf97_prolog_s( ptr, size, stride );
	fdwt_cdf97_vertical_s( addr1_s(ptr, 0+offset, stride), size-offset, stride );
	fdwt_cdf97_epilog_s( addr1_s(ptr, 0+offset, stride), size-offset, stride );
}

/* 2-D forward CDF 9/7 transform, horizontal core (continues below). */
void fdwt2_cdf97_horizontal_s(
	void *ptr,
	int size_x,
	int size_y,
	int stride_x,
	int stride_y,
	int *j_max_ptr,
	int decompose_one
)
{
const int offset = 1; #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_min = min(size_x, size_y); const int size_max = max(size_x, size_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif if( size_x_j > 1 && size_x_j < 5 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j < 5 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_short_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 5 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 5 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_horizontal_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_horizontal_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 5 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, 
stride_y_j); } } if( size_y_j > 1 && size_y_j >= 5 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } j++; } } void fdwt2_cdf53_horizontal_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int *j_max_ptr, int decompose_one ) { const int offset = 1; #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_min = min(size_x, size_y); const int size_max = max(size_x, size_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif if( size_x_j > 1 && size_x_j < 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j < 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_short_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_horizontal_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { #pragma omp parallel for 
schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_horizontal_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } j++; } } /** * @warning The EAW decomposition is different if we compute weights "w" for second (vertical) filtering before xor after first (horizontal) filtering. */ void fdwt2_eaw53_horizontal_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int *j_max_ptr, int decompose_one, float *wH[], float *wV[], float alpha ) { const int offset = 1; #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_min = min(size_x, size_y); const int size_max = max(size_x, size_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? 
size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif wH[j] = dwt_util_alloc(size_y_j * size_x_j, sizeof(float)); wV[j] = dwt_util_alloc(size_x_j * size_y_j, sizeof(float)); if( size_x_j > 1 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { dwt_calc_eaw_w_stride_s( &wH[j][y*size_x_j], addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, alpha ); } } if( size_y_j > 1 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { dwt_calc_eaw_w_stride_s( &wV[j][x*size_y_j], addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, alpha ); } } if( size_x_j > 1 && size_x_j < 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, &wH[j][y*size_x_j] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, &wH[j][y*size_x_j] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_horizontal_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j, &wH[j][y*size_x_j+offset] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j, &wH[j][y*size_x_j+offset] ); } } if( size_y_j > 1 && size_y_j < 3 ) { for(int x = 0; x < size_x_j; x++) { 
fdwt_eaw53_short_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, &wV[j][x*size_y_j] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, &wV[j][x*size_y_j] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_horizontal_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j, &wV[j][x*size_y_j+offset] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j, &wV[j][x*size_y_j+offset] ); } } j++; } } void fdwt2_eaw53_vertical_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int *j_max_ptr, int decompose_one, float *wH[], float *wV[], float alpha ) { const int offset = 1; #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_min = min(size_x, size_y); const int size_max = max(size_x, size_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? 
size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif wH[j] = dwt_util_alloc(size_y_j * size_x_j, sizeof(float)); wV[j] = dwt_util_alloc(size_x_j * size_y_j, sizeof(float)); if( size_x_j > 1 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { dwt_calc_eaw_w_stride_s( &wH[j][y*size_x_j], addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, alpha ); } } if( size_y_j > 1 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { dwt_calc_eaw_w_stride_s( &wV[j][x*size_y_j], addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, alpha ); } } if( size_x_j > 1 && size_x_j < 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, &wH[j][y*size_x_j] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, &wH[j][y*size_x_j] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_vertical_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j, &wH[j][y*size_x_j+offset] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j, &wH[j][y*size_x_j+offset] ); } } if( size_y_j > 1 && size_y_j < 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_short_s( 
addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, &wV[j][x*size_y_j] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, &wV[j][x*size_y_j] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_vertical_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j, &wV[j][x*size_y_j+offset] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j, &wV[j][x*size_y_j+offset] ); } } j++; } } void fdwt2_eaw53_diagonal_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int *j_max_ptr, int decompose_one, float *wH[], float *wV[], float alpha ) { const int offset = 1; #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_min = min(size_x, size_y); const int size_max = max(size_x, size_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? 
size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif wH[j] = dwt_util_alloc(size_y_j * size_x_j, sizeof(float)); wV[j] = dwt_util_alloc(size_x_j * size_y_j, sizeof(float)); if( size_x_j > 1 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { dwt_calc_eaw_w_stride_s( &wH[j][y*size_x_j], addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, alpha ); } } if( size_y_j > 1 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { dwt_calc_eaw_w_stride_s( &wV[j][x*size_y_j], addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, alpha ); } } if( size_x_j > 1 && size_x_j < 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, &wH[j][y*size_x_j] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j, &wH[j][y*size_x_j] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_diagonal_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j, &wH[j][y*size_x_j+offset] ); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_eaw53_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j, &wH[j][y*size_x_j+offset] ); } } if( size_y_j > 1 && size_y_j < 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_short_s( 
addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, &wV[j][x*size_y_j] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j, &wV[j][x*size_y_j] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_diagonal_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j, &wV[j][x*size_y_j+offset] ); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_eaw53_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j, &wV[j][x*size_y_j+offset] ); } } j++; } } void fdwt2_cdf97_diagonal_s( void *ptr, int size_x, int size_y, int stride_x, int stride_y, int *j_max_ptr, int decompose_one ) { const int offset = 1; #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_min = min(size_x, size_y); const int size_max = max(size_x, size_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? 
size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif if( size_x_j > 1 && size_x_j < 5 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j < 5 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_short_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 5 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 5 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_diagonal_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 5 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_diagonal_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 5 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf97_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 5 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf97_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } j++; } } void fdwt2_cdf53_diagonal_s( void 
*ptr, int size_x, int size_y, int stride_x, int stride_y, int *j_max_ptr, int decompose_one ) { const int offset = 1; #ifdef _OPENMP const int threads = dwt_util_get_num_threads(); #endif const int size_min = min(size_x, size_y); const int size_max = max(size_x, size_y); int j = 0; const int j_limit = ceil_log2( decompose_one ? size_max : size_min ); if( *j_max_ptr < 0 || *j_max_ptr > j_limit ) *j_max_ptr = j_limit; for(;;) { if( *j_max_ptr == j ) break; const int size_x_j = ceil_div_pow2(size_x, j); const int size_y_j = ceil_div_pow2(size_y, j); const int stride_y_j = stride_y * (1 << j); const int stride_x_j = stride_x * (1 << j); #ifdef _OPENMP const int threads_segment_y = ceil_div(size_y_j, threads); const int threads_segment_x = ceil_div(size_x_j, threads); #endif if( size_x_j > 1 && size_x_j < 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_short_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j < 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_short_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_prolog_s( addr2_s(ptr, y, 0, stride_x_j, stride_y_j), size_x_j, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_prolog_s( addr2_s(ptr, 0, x, stride_x_j, stride_y_j), size_y_j, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_y) for(int y = 0; y < size_y_j; y++) { fdwt_cdf53_diagonal_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { #pragma omp parallel for schedule(static, threads_segment_x) for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_diagonal_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } if( size_x_j > 1 && size_x_j >= 3 ) { for(int y = 0; y < size_y_j; y++) 
{ fdwt_cdf53_epilog_s( addr2_s(ptr, y, 0+offset, stride_x_j, stride_y_j), size_x_j-offset, stride_y_j); } } if( size_y_j > 1 && size_y_j >= 3 ) { for(int x = 0; x < size_x_j; x++) { fdwt_cdf53_epilog_s( addr2_s(ptr, 0+offset, x, stride_x_j, stride_y_j), size_y_j-offset, stride_x_j); } } j++; } }
/* ===== adi-brisbane.c — boundary of concatenated source file (was a bare file-name line, a syntax error) ===== */
//-------------------------------------------------------------------------// // // // This benchmark is a serial C version of the NPB SP code. This C // // version is developed by the Center for Manycore Programming at Seoul // // National University and derived from the serial Fortran versions in // // "NPB3.3-SER" developed by NAS. // // // // Permission to use, copy, distribute and modify this software for any // // purpose with or without fee is hereby granted. This software is // // provided "as is" without express or implied warranty. // // // // Information on NPB 3.3, including the technical report, the original // // specifications, source code, results and information on how to submit // // new results, is available at: // // // // http://www.nas.nasa.gov/Software/NPB/ // // // // Send comments or suggestions for this C version to cmp@aces.snu.ac.kr // // // // Center for Manycore Programming // // School of Computer Science and Engineering // // Seoul National University // // Seoul 151-744, Korea // // // // E-mail: cmp@aces.snu.ac.kr // // // //-------------------------------------------------------------------------// //-------------------------------------------------------------------------// // Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, // // and Jaejin Lee // //-------------------------------------------------------------------------// #include "header-brisbane.h" void adi() { compute_rhs(); txinvr(); x_solve(); y_solve(); z_solve(); add(); } void ninvr() { int i, j, k; double r1, r2, r3, r4, r5, t1, t2; size_t kernel_ninvr_0_off[3] = { 1, 1, 1 }; size_t kernel_ninvr_0_idx[3] = { nx2, ny2, nz2 }; brisbane_kernel kernel_ninvr_0; brisbane_kernel_create("ninvr_0", &kernel_ninvr_0); brisbane_kernel_setmem(kernel_ninvr_0, 0, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_ninvr_0, 1, sizeof(double), &bt); brisbane_task task0; brisbane_task_create(&task0); brisbane_task_kernel(task0, kernel_ninvr_0, 3, kernel_ninvr_0_off, 
kernel_ninvr_0_idx);
  // Submit to the default device; last argument true — presumably a
  // blocking submit (TODO confirm against the Brisbane API).
  brisbane_task_submit(task0, brisbane_default, NULL, true);
#if 0
  // Original host/OpenMP-offload implementation, kept for reference.
#pragma omp target //present(rhs)
#ifdef SPEC_USE_INNER_SIMD
#pragma omp teams distribute parallel for collapse(2) private(i,j,k,r1,r2,r3,r4,r5,t1,t2)
#else
#pragma omp teams distribute parallel for simd collapse(3) private(r1,r2,r3,r4,r5,t1,t2)
#endif
  for (k = 1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(r1,r2,r3,r4,r5,t1,t2)
#endif
      for (i = 1; i <= nx2; i++) {
        r1 = rhs[0][k][j][i];
        r2 = rhs[1][k][j][i];
        r3 = rhs[2][k][j][i];
        r4 = rhs[3][k][j][i];
        r5 = rhs[4][k][j][i];
        t1 = bt * r3;
        t2 = 0.5 * ( r4 + r5 );
        rhs[0][k][j][i] = -r2;
        rhs[1][k][j][i] = r1;
        rhs[2][k][j][i] = bt * ( r4 - r5 );
        rhs[3][k][j][i] = -t1 + t2;
        rhs[4][k][j][i] = t1 + t2;
      }
    }
  }
#endif
}

// Block-diagonal matrix-vector multiplication (P^-1 applied to rhs),
// offloaded as the "pinvr_0" Brisbane kernel over the interior grid;
// same launch shape and arguments as ninvr above.
void pinvr()
{
  // Locals below are only referenced by the disabled #if 0 path.
  int i, j, k;
  double r1, r2, r3, r4, r5, t1, t2;

  size_t kernel_pinvr_0_off[3] = { 1, 1, 1 };
  size_t kernel_pinvr_0_idx[3] = { nx2, ny2, nz2 };
  brisbane_kernel kernel_pinvr_0;
  brisbane_kernel_create("pinvr_0", &kernel_pinvr_0);
  brisbane_kernel_setmem(kernel_pinvr_0, 0, mem_rhs, brisbane_rw);
  brisbane_kernel_setarg(kernel_pinvr_0, 1, sizeof(double), &bt);

  brisbane_task task0;
  brisbane_task_create(&task0);
  brisbane_task_kernel(task0, kernel_pinvr_0, 3, kernel_pinvr_0_off, kernel_pinvr_0_idx);
  brisbane_task_submit(task0, brisbane_default, NULL, true);
#if 0
  // Original host/OpenMP-offload implementation, kept for reference.
#pragma omp target //present(rhs)
#ifdef SPEC_USE_INNER_SIMD
#pragma omp teams distribute parallel for private(i,j,k,r1,r2,r3,r4,r5,t1,t2) collapse(2)
#else
#pragma omp teams distribute parallel for simd private(r1,r2,r3,r4,r5,t1,t2) collapse(3)
#endif
  for (k = 1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(r1,r2,r3,r4,r5,t1,t2)
#endif
      for (i = 1; i <= nx2; i++) {
        r1 = rhs[0][k][j][i];
        r2 = rhs[1][k][j][i];
        r3 = rhs[2][k][j][i];
        r4 = rhs[3][k][j][i];
        r5 = rhs[4][k][j][i];
        t1 = bt * r1;
        t2 = 0.5 * ( r4 + r5 );
        rhs[0][k][j][i] = bt * ( r4 - r5 );
rhs[1][k][j][i] = -r3;
        rhs[2][k][j][i] = r2;
        rhs[3][k][j][i] = -t1 + t2;
        rhs[4][k][j][i] = t1 + t2;
      }
    }
  }
#endif
}

// Block-diagonal matrix-vector multiplication (T applied to rhs after the
// z-sweep), offloaded as the "tzetar_0" Brisbane kernel over the interior
// grid. Reads us, vs, ws, qs, u and speed; updates rhs in place; bt and
// c2iv are passed as scalar arguments.
void tzetar()
{
  // Locals below are only referenced by the disabled #if 0 path.
  int i, j, k;
  double t1, t2, t3, ac, xvel, yvel, zvel, r1, r2, r3, r4, r5;
  double btuz, ac2u, uzik1;

  size_t kernel_tzetar_0_off[3] = { 1, 1, 1 };
  size_t kernel_tzetar_0_idx[3] = { nx2, ny2, nz2 };
  brisbane_kernel kernel_tzetar_0;
  brisbane_kernel_create("tzetar_0", &kernel_tzetar_0);
  // Velocity fields, u, and the speed of sound are read-only; rhs is
  // updated in place.
  brisbane_kernel_setmem(kernel_tzetar_0, 0, mem_us, brisbane_rd);
  brisbane_kernel_setmem(kernel_tzetar_0, 1, mem_vs, brisbane_rd);
  brisbane_kernel_setmem(kernel_tzetar_0, 2, mem_ws, brisbane_rd);
  brisbane_kernel_setmem(kernel_tzetar_0, 3, mem_qs, brisbane_rd);
  brisbane_kernel_setmem(kernel_tzetar_0, 4, mem_u, brisbane_rd);
  brisbane_kernel_setmem(kernel_tzetar_0, 5, mem_speed, brisbane_rd);
  brisbane_kernel_setmem(kernel_tzetar_0, 6, mem_rhs, brisbane_rw);
  brisbane_kernel_setarg(kernel_tzetar_0, 7, sizeof(double), &bt);
  brisbane_kernel_setarg(kernel_tzetar_0, 8, sizeof(double), &c2iv);

  brisbane_task task0;
  brisbane_task_create(&task0);
  brisbane_task_kernel(task0, kernel_tzetar_0, 3, kernel_tzetar_0_off, kernel_tzetar_0_idx);
  brisbane_task_submit(task0, brisbane_default, NULL, true);
#if 0
  // Original host/OpenMP-offload implementation, kept for reference.
#pragma omp target //present(us,vs,ws,qs,u,speed,rhs)
#ifdef SPEC_USE_INNER_SIMD
#pragma omp teams distribute parallel for collapse(2) private(i,j,k,t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1)
#else
#pragma omp teams distribute parallel for simd collapse(3) private(t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1)
#endif
  for (k = 1; k <= nz2; k++) {
    for (j = 1; j <= ny2; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(t1,t2,t3,ac,xvel,yvel,zvel,r1,r2,r3,r4,r5,btuz,ac2u,uzik1)
#endif
      for (i = 1; i <= nx2; i++) {
        xvel = us[k][j][i];
        yvel = vs[k][j][i];
        zvel = ws[k][j][i];
        ac = speed[k][j][i];
        ac2u = ac*ac;
        r1 = rhs[0][k][j][i];
        r2 = rhs[1][k][j][i];
        r3 = rhs[2][k][j][i];
        r4 = rhs[3][k][j][i];
        r5 = rhs[4][k][j][i];
        uzik1 = u[0][k][j][i];
        btuz =
bt * uzik1;
        t1 = btuz/ac * (r4 + r5);
        t2 = r3 + t1;
        t3 = btuz * (r4 - r5);
        rhs[0][k][j][i] = t2;
        rhs[1][k][j][i] = -uzik1*r2 + xvel*t2;
        rhs[2][k][j][i] = uzik1*r1 + yvel*t2;
        rhs[3][k][j][i] = zvel*t2 + t3;
        rhs[4][k][j][i] = uzik1*(-xvel*r2 + yvel*r1) + qs[k][j][i]*t2 + c2iv*ac2u*t1 + zvel*t3;
      }
    }
  }
#endif
}

//---------------------------------------------------------------------
// x_solve: solves the three x-direction factored systems (scalar
// pentadiagonal Thomas algorithm) of the SP pseudo-application.
//
// This host routine has been ported to the Brisbane task runtime:
// for each stage it creates a device buffer set, builds a kernel
// named "x_solve_N", binds its memory/scalar arguments by index, and
// submits a synchronous task (last brisbane_task_submit argument is
// `true`).  The original OpenMP `target` loop nests are kept under
// `#if 0` as the reference implementation of what each device kernel
// is expected to compute.
//
// NOTE(review): the rhsX/lhsX work arrays are transposed copies of
// rhs/lhs with the i and j indices swapped ([m][k][i][j] vs
// [m][k][j][i]) — presumably for coalesced device access; the
// argument-index bindings below must match the device-side kernels,
// which are not visible in this file.
//---------------------------------------------------------------------
void x_solve()
{
  int i, j, k, i1, i2, m;
  int gp01,gp02,gp03,gp04;
  double ru1, fac1, fac2;
  // Host-side mirrors of the device work arrays (i/j transposed).
  double lhsX[5][nz2+1][IMAXP+1][IMAXP+1];
  double lhspX[5][nz2+1][IMAXP+1][IMAXP+1];
  double lhsmX[5][nz2+1][IMAXP+1][IMAXP+1];
  double rhonX[nz2+1][IMAXP+1][PROBLEM_SIZE];
  double rhsX[5][nz2+1][IMAXP+1][JMAXP+1];
  int ni=nx2+1;
  // Frequently used grid extents: grid_points[0]-1 .. grid_points[0]-4.
  gp01=grid_points[0]-1;
  gp02=grid_points[0]-2;
  gp03=grid_points[0]-3;
  gp04=grid_points[0]-4;
  // Device buffers backing the work arrays above.
  brisbane_mem mem_lhsX;
  brisbane_mem mem_lhspX;
  brisbane_mem mem_lhsmX;
  brisbane_mem mem_rhonX;
  brisbane_mem mem_rhsX;
  brisbane_mem_create(5 * (nz2 + 1) * (IMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_lhsX);
  brisbane_mem_create(5 * (nz2 + 1) * (IMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_lhspX);
  brisbane_mem_create(5 * (nz2 + 1) * (IMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_lhsmX);
  brisbane_mem_create((nz2 + 1) * (IMAXP + 1) * (PROBLEM_SIZE) * sizeof(double), &mem_rhonX);
  brisbane_mem_create(5 * (nz2 + 1) * (IMAXP + 1) * (JMAXP + 1) * sizeof(double), &mem_rhsX);
#pragma omp target data map(alloc:lhsX[:][:][:][:],lhspX[:][:][:][:], lhsmX[:][:][:][:],rhonX[:][:][:],rhsX[:][:][:][:]) //present(rho_i,us,speed,rhs)
  {
    // Stage 0: transpose rhs -> rhsX (swap i/j) on the device.
    size_t kernel_x_solve_0_off[3] = { 0, 0, 0 };
    size_t kernel_x_solve_0_idx[3] = { IMAXP + 1, JMAXP + 1, nz2 + 1 };
    brisbane_kernel kernel_x_solve_0;
    brisbane_kernel_create("x_solve_0", &kernel_x_solve_0);
    brisbane_kernel_setmem(kernel_x_solve_0, 0, mem_rhsX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_0, 1, mem_rhs, brisbane_rd);
    brisbane_task task0;
    brisbane_task_create(&task0);
    brisbane_task_kernel(task0, kernel_x_solve_0, 3, kernel_x_solve_0_off, kernel_x_solve_0_idx);
    brisbane_task_submit(task0, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for collapse(2) private(i,j,k)
#else
#pragma omp target teams distribute parallel for simd collapse(3)
#endif
    for (k = 0; k <= nz2; k++) {
      for (j = 0; j <= JMAXP; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
        for (i = 0; i <= IMAXP; i++) {
          rhsX[0][k][i][j] = rhs[0][k][j][i];
          rhsX[1][k][i][j] = rhs[1][k][j][i];
          rhsX[2][k][i][j] = rhs[2][k][j][i];
          rhsX[3][k][i][j] = rhs[3][k][j][i];
          rhsX[4][k][i][j] = rhs[4][k][j][i];
        }
      }
    }
#endif
    // Stage 1: initialize boundary rows (i = 0 and i = ni) of the
    // three lhs factor arrays to the identity.
    size_t kernel_x_solve_1_off[2] = { 1, 1 };
    size_t kernel_x_solve_1_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_1;
    brisbane_kernel_create("x_solve_1", &kernel_x_solve_1);
    brisbane_kernel_setmem(kernel_x_solve_1, 0, mem_lhsX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_1, 1, mem_lhspX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_1, 2, mem_lhsmX, brisbane_wr);
    brisbane_kernel_setarg(kernel_x_solve_1, 3, sizeof(int), &ni);
    brisbane_task task1;
    brisbane_task_create(&task1);
    brisbane_task_kernel(task1, kernel_x_solve_1, 2, kernel_x_solve_1_off, kernel_x_solve_1_idx);
    brisbane_task_submit(task1, brisbane_default, NULL, true);
#if 0
#pragma omp target teams distribute parallel for private(k,j,m) collapse(2)
    for (k = 1; k <= nz2; k++) {
      for (j = 1; j <= ny2; j++) {
        for (m = 0; m < 5; m++) {
          lhsX[m][k][0][j] = 0.0;
          lhspX[m][k][0][j] = 0.0;
          lhsmX[m][k][0][j] = 0.0;
          lhsX[m][k][ni][j] = 0.0;
          lhspX[m][k][ni][j] = 0.0;
          lhsmX[m][k][ni][j] = 0.0;
        }
        lhsX[2][k][0][j] = 1.0;
        lhspX[2][k][0][j] = 1.0;
        lhsmX[2][k][0][j] = 1.0;
        lhsX[2][k][ni][j] = 1.0;
        lhspX[2][k][ni][j] = 1.0;
        lhsmX[2][k][ni][j] = 1.0;
      }
    }
#endif
    //---------------------------------------------------------------------
    // Computes the left hand side for the three x-factors
    //---------------------------------------------------------------------
    //---------------------------------------------------------------------
    // first fill the lhs for the u-eigenvalue
    //---------------------------------------------------------------------
    // Stage 2: build the u-eigenvalue lhs from rho_i and us.
    size_t kernel_x_solve_2_off[2] = { 1, 1 };
    size_t kernel_x_solve_2_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_2;
    brisbane_kernel_create("x_solve_2", &kernel_x_solve_2);
    brisbane_kernel_setmem(kernel_x_solve_2, 0, mem_rho_i, brisbane_rd);
    brisbane_kernel_setmem(kernel_x_solve_2, 1, mem_rhonX, brisbane_rw);
    brisbane_kernel_setmem(kernel_x_solve_2, 2, mem_lhsX, brisbane_rw);
    brisbane_kernel_setmem(kernel_x_solve_2, 3, mem_us, brisbane_rd);
    brisbane_kernel_setarg(kernel_x_solve_2, 4, sizeof(int), &gp01);
    brisbane_kernel_setarg(kernel_x_solve_2, 5, sizeof(double), &dx1);
    brisbane_kernel_setarg(kernel_x_solve_2, 6, sizeof(double), &dx2);
    brisbane_kernel_setarg(kernel_x_solve_2, 7, sizeof(double), &dx5);
    brisbane_kernel_setarg(kernel_x_solve_2, 8, sizeof(double), &dxmax);
    brisbane_kernel_setarg(kernel_x_solve_2, 9, sizeof(double), &c1c5);
    brisbane_kernel_setarg(kernel_x_solve_2, 10, sizeof(double), &c3c4);
    brisbane_kernel_setarg(kernel_x_solve_2, 11, sizeof(double), &dttx1);
    brisbane_kernel_setarg(kernel_x_solve_2, 12, sizeof(double), &dttx2);
    brisbane_kernel_setarg(kernel_x_solve_2, 13, sizeof(double), &c2dttx1);
    brisbane_kernel_setarg(kernel_x_solve_2, 14, sizeof(double), &con43);
    brisbane_task task2;
    brisbane_task_create(&task2);
    brisbane_task_kernel(task2, kernel_x_solve_2, 2, kernel_x_solve_2_off, kernel_x_solve_2_idx);
    brisbane_task_submit(task2, brisbane_default, NULL, true);
#if 0
#pragma omp target teams distribute parallel for collapse(2) private(i,j,k,ru1)
    for (k = 1; k <= nz2; k++) {
      for (j = 1; j <= ny2; j++) {
#pragma omp simd private(ru1)
        for (i = 0; i <= gp01; i++) {
          ru1 = c3c4*rho_i[k][j][i];
          rhonX[k][j][i] = max(max(dx2+con43*ru1,dx5+c1c5*ru1), max(dxmax+ru1,dx1));
        }
#pragma omp simd
        for (i = 1; i <= nx2; i++) {
          lhsX[0][k][i][j] = 0.0;
          lhsX[1][k][i][j] = -dttx2 * us[k][j][i-1] - dttx1 * rhonX[k][j][i-1];
          lhsX[2][k][i][j] = 1.0 + c2dttx1 * rhonX[k][j][i];
          lhsX[3][k][i][j] = dttx2 * us[k][j][i+1] - dttx1 * rhonX[k][j][i+1];
          lhsX[4][k][i][j] = 0.0;
        }
      }
    }
#endif
    //---------------------------------------------------------------------
    // add fourth order dissipation
    //---------------------------------------------------------------------
    // Stage 3: dissipation at the left boundary (rows i = 1 and i = 2).
    i = 1;
    size_t kernel_x_solve_3_off[2] = { 1, 1 };
    size_t kernel_x_solve_3_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_3;
    brisbane_kernel_create("x_solve_3", &kernel_x_solve_3);
    brisbane_kernel_setmem(kernel_x_solve_3, 0, mem_lhsX, brisbane_rw);
    brisbane_kernel_setarg(kernel_x_solve_3, 1, sizeof(int), &i);
    brisbane_kernel_setarg(kernel_x_solve_3, 2, sizeof(double), &comz1);
    brisbane_kernel_setarg(kernel_x_solve_3, 3, sizeof(double), &comz4);
    brisbane_kernel_setarg(kernel_x_solve_3, 4, sizeof(double), &comz5);
    brisbane_kernel_setarg(kernel_x_solve_3, 5, sizeof(double), &comz6);
    brisbane_task task3;
    brisbane_task_create(&task3);
    brisbane_task_kernel(task3, kernel_x_solve_3, 2, kernel_x_solve_3_off, kernel_x_solve_3_idx);
    brisbane_task_submit(task3, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(k,j)
#else
#pragma omp target teams distribute parallel for simd collapse(2)
#endif
    for (k = 1; k <= nz2; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
      for (j = 1; j <= ny2; j++) {
        lhsX[2][k][i][j] = lhsX[2][k][i][j] + comz5;
        lhsX[3][k][i][j] = lhsX[3][k][i][j] - comz4;
        lhsX[4][k][i][j] = lhsX[4][k][i][j] + comz1;
        lhsX[1][k][i+1][j] = lhsX[1][k][i+1][j] - comz4;
        lhsX[2][k][i+1][j] = lhsX[2][k][i+1][j] + comz6;
        lhsX[3][k][i+1][j] = lhsX[3][k][i+1][j] - comz4;
        lhsX[4][k][i+1][j] = lhsX[4][k][i+1][j] + comz1;
      }
    }
#endif
    // Stage 4: dissipation on the interior rows i = 3 .. gp04.
    size_t kernel_x_solve_4_off[3] = { 3, 1, 1 };
    size_t kernel_x_solve_4_idx[3] = { gp04 - 2, ny2, nz2 };
    brisbane_kernel kernel_x_solve_4;
    brisbane_kernel_create("x_solve_4", &kernel_x_solve_4);
    brisbane_kernel_setmem(kernel_x_solve_4, 0, mem_lhsX, brisbane_rw);
    brisbane_kernel_setarg(kernel_x_solve_4, 1, sizeof(double), &comz1);
    brisbane_kernel_setarg(kernel_x_solve_4, 2, sizeof(double), &comz4);
    brisbane_kernel_setarg(kernel_x_solve_4, 3, sizeof(double), &comz6);
    brisbane_task task4;
    brisbane_task_create(&task4);
    brisbane_task_kernel(task4, kernel_x_solve_4, 3, kernel_x_solve_4_off, kernel_x_solve_4_idx);
    brisbane_task_submit(task4, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,j,k) collapse(2)
#else
#pragma omp target teams distribute parallel for simd collapse(3)
#endif
    for (k = 1; k <= nz2; k++) {
      for (j = 1; j <= ny2; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
        for (i = 3; i <= gp04; i++) {
          lhsX[0][k][i][j] = lhsX[0][k][i][j] + comz1;
          lhsX[1][k][i][j] = lhsX[1][k][i][j] - comz4;
          lhsX[2][k][i][j] = lhsX[2][k][i][j] + comz6;
          lhsX[3][k][i][j] = lhsX[3][k][i][j] - comz4;
          lhsX[4][k][i][j] = lhsX[4][k][i][j] + comz1;
        }
      }
    }
#endif
    // Stage 5: dissipation at the right boundary (rows i = gp03, gp03+1).
    i = gp03;
    size_t kernel_x_solve_5_off[2] = { 1, 1 };
    size_t kernel_x_solve_5_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_5;
    brisbane_kernel_create("x_solve_5", &kernel_x_solve_5);
    brisbane_kernel_setmem(kernel_x_solve_5, 0, mem_lhsX, brisbane_rw);
    brisbane_kernel_setarg(kernel_x_solve_5, 1, sizeof(int), &i);
    brisbane_kernel_setarg(kernel_x_solve_5, 2, sizeof(double), &comz1);
    brisbane_kernel_setarg(kernel_x_solve_5, 3, sizeof(double), &comz4);
    brisbane_kernel_setarg(kernel_x_solve_5, 4, sizeof(double), &comz5);
    brisbane_kernel_setarg(kernel_x_solve_5, 5, sizeof(double), &comz6);
    brisbane_task task5;
    brisbane_task_create(&task5);
    brisbane_task_kernel(task5, kernel_x_solve_5, 2, kernel_x_solve_5_off, kernel_x_solve_5_idx);
    brisbane_task_submit(task5, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(j,k)
#else
#pragma omp target teams distribute parallel for simd collapse(2)
#endif
    for (k = 1; k <= nz2; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
      for (j = 1; j <= ny2; j++) {
        lhsX[0][k][i][j] = lhsX[0][k][i][j] + comz1;
        lhsX[1][k][i][j] = lhsX[1][k][i][j] - comz4;
        lhsX[2][k][i][j] = lhsX[2][k][i][j] + comz6;
        lhsX[3][k][i][j] = lhsX[3][k][i][j] - comz4;
        lhsX[0][k][i+1][j] = lhsX[0][k][i+1][j] + comz1;
        lhsX[1][k][i+1][j] = lhsX[1][k][i+1][j] - comz4;
        lhsX[2][k][i+1][j] = lhsX[2][k][i+1][j] + comz5;
      }
    }
#endif
    //---------------------------------------------------------------------
    // subsequently, fill the other factors (u+c), (u-c) by adding to
    // the first
    //---------------------------------------------------------------------
    // Stage 6: derive lhspX (u+c) and lhsmX (u-c) from lhsX and speed.
    size_t kernel_x_solve_6_off[3] = { 1, 1, 1 };
    size_t kernel_x_solve_6_idx[3] = { nx2, ny2, nz2 };
    brisbane_kernel kernel_x_solve_6;
    brisbane_kernel_create("x_solve_6", &kernel_x_solve_6);
    brisbane_kernel_setmem(kernel_x_solve_6, 0, mem_lhspX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_6, 1, mem_lhsmX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_6, 2, mem_lhsX, brisbane_rd);
    brisbane_kernel_setmem(kernel_x_solve_6, 3, mem_speed, brisbane_rd);
    brisbane_kernel_setarg(kernel_x_solve_6, 4, sizeof(double), &dttx2);
    brisbane_task task6;
    brisbane_task_create(&task6);
    brisbane_task_kernel(task6, kernel_x_solve_6, 3, kernel_x_solve_6_off, kernel_x_solve_6_idx);
    brisbane_task_submit(task6, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,j,k) collapse(2)
#else
#pragma omp target teams distribute parallel for simd collapse(3)
#endif
    for (k = 1; k <= nz2; k++) {
      for (j = 1; j <= ny2; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
        for (i = 1; i <= nx2; i++) {
          lhspX[0][k][i][j] = lhsX[0][k][i][j];
          lhspX[1][k][i][j] = lhsX[1][k][i][j] - dttx2 * speed[k][j][i-1];
          lhspX[2][k][i][j] = lhsX[2][k][i][j];
          lhspX[3][k][i][j] = lhsX[3][k][i][j] + dttx2 * speed[k][j][i+1];
          lhspX[4][k][i][j] = lhsX[4][k][i][j];
          lhsmX[0][k][i][j] = lhsX[0][k][i][j];
          lhsmX[1][k][i][j] = lhsX[1][k][i][j] + dttx2 * speed[k][j][i-1];
          lhsmX[2][k][i][j] = lhsX[2][k][i][j];
          lhsmX[3][k][i][j] = lhsX[3][k][i][j] - dttx2 * speed[k][j][i+1];
          lhsmX[4][k][i][j] = lhsX[4][k][i][j];
        }
      }
    }
#endif
    //---------------------------------------------------------------------
    // FORWARD ELIMINATION
    //---------------------------------------------------------------------
    //---------------------------------------------------------------------
    // perform the Thomas algorithm; first, FORWARD ELIMINATION
    //---------------------------------------------------------------------
    // Stage 7: forward elimination sweep for the first factor
    // (rows 0 .. gp03; each row eliminates into rows i+1 and i+2).
    size_t kernel_x_solve_7_off[2] = { 1, 1 };
    size_t kernel_x_solve_7_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_7;
    brisbane_kernel_create("x_solve_7", &kernel_x_solve_7);
    brisbane_kernel_setmem(kernel_x_solve_7, 0, mem_lhsX, brisbane_rw);
    brisbane_kernel_setmem(kernel_x_solve_7, 1, mem_rhsX, brisbane_rw);
    brisbane_kernel_setarg(kernel_x_solve_7, 2, sizeof(int), &gp03);
    brisbane_task task7;
    brisbane_task_create(&task7);
    brisbane_task_kernel(task7, kernel_x_solve_7, 2, kernel_x_solve_7_off, kernel_x_solve_7_idx);
    brisbane_task_submit(task7, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,j,k,m)
#else
#pragma omp target teams distribute parallel for simd collapse(2) private(i,m,i1,i2,fac1)
#endif
    for (k = 1; k <= nz2; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(i1,i2,fac1)
#endif
      for (j = 1; j <= ny2; j++) {
        for (i = 0; i <= gp03; i++) {
          i1 = i + 1;
          i2 = i + 2;
          fac1 = 1.0/lhsX[2][k][i][j];
          lhsX[3][k][i][j] = fac1*lhsX[3][k][i][j];
          lhsX[4][k][i][j] = fac1*lhsX[4][k][i][j];
          for (m = 0; m < 3; m++) {
            rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j];
          }
          lhsX[2][k][i1][j] = lhsX[2][k][i1][j] - lhsX[1][k][i1][j]*lhsX[3][k][i][j];
          lhsX[3][k][i1][j] = lhsX[3][k][i1][j] - lhsX[1][k][i1][j]*lhsX[4][k][i][j];
          for (m = 0; m < 3; m++) {
            rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsX[1][k][i1][j]*rhsX[m][k][i][j];
          }
          lhsX[1][k][i2][j] = lhsX[1][k][i2][j] - lhsX[0][k][i2][j]*lhsX[3][k][i][j];
          lhsX[2][k][i2][j] = lhsX[2][k][i2][j] - lhsX[0][k][i2][j]*lhsX[4][k][i][j];
          for (m = 0; m < 3; m++) {
            rhsX[m][k][i2][j] = rhsX[m][k][i2][j] - lhsX[0][k][i2][j]*rhsX[m][k][i][j];
          }
        }
      }
    }
#endif
    //---------------------------------------------------------------------
    // The last two rows in this grid block are a bit different,
    // since they do not have two more rows available for the
    // elimination of off-diagonal entries
    //---------------------------------------------------------------------
    // Stage 8: eliminate/scale the final two rows (i = gp02, gp01).
    i = gp02;
    i1 = gp01;
    size_t kernel_x_solve_8_off[2] = { 1, 1 };
    size_t kernel_x_solve_8_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_8;
    brisbane_kernel_create("x_solve_8", &kernel_x_solve_8);
    brisbane_kernel_setmem(kernel_x_solve_8, 0, mem_lhsX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_8, 1, mem_rhsX, brisbane_wr);
    brisbane_kernel_setarg(kernel_x_solve_8, 2, sizeof(int), &i);
    brisbane_kernel_setarg(kernel_x_solve_8, 3, sizeof(int), &i1);
    brisbane_task task8;
    brisbane_task_create(&task8);
    brisbane_task_kernel(task8, kernel_x_solve_8, 2, kernel_x_solve_8_off, kernel_x_solve_8_idx);
    brisbane_task_submit(task8, brisbane_default, NULL, true);
#if 0
#pragma omp target teams distribute parallel for private(j,k,m,fac1,fac2) collapse(2)
    for (k = 1; k <= nz2; k++) {
      for (j = 1; j <= ny2; j++) {
        fac1 = 1.0/lhsX[2][k][i][j];
        lhsX[3][k][i][j] = fac1*lhsX[3][k][i][j];
        lhsX[4][k][i][j] = fac1*lhsX[4][k][i][j];
        for (m = 0; m < 3; m++) {
          rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j];
        }
        lhsX[2][k][i1][j] = lhsX[2][k][i1][j] - lhsX[1][k][i1][j]*lhsX[3][k][i][j];
        lhsX[3][k][i1][j] = lhsX[3][k][i1][j] - lhsX[1][k][i1][j]*lhsX[4][k][i][j];
        for (m = 0; m < 3; m++) {
          rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsX[1][k][i1][j]*rhsX[m][k][i][j];
        }
        //---------------------------------------------------------------------
        // scale the last row immediately
        //---------------------------------------------------------------------
        fac2 = 1.0/lhsX[2][k][i1][j];
        for (m = 0; m < 3; m++) {
          rhsX[m][k][i1][j] = fac2*rhsX[m][k][i1][j];
        }
      }
    }
#endif
    //---------------------------------------------------------------------
    // do the u+c and the u-c factors
    //---------------------------------------------------------------------
    // Stage 9: forward elimination for the (u+c)/(u-c) factors
    // (components m = 3 and m = 4 of rhsX).
    size_t kernel_x_solve_9_off[2] = { 1, 1 };
    size_t kernel_x_solve_9_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_9;
    brisbane_kernel_create("x_solve_9", &kernel_x_solve_9);
    brisbane_kernel_setmem(kernel_x_solve_9, 0, mem_lhspX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_9, 1, mem_lhsmX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_9, 2, mem_rhsX, brisbane_wr);
    brisbane_kernel_setarg(kernel_x_solve_9, 3, sizeof(int), &gp03);
    brisbane_task task9;
    brisbane_task_create(&task9);
    brisbane_task_kernel(task9, kernel_x_solve_9, 2, kernel_x_solve_9_off, kernel_x_solve_9_idx);
    brisbane_task_submit(task9, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,j,k,m)
#else
#pragma omp target teams distribute parallel for simd collapse(2) private(i,m,fac1,i1,i2)
#endif
    for (k = 1; k <= nz2; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(fac1,i1,i2)
#endif
      for (j = 1; j <= ny2; j++) {
        for (i = 0; i <= gp03; i++) {
          i1 = i + 1;
          i2 = i + 2;
          m = 3;
          fac1 = 1.0/lhspX[2][k][i][j];
          lhspX[3][k][i][j] = fac1*lhspX[3][k][i][j];
          lhspX[4][k][i][j] = fac1*lhspX[4][k][i][j];
          rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j];
          lhspX[2][k][i1][j] = lhspX[2][k][i1][j] - lhspX[1][k][i1][j]*lhspX[3][k][i][j];
          lhspX[3][k][i1][j] = lhspX[3][k][i1][j] - lhspX[1][k][i1][j]*lhspX[4][k][i][j];
          rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhspX[1][k][i1][j]*rhsX[m][k][i][j];
          lhspX[1][k][i2][j] = lhspX[1][k][i2][j] - lhspX[0][k][i2][j]*lhspX[3][k][i][j];
          lhspX[2][k][i2][j] = lhspX[2][k][i2][j] - lhspX[0][k][i2][j]*lhspX[4][k][i][j];
          rhsX[m][k][i2][j] = rhsX[m][k][i2][j] - lhspX[0][k][i2][j]*rhsX[m][k][i][j];
          m = 4;
          fac1 = 1.0/lhsmX[2][k][i][j];
          lhsmX[3][k][i][j] = fac1*lhsmX[3][k][i][j];
          lhsmX[4][k][i][j] = fac1*lhsmX[4][k][i][j];
          rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j];
          lhsmX[2][k][i1][j] = lhsmX[2][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[3][k][i][j];
          lhsmX[3][k][i1][j] = lhsmX[3][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[4][k][i][j];
          rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsmX[1][k][i1][j]*rhsX[m][k][i][j];
          lhsmX[1][k][i2][j] = lhsmX[1][k][i2][j] - lhsmX[0][k][i2][j]*lhsmX[3][k][i][j];
          lhsmX[2][k][i2][j] = lhsmX[2][k][i2][j] - lhsmX[0][k][i2][j]*lhsmX[4][k][i][j];
          rhsX[m][k][i2][j] = rhsX[m][k][i2][j] - lhsmX[0][k][i2][j]*rhsX[m][k][i][j];
        }
      }
    }
#endif
    //---------------------------------------------------------------------
    // And again the last two rows separately
    //---------------------------------------------------------------------
    // Stage 10: final two rows for the (u+c)/(u-c) factors.
    i = gp02;
    i1 = gp01;
    size_t kernel_x_solve_10_off[2] = { 1, 1 };
    size_t kernel_x_solve_10_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_10;
    brisbane_kernel_create("x_solve_10", &kernel_x_solve_10);
    brisbane_kernel_setmem(kernel_x_solve_10, 0, mem_lhspX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_10, 1, mem_lhsmX, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_10, 2, mem_rhsX, brisbane_wr);
    brisbane_kernel_setarg(kernel_x_solve_10, 3, sizeof(int), &i);
    brisbane_kernel_setarg(kernel_x_solve_10, 4, sizeof(int), &i1);
    brisbane_task task10;
    brisbane_task_create(&task10);
    brisbane_task_kernel(task10, kernel_x_solve_10, 2, kernel_x_solve_10_off, kernel_x_solve_10_idx);
    brisbane_task_submit(task10, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(j,k,m,fac1)
#else
#pragma omp target teams distribute parallel for simd collapse(2) private(m,fac1)
#endif
    for (k = 1; k <= nz2; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
      for (j = 1; j <= ny2; j++) {
        m = 3;
        fac1 = 1.0/lhspX[2][k][i][j];
        lhspX[3][k][i][j] = fac1*lhspX[3][k][i][j];
        lhspX[4][k][i][j] = fac1*lhspX[4][k][i][j];
        rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j];
        lhspX[2][k][i1][j] = lhspX[2][k][i1][j] - lhspX[1][k][i1][j]*lhspX[3][k][i][j];
        lhspX[3][k][i1][j] = lhspX[3][k][i1][j] - lhspX[1][k][i1][j]*lhspX[4][k][i][j];
        rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhspX[1][k][i1][j]*rhsX[m][k][i][j];
        m = 4;
        fac1 = 1.0/lhsmX[2][k][i][j];
        lhsmX[3][k][i][j] = fac1*lhsmX[3][k][i][j];
        lhsmX[4][k][i][j] = fac1*lhsmX[4][k][i][j];
        rhsX[m][k][i][j] = fac1*rhsX[m][k][i][j];
        lhsmX[2][k][i1][j] = lhsmX[2][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[3][k][i][j];
        lhsmX[3][k][i1][j] = lhsmX[3][k][i1][j] - lhsmX[1][k][i1][j]*lhsmX[4][k][i][j];
        rhsX[m][k][i1][j] = rhsX[m][k][i1][j] - lhsmX[1][k][i1][j]*rhsX[m][k][i][j];
        //---------------------------------------------------------------------
        // Scale the last row immediately
        //---------------------------------------------------------------------
        rhsX[3][k][i1][j] = rhsX[3][k][i1][j]/lhspX[2][k][i1][j];
        rhsX[4][k][i1][j] = rhsX[4][k][i1][j]/lhsmX[2][k][i1][j];
      }
    }
#endif
    //---------------------------------------------------------------------
    // BACKSUBSTITUTION
    //---------------------------------------------------------------------
    // Stage 11: back substitution of the next-to-last row.
    i = gp02;
    i1 = gp01;
    size_t kernel_x_solve_11_off[2] = { 1, 1 };
    size_t kernel_x_solve_11_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_11;
    brisbane_kernel_create("x_solve_11", &kernel_x_solve_11);
    brisbane_kernel_setmem(kernel_x_solve_11, 0, mem_rhsX, brisbane_rw);
    brisbane_kernel_setmem(kernel_x_solve_11, 1, mem_lhsX, brisbane_rd);
    brisbane_kernel_setmem(kernel_x_solve_11, 2, mem_lhspX, brisbane_rd);
    brisbane_kernel_setmem(kernel_x_solve_11, 3, mem_lhsmX, brisbane_rd);
    brisbane_kernel_setarg(kernel_x_solve_11, 4, sizeof(int), &i);
    brisbane_kernel_setarg(kernel_x_solve_11, 5, sizeof(int), &i1);
    brisbane_task task11;
    brisbane_task_create(&task11);
    brisbane_task_kernel(task11, kernel_x_solve_11, 2, kernel_x_solve_11_off, kernel_x_solve_11_idx);
    brisbane_task_submit(task11, brisbane_default, NULL, true);
#if 0
#pragma omp target teams distribute parallel for private(j,k,m) collapse(2)
    for (k = 1; k <= nz2; k++) {
      for (j = 1; j <= ny2; j++) {
        for (m = 0; m < 3; m++) {
          rhsX[m][k][i][j] = rhsX[m][k][i][j] - lhsX[3][k][i][j]*rhsX[m][k][i1][j];
        }
        rhsX[3][k][i][j] = rhsX[3][k][i][j] - lhspX[3][k][i][j]*rhsX[3][k][i1][j];
        rhsX[4][k][i][j] = rhsX[4][k][i][j] - lhsmX[3][k][i][j]*rhsX[4][k][i1][j];
      }
    }
#endif
    //---------------------------------------------------------------------
    // The first three factors
    //---------------------------------------------------------------------
    // Stage 12: full back-substitution sweep, i = gp03 down to 0.
    size_t kernel_x_solve_12_off[2] = { 1, 1 };
    size_t kernel_x_solve_12_idx[2] = { ny2, nz2 };
    brisbane_kernel kernel_x_solve_12;
    brisbane_kernel_create("x_solve_12", &kernel_x_solve_12);
    brisbane_kernel_setmem(kernel_x_solve_12, 0, mem_rhsX, brisbane_rw);
    brisbane_kernel_setmem(kernel_x_solve_12, 1, mem_lhsX, brisbane_rd);
    brisbane_kernel_setmem(kernel_x_solve_12, 2, mem_lhspX, brisbane_rd);
    brisbane_kernel_setmem(kernel_x_solve_12, 3, mem_lhsmX, brisbane_rd);
    brisbane_kernel_setarg(kernel_x_solve_12, 4, sizeof(int), &gp03);
    brisbane_task task12;
    brisbane_task_create(&task12);
    brisbane_task_kernel(task12, kernel_x_solve_12, 2, kernel_x_solve_12_off, kernel_x_solve_12_idx);
    brisbane_task_submit(task12, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,j,k,m)
#else
#pragma omp target teams distribute parallel for simd collapse(2) private(i,m,i1,i2)
#endif
    for (k = 1; k <= nz2; k++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd private(i1,i2)
#endif
      for (j = 1; j <= ny2; j++) {
        for (i = gp03; i >= 0; i--) {
          i1 = i + 1;
          i2 = i + 2;
          for (m = 0; m < 3; m++) {
            rhsX[m][k][i][j] = rhsX[m][k][i][j] - lhsX[3][k][i][j]*rhsX[m][k][i1][j] - lhsX[4][k][i][j]*rhsX[m][k][i2][j];
          }
          //-------------------------------------------------------------------
          // And the remaining two
          //-------------------------------------------------------------------
          rhsX[3][k][i][j] = rhsX[3][k][i][j] - lhspX[3][k][i][j]*rhsX[3][k][i1][j] - lhspX[4][k][i][j]*rhsX[3][k][i2][j];
          rhsX[4][k][i][j] = rhsX[4][k][i][j] - lhsmX[3][k][i][j]*rhsX[4][k][i1][j] - lhsmX[4][k][i][j]*rhsX[4][k][i2][j];
        }
      }
    }
#endif
    // Stage 13: transpose rhsX back into rhs (undo Stage 0).
    size_t kernel_x_solve_13_off[3] = { 0, 0, 0 };
    size_t kernel_x_solve_13_idx[3] = { IMAXP + 1, JMAXP + 1, nz2 + 1 };
    brisbane_kernel kernel_x_solve_13;
    brisbane_kernel_create("x_solve_13", &kernel_x_solve_13);
    brisbane_kernel_setmem(kernel_x_solve_13, 0, mem_rhs, brisbane_wr);
    brisbane_kernel_setmem(kernel_x_solve_13, 1, mem_rhsX, brisbane_rd);
    brisbane_task task13;
    brisbane_task_create(&task13);
    brisbane_task_kernel(task13, kernel_x_solve_13, 3, kernel_x_solve_13_off, kernel_x_solve_13_idx);
    brisbane_task_submit(task13, brisbane_default, NULL, true);
#if 0
#ifdef SPEC_USE_INNER_SIMD
#pragma omp target teams distribute parallel for private(i,j,k) collapse(2)
#else
#pragma omp target teams distribute parallel for simd collapse(3)
#endif
    for (k = 0; k <= nz2; k++) {
      for (j = 0; j <= JMAXP; j++) {
#ifdef SPEC_USE_INNER_SIMD
#pragma omp simd
#endif
        for (i = 0; i <= IMAXP; i++) {
          rhs[0][k][j][i] = rhsX[0][k][i][j];
          rhs[1][k][j][i] = rhsX[1][k][i][j];
          rhs[2][k][j][i] = rhsX[2][k][i][j];
          rhs[3][k][j][i] = rhsX[3][k][i][j];
          rhs[4][k][j][i] = rhsX[4][k][i][j];
        }
      }
    }
#endif
    // Release the per-call device buffers.
    brisbane_mem_release(mem_lhsX);
    brisbane_mem_release(mem_lhspX);
    brisbane_mem_release(mem_lhsmX);
    brisbane_mem_release(mem_rhonX);
    brisbane_mem_release(mem_rhsX);
  }/* end omp target data */
  //---------------------------------------------------------------------
  // Do the block-diagonal inversion
  //---------------------------------------------------------------------
  ninvr();
}

void y_solve()
{
  int i, j, k, j1, j2, m;
  int gp0, gp1, gp2;
  double ru1, fac1, fac2;
  double lhsY[5][nz2+1][IMAXP+1][IMAXP+1];
  double lhspY[5][nz2+1][IMAXP+1][IMAXP+1];
  double lhsmY[5][nz2+1][IMAXP+1][IMAXP+1];
  double rhoqY[nz2+1][IMAXP+1][PROBLEM_SIZE];
  int ni=ny2+1;
  gp0=grid_points[0];
  gp1=grid_points[1];
  gp2=grid_points[2];
brisbane_mem mem_lhsY; brisbane_mem mem_lhspY; brisbane_mem mem_lhsmY; brisbane_mem mem_rhoqY; brisbane_mem_create(5 * (nz2 + 1) * (IMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_lhsY); brisbane_mem_create(5 * (nz2 + 1) * (IMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_lhspY); brisbane_mem_create(5 * (nz2 + 1) * (IMAXP + 1) * (IMAXP + 1) * sizeof(double), &mem_lhsmY); brisbane_mem_create((nz2 + 1) * (IMAXP + 1) * (PROBLEM_SIZE) * sizeof(double), &mem_rhoqY); #pragma omp target data map(alloc:lhsY[:][:][:][:],lhspY[:][:][:][:],lhsmY[:][:][:][:],rhoqY[:][:][:]) //present(rho_i,vs,speed,rhs) { size_t kernel_y_solve_0_off[2] = { 1, 1 }; size_t kernel_y_solve_0_idx[2] = { nx2, nz2 }; brisbane_kernel kernel_y_solve_0; brisbane_kernel_create("y_solve_0", &kernel_y_solve_0); brisbane_kernel_setmem(kernel_y_solve_0, 0, mem_lhsY, brisbane_wr); brisbane_kernel_setmem(kernel_y_solve_0, 1, mem_lhspY, brisbane_wr); brisbane_kernel_setmem(kernel_y_solve_0, 2, mem_lhsmY, brisbane_wr); brisbane_kernel_setarg(kernel_y_solve_0, 3, sizeof(int), &ni); brisbane_task task0; brisbane_task_create(&task0); brisbane_task_kernel(task0, kernel_y_solve_0, 2, kernel_y_solve_0_off, kernel_y_solve_0_idx); brisbane_task_submit(task0, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,k,m) collapse(2) for (k = 1; k <= nz2; k++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { lhsY[m][k][0][i] = 0.0; lhspY[m][k][0][i] = 0.0; lhsmY[m][k][0][i] = 0.0; lhsY[m][k][ni][i] = 0.0; lhspY[m][k][ni][i] = 0.0; lhsmY[m][k][ni][i] = 0.0; } lhsY[2][k][0][i] = 1.0; lhspY[2][k][0][i] = 1.0; lhsmY[2][k][0][i] = 1.0; lhsY[2][k][ni][i] = 1.0; lhspY[2][k][ni][i] = 1.0; lhsmY[2][k][ni][i] = 1.0; } } #endif //--------------------------------------------------------------------- // Computes the left hand side for the three y-factors //--------------------------------------------------------------------- 
//--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- size_t kernel_y_solve_1_off[2] = { 1, 1 }; size_t kernel_y_solve_1_idx[2] = { gp0 - 2, nz2 }; brisbane_kernel kernel_y_solve_1; brisbane_kernel_create("y_solve_1", &kernel_y_solve_1); brisbane_kernel_setmem(kernel_y_solve_1, 0, mem_rho_i, brisbane_rd); brisbane_kernel_setmem(kernel_y_solve_1, 1, mem_rhoqY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_1, 2, mem_lhsY, brisbane_wr); brisbane_kernel_setmem(kernel_y_solve_1, 3, mem_vs, brisbane_rd); brisbane_kernel_setarg(kernel_y_solve_1, 4, sizeof(int), &gp1); brisbane_kernel_setarg(kernel_y_solve_1, 5, sizeof(double), &dy1); brisbane_kernel_setarg(kernel_y_solve_1, 6, sizeof(double), &dy3); brisbane_kernel_setarg(kernel_y_solve_1, 7, sizeof(double), &dy5); brisbane_kernel_setarg(kernel_y_solve_1, 8, sizeof(double), &dymax); brisbane_kernel_setarg(kernel_y_solve_1, 9, sizeof(double), &c1c5); brisbane_kernel_setarg(kernel_y_solve_1, 10, sizeof(double), &c3c4); brisbane_kernel_setarg(kernel_y_solve_1, 11, sizeof(double), &dtty1); brisbane_kernel_setarg(kernel_y_solve_1, 12, sizeof(double), &dtty2); brisbane_kernel_setarg(kernel_y_solve_1, 13, sizeof(double), &c2dtty1); brisbane_kernel_setarg(kernel_y_solve_1, 14, sizeof(double), &con43); brisbane_task task1; brisbane_task_create(&task1); brisbane_task_kernel(task1, kernel_y_solve_1, 2, kernel_y_solve_1_off, kernel_y_solve_1_idx); brisbane_task_submit(task1, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for collapse(2) private(i,j,k,ru1) for (k = 1; k <= nz2; k++) { for (i = 1; i <= gp0-2; i++) { #pragma omp simd private(ru1) for (j = 0; j <= gp1-1; j++) { ru1 = c3c4*rho_i[k][j][i]; rhoqY[k][j][i] = max(max(dy3+con43*ru1, dy5+c1c5*ru1), max(dymax+ru1, dy1)); } #pragma omp simd for (j = 1; j <= gp1-2; j++) { lhsY[0][k][j][i] = 0.0; 
lhsY[1][k][j][i] = -dtty2 * vs[k][j-1][i] - dtty1 * rhoqY[k][j-1][i]; lhsY[2][k][j][i] = 1.0 + c2dtty1 * rhoqY[k][j][i]; lhsY[3][k][j][i] = dtty2 * vs[k][j+1][i] - dtty1 * rhoqY[k][j+1][i]; lhsY[4][k][j][i] = 0.0; } } } #endif //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- j = 1; size_t kernel_y_solve_2_off[2] = { 1, 1 }; size_t kernel_y_solve_2_idx[2] = { gp0 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_2; brisbane_kernel_create("y_solve_2", &kernel_y_solve_2); brisbane_kernel_setmem(kernel_y_solve_2, 0, mem_lhsY, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_2, 1, sizeof(double), &comz1); brisbane_kernel_setarg(kernel_y_solve_2, 2, sizeof(double), &comz4); brisbane_kernel_setarg(kernel_y_solve_2, 3, sizeof(double), &comz5); brisbane_kernel_setarg(kernel_y_solve_2, 4, sizeof(double), &comz6); brisbane_kernel_setarg(kernel_y_solve_2, 5, sizeof(int), &j); brisbane_task task2; brisbane_task_create(&task2); brisbane_task_kernel(task2, kernel_y_solve_2, 2, kernel_y_solve_2_off, kernel_y_solve_2_idx); brisbane_task_submit(task2, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,k) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhsY[2][k][j][i] = lhsY[2][k][j][i] + comz5; lhsY[3][k][j][i] = lhsY[3][k][j][i] - comz4; lhsY[4][k][j][i] = lhsY[4][k][j][i] + comz1; lhsY[1][k][j+1][i] = lhsY[1][k][j+1][i] - comz4; lhsY[2][k][j+1][i] = lhsY[2][k][j+1][i] + comz6; lhsY[3][k][j+1][i] = lhsY[3][k][j+1][i] - comz4; lhsY[4][k][j+1][i] = lhsY[4][k][j+1][i] + comz1; } } #endif size_t kernel_y_solve_3_off[3] = { 1, 3, 1 }; size_t kernel_y_solve_3_idx[3] = { gp0 - 2, gp1 - 6, gp2 - 2 }; brisbane_kernel kernel_y_solve_3; 
brisbane_kernel_create("y_solve_3", &kernel_y_solve_3); brisbane_kernel_setmem(kernel_y_solve_3, 0, mem_lhsY, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_3, 1, sizeof(double), &comz1); brisbane_kernel_setarg(kernel_y_solve_3, 2, sizeof(double), &comz4); brisbane_kernel_setarg(kernel_y_solve_3, 3, sizeof(double), &comz6); brisbane_task task3; brisbane_task_create(&task3); brisbane_task_kernel(task3, kernel_y_solve_3, 3, kernel_y_solve_3_off, kernel_y_solve_3_idx); brisbane_task_submit(task3, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 1; k <= gp2-2; k++) { for (j = 3; j <= gp1-4; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhsY[0][k][j][i] = lhsY[0][k][j][i] + comz1; lhsY[1][k][j][i] = lhsY[1][k][j][i] - comz4; lhsY[2][k][j][i] = lhsY[2][k][j][i] + comz6; lhsY[3][k][j][i] = lhsY[3][k][j][i] - comz4; lhsY[4][k][j][i] = lhsY[4][k][j][i] + comz1; } } } #endif j = gp1-3; size_t kernel_y_solve_4_off[2] = { 1, 1 }; size_t kernel_y_solve_4_idx[2] = { gp0 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_4; brisbane_kernel_create("y_solve_4", &kernel_y_solve_4); brisbane_kernel_setmem(kernel_y_solve_4, 0, mem_lhsY, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_4, 1, sizeof(double), &comz1); brisbane_kernel_setarg(kernel_y_solve_4, 2, sizeof(double), &comz4); brisbane_kernel_setarg(kernel_y_solve_4, 3, sizeof(double), &comz5); brisbane_kernel_setarg(kernel_y_solve_4, 4, sizeof(double), &comz6); brisbane_kernel_setarg(kernel_y_solve_4, 5, sizeof(int), &j); brisbane_task task4; brisbane_task_create(&task4); brisbane_task_kernel(task4, kernel_y_solve_4, 2, kernel_y_solve_4_off, kernel_y_solve_4_idx); brisbane_task_submit(task4, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for 
private(i,k) #else #pragma omp target teams distribute parallel for simd collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhsY[0][k][j][i] = lhsY[0][k][j][i] + comz1; lhsY[1][k][j][i] = lhsY[1][k][j][i] - comz4; lhsY[2][k][j][i] = lhsY[2][k][j][i] + comz6; lhsY[3][k][j][i] = lhsY[3][k][j][i] - comz4; lhsY[0][k][j+1][i] = lhsY[0][k][j+1][i] + comz1; lhsY[1][k][j+1][i] = lhsY[1][k][j+1][i] - comz4; lhsY[2][k][j+1][i] = lhsY[2][k][j+1][i] + comz5; } } #endif //--------------------------------------------------------------------- // subsequently, for (the other two factors //--------------------------------------------------------------------- size_t kernel_y_solve_5_off[3] = { 1, 1, 1 }; size_t kernel_y_solve_5_idx[3] = { gp0 - 2, gp1 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_5; brisbane_kernel_create("y_solve_5", &kernel_y_solve_5); brisbane_kernel_setmem(kernel_y_solve_5, 0, mem_lhspY, brisbane_wr); brisbane_kernel_setmem(kernel_y_solve_5, 1, mem_lhsmY, brisbane_wr); brisbane_kernel_setmem(kernel_y_solve_5, 2, mem_lhsY, brisbane_rd); brisbane_kernel_setmem(kernel_y_solve_5, 3, mem_speed, brisbane_rd); brisbane_kernel_setarg(kernel_y_solve_5, 4, sizeof(double), &dtty2); brisbane_task task5; brisbane_task_create(&task5); brisbane_task_kernel(task5, kernel_y_solve_5, 3, kernel_y_solve_5_off, kernel_y_solve_5_idx); brisbane_task_submit(task5, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (k = 1; k <= gp2-2; k++) { for (j = 1; j <= gp1-2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= gp0-2; i++) { lhspY[0][k][j][i] = lhsY[0][k][j][i]; lhspY[1][k][j][i] = lhsY[1][k][j][i] - dtty2 * speed[k][j-1][i]; lhspY[2][k][j][i] = lhsY[2][k][j][i]; lhspY[3][k][j][i] = lhsY[3][k][j][i] + dtty2 * 
speed[k][j+1][i]; lhspY[4][k][j][i] = lhsY[4][k][j][i]; lhsmY[0][k][j][i] = lhsY[0][k][j][i]; lhsmY[1][k][j][i] = lhsY[1][k][j][i] + dtty2 * speed[k][j-1][i]; lhsmY[2][k][j][i] = lhsY[2][k][j][i]; lhsmY[3][k][j][i] = lhsY[3][k][j][i] - dtty2 * speed[k][j+1][i]; lhsmY[4][k][j][i] = lhsY[4][k][j][i]; } } } #endif //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- size_t kernel_y_solve_6_off[1] = { 1 }; size_t kernel_y_solve_6_idx[1] = { gp2 - 2 }; brisbane_kernel kernel_y_solve_6; brisbane_kernel_create("y_solve_6", &kernel_y_solve_6); brisbane_kernel_setmem(kernel_y_solve_6, 0, mem_lhsY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_6, 1, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_6, 2, sizeof(int), &gp0); brisbane_kernel_setarg(kernel_y_solve_6, 3, sizeof(int), &gp1); brisbane_task task6; brisbane_task_create(&task6); brisbane_task_kernel(task6, kernel_y_solve_6, 1, kernel_y_solve_6_off, kernel_y_solve_6_idx); brisbane_task_submit(task6, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,k,m,fac1,j1,j2) for (k = 1; k <= gp2-2; k++) { for (j = 0; j <= gp1-3; j++) { j1 = j + 1; j2 = j + 2; for (i = 1; i <= gp0-2; i++) { fac1 = 1.0/lhsY[2][k][j][i]; lhsY[3][k][j][i] = fac1*lhsY[3][k][j][i]; lhsY[4][k][j][i] = fac1*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsY[2][k][j1][i] = lhsY[2][k][j1][i] - lhsY[1][k][j1][i]*lhsY[3][k][j][i]; lhsY[3][k][j1][i] = lhsY[3][k][j1][i] - lhsY[1][k][j1][i]*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsY[1][k][j1][i]*rhs[m][k][j][i]; } lhsY[1][k][j2][i] = lhsY[1][k][j2][i] - lhsY[0][k][j2][i]*lhsY[3][k][j][i]; lhsY[2][k][j2][i] = lhsY[2][k][j2][i] - lhsY[0][k][j2][i]*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j2][i] = rhs[m][k][j2][i] - 
lhsY[0][k][j2][i]*rhs[m][k][j][i]; } } } } #endif //--------------------------------------------------------------------- // The last two rows in this grid block are a bit different, // since they for (not have two more rows available for the // elimination of off-diagonal entries //--------------------------------------------------------------------- j = gp1-2; j1 = gp1-1; size_t kernel_y_solve_7_off[2] = { 1, 1 }; size_t kernel_y_solve_7_idx[2] = { gp0 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_7; brisbane_kernel_create("y_solve_7", &kernel_y_solve_7); brisbane_kernel_setmem(kernel_y_solve_7, 0, mem_lhsY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_7, 1, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_7, 2, sizeof(int), &j); brisbane_kernel_setarg(kernel_y_solve_7, 3, sizeof(int), &j1); brisbane_task task7; brisbane_task_create(&task7); brisbane_task_kernel(task7, kernel_y_solve_7, 2, kernel_y_solve_7_off, kernel_y_solve_7_idx); brisbane_task_submit(task7, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,k,m,fac1,fac2) collapse(2) for (k = 1; k <= gp2-2; k++) { for (i = 1; i <= gp0-2; i++) { fac1 = 1.0/lhsY[2][k][j][i]; lhsY[3][k][j][i] = fac1*lhsY[3][k][j][i]; lhsY[4][k][j][i] = fac1*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsY[2][k][j1][i] = lhsY[2][k][j1][i] - lhsY[1][k][j1][i]*lhsY[3][k][j][i]; lhsY[3][k][j1][i] = lhsY[3][k][j1][i] - lhsY[1][k][j1][i]*lhsY[4][k][j][i]; for (m = 0; m < 3; m++) { rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsY[1][k][j1][i]*rhs[m][k][j][i]; } //--------------------------------------------------------------------- // scale the last row immediately //--------------------------------------------------------------------- fac2 = 1.0/lhsY[2][k][j1][i]; for (m = 0; m < 3; m++) { rhs[m][k][j1][i] = fac2*rhs[m][k][j1][i]; } } } #endif //--------------------------------------------------------------------- // for (the u+c and 
the u-c factors //--------------------------------------------------------------------- size_t kernel_y_solve_8_off[2] = { 1, 1 }; size_t kernel_y_solve_8_idx[2] = { gp0 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_8; brisbane_kernel_create("y_solve_8", &kernel_y_solve_8); brisbane_kernel_setmem(kernel_y_solve_8, 0, mem_lhspY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_8, 1, mem_lhsmY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_8, 2, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_8, 3, sizeof(int), &gp1); brisbane_task task8; brisbane_task_create(&task8); brisbane_task_kernel(task8, kernel_y_solve_8, 2, kernel_y_solve_8_off, kernel_y_solve_8_idx); brisbane_task_submit(task8, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(j,m,fac1,j1,j2) collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(fac1,j1,j2) #endif for (i = 1; i <= gp0-2; i++) { for (j = 0; j <= gp1-3; j++) { j1 = j + 1; j2 = j + 2; m = 3; fac1 = 1.0/lhspY[2][k][j][i]; lhspY[3][k][j][i] = fac1*lhspY[3][k][j][i]; lhspY[4][k][j][i] = fac1*lhspY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspY[2][k][j1][i] = lhspY[2][k][j1][i] - lhspY[1][k][j1][i]*lhspY[3][k][j][i]; lhspY[3][k][j1][i] = lhspY[3][k][j1][i] - lhspY[1][k][j1][i]*lhspY[4][k][j][i]; rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhspY[1][k][j1][i]*rhs[m][k][j][i]; lhspY[1][k][j2][i] = lhspY[1][k][j2][i] - lhspY[0][k][j2][i]*lhspY[3][k][j][i]; lhspY[2][k][j2][i] = lhspY[2][k][j2][i] - lhspY[0][k][j2][i]*lhspY[4][k][j][i]; rhs[m][k][j2][i] = rhs[m][k][j2][i] - lhspY[0][k][j2][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmY[2][k][j][i]; lhsmY[3][k][j][i] = fac1*lhsmY[3][k][j][i]; lhsmY[4][k][j][i] = fac1*lhsmY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmY[2][k][j1][i] = lhsmY[2][k][j1][i] - 
lhsmY[1][k][j1][i]*lhsmY[3][k][j][i]; lhsmY[3][k][j1][i] = lhsmY[3][k][j1][i] - lhsmY[1][k][j1][i]*lhsmY[4][k][j][i]; rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsmY[1][k][j1][i]*rhs[m][k][j][i]; lhsmY[1][k][j2][i] = lhsmY[1][k][j2][i] - lhsmY[0][k][j2][i]*lhsmY[3][k][j][i]; lhsmY[2][k][j2][i] = lhsmY[2][k][j2][i] - lhsmY[0][k][j2][i]*lhsmY[4][k][j][i]; rhs[m][k][j2][i] = rhs[m][k][j2][i] - lhsmY[0][k][j2][i]*rhs[m][k][j][i]; } } } #endif //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- j = gp1-2; j1 = gp1-1; size_t kernel_y_solve_9_off[2] = { 1, 1 }; size_t kernel_y_solve_9_idx[2] = { gp0 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_9; brisbane_kernel_create("y_solve_9", &kernel_y_solve_9); brisbane_kernel_setmem(kernel_y_solve_9, 0, mem_lhspY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_9, 1, mem_lhsmY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_9, 2, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_9, 3, sizeof(int), &j); brisbane_kernel_setarg(kernel_y_solve_9, 4, sizeof(int), &j1); brisbane_task task9; brisbane_task_create(&task9); brisbane_task_kernel(task9, kernel_y_solve_9, 2, kernel_y_solve_9_off, kernel_y_solve_9_idx); brisbane_task_submit(task9, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,k,m,fac1) #else #pragma omp target teams distribute parallel for simd private(m,fac1) collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(m, fac1) #endif for (i = 1; i <= gp0-2; i++) { m = 3; fac1 = 1.0/lhspY[2][k][j][i]; lhspY[3][k][j][i] = fac1*lhspY[3][k][j][i]; lhspY[4][k][j][i] = fac1*lhspY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspY[2][k][j1][i] = lhspY[2][k][j1][i] - lhspY[1][k][j1][i]*lhspY[3][k][j][i]; lhspY[3][k][j1][i] = lhspY[3][k][j1][i] - 
lhspY[1][k][j1][i]*lhspY[4][k][j][i]; rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhspY[1][k][j1][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmY[2][k][j][i]; lhsmY[3][k][j][i] = fac1*lhsmY[3][k][j][i]; lhsmY[4][k][j][i] = fac1*lhsmY[4][k][j][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmY[2][k][j1][i] = lhsmY[2][k][j1][i] - lhsmY[1][k][j1][i]*lhsmY[3][k][j][i]; lhsmY[3][k][j1][i] = lhsmY[3][k][j1][i] - lhsmY[1][k][j1][i]*lhsmY[4][k][j][i]; rhs[m][k][j1][i] = rhs[m][k][j1][i] - lhsmY[1][k][j1][i]*rhs[m][k][j][i]; //--------------------------------------------------------------------- // Scale the last row immediately //--------------------------------------------------------------------- rhs[3][k][j1][i] = rhs[3][k][j1][i]/lhspY[2][k][j1][i]; rhs[4][k][j1][i] = rhs[4][k][j1][i]/lhsmY[2][k][j1][i]; } } #endif //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- j = gp1-2; j1 = gp1-1; size_t kernel_y_solve_10_off[2] = { 1, 1 }; size_t kernel_y_solve_10_idx[2] = { gp0 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_10; brisbane_kernel_create("y_solve_10", &kernel_y_solve_10); brisbane_kernel_setmem(kernel_y_solve_10, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_10, 1, mem_lhsY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_10, 2, mem_lhspY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_10, 3, mem_lhsmY, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_10, 4, sizeof(int), &j); brisbane_kernel_setarg(kernel_y_solve_10, 5, sizeof(int), &j1); brisbane_task task10; brisbane_task_create(&task10); brisbane_task_kernel(task10, kernel_y_solve_10, 2, kernel_y_solve_10_off, kernel_y_solve_10_idx); brisbane_task_submit(task10, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,k,m) collapse(2) for (k = 1; k <= gp2-2; k++) { for (i = 1; i <= gp0-2; i++) { for (m = 0; m < 3; m++) { rhs[m][k][j][i] = 
rhs[m][k][j][i] - lhsY[3][k][j][i]*rhs[m][k][j1][i]; } rhs[3][k][j][i] = rhs[3][k][j][i] - lhspY[3][k][j][i]*rhs[3][k][j1][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmY[3][k][j][i]*rhs[4][k][j1][i]; } } #endif //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- size_t kernel_y_solve_11_off[2] = { 1, 1 }; size_t kernel_y_solve_11_idx[2] = { gp0 - 2, gp2 - 2 }; brisbane_kernel kernel_y_solve_11; brisbane_kernel_create("y_solve_11", &kernel_y_solve_11); brisbane_kernel_setmem(kernel_y_solve_11, 0, mem_rhs, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_11, 1, mem_lhsY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_11, 2, mem_lhspY, brisbane_rw); brisbane_kernel_setmem(kernel_y_solve_11, 3, mem_lhsmY, brisbane_rw); brisbane_kernel_setarg(kernel_y_solve_11, 4, sizeof(int), &gp1); brisbane_task task11; brisbane_task_create(&task11); brisbane_task_kernel(task11, kernel_y_solve_11, 2, kernel_y_solve_11_off, kernel_y_solve_11_idx); brisbane_task_submit(task11, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(j,m,j1,j2) collapse(2) #endif for (k = 1; k <= gp2-2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(j1,j2) #endif for (i = 1; i <= gp0-2; i++) { for (j = gp1-3; j >= 0; j--) { j1 = j + 1; j2 = j + 2; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - lhsY[3][k][j][i]*rhs[m][k][j1][i] - lhsY[4][k][j][i]*rhs[m][k][j2][i]; } //------------------------------------------------------------------- // And the remaining two //------------------------------------------------------------------- rhs[3][k][j][i] = rhs[3][k][j][i] - lhspY[3][k][j][i]*rhs[3][k][j1][i] - lhspY[4][k][j][i]*rhs[3][k][j2][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmY[3][k][j][i]*rhs[4][k][j1][i] - 
lhsmY[4][k][j][i]*rhs[4][k][j2][i]; } } } #endif }/* end omp target data */ brisbane_mem_release(mem_lhsY); brisbane_mem_release(mem_lhspY); brisbane_mem_release(mem_lhsmY); brisbane_mem_release(mem_rhoqY); pinvr(); } void z_solve() { int i, j, k, k1, k2, m; int gp21,gp22,gp23; double ru1, fac1, fac2; double lhsZ[5][ny2+1][IMAXP+1][IMAXP+1]; double lhspZ[5][ny2+1][IMAXP+1][IMAXP+1]; double lhsmZ[5][ny2+1][IMAXP+1][IMAXP+1]; double rhosZ[ny2+1][IMAXP+1][PROBLEM_SIZE]; int ni=nz2+1; gp21=grid_points[2]-1; gp22=grid_points[2]-2; gp23=grid_points[2]-3; brisbane_mem mem_lhsZ; brisbane_mem mem_lhspZ; brisbane_mem mem_lhsmZ; brisbane_mem mem_rhosZ; brisbane_mem_create(sizeof(double) * 5 * (ny2 + 1) * (IMAXP + 1) * (IMAXP + 1), &mem_lhsZ); brisbane_mem_create(sizeof(double) * 5 * (ny2 + 1) * (IMAXP + 1) * (IMAXP + 1), &mem_lhspZ); brisbane_mem_create(sizeof(double) * 5 * (ny2 + 1) * (IMAXP + 1) * (IMAXP + 1), &mem_lhsmZ); brisbane_mem_create(sizeof(double) * (ny2 + 1) * (IMAXP + 1) * (PROBLEM_SIZE), &mem_rhosZ); #pragma omp target data map(alloc:lhsZ[:][:][:][:],lhspZ[:][:][:][:],lhsmZ[:][:][:][:],rhosZ[:][:][:]) //present(rho_i,ws,speed,rhs) { size_t kernel_z_solve_0_off[2] = { 1, 1 }; size_t kernel_z_solve_0_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_0; brisbane_kernel_create("z_solve_0", &kernel_z_solve_0); brisbane_kernel_setmem(kernel_z_solve_0, 0, mem_lhsZ, brisbane_wr); brisbane_kernel_setmem(kernel_z_solve_0, 1, mem_lhspZ, brisbane_wr); brisbane_kernel_setmem(kernel_z_solve_0, 2, mem_lhsmZ, brisbane_wr); brisbane_kernel_setarg(kernel_z_solve_0, 3, sizeof(int), &ni); brisbane_task task0; brisbane_task_create(&task0); brisbane_task_kernel(task0, kernel_z_solve_0, 2, kernel_z_solve_0_off, kernel_z_solve_0_idx); brisbane_task_submit(task0, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,m) collapse(2) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 5; m++) { lhsZ[m][j][0][i] = 0.0; 
lhspZ[m][j][0][i] = 0.0; lhsmZ[m][j][0][i] = 0.0; lhsZ[m][j][ni][i] = 0.0; lhspZ[m][j][ni][i] = 0.0; lhsmZ[m][j][ni][i] = 0.0; } lhsZ[2][j][0][i] = 1.0; lhspZ[2][j][0][i] = 1.0; lhsmZ[2][j][0][i] = 1.0; lhsZ[2][j][ni][i] = 1.0; lhspZ[2][j][ni][i] = 1.0; lhsmZ[2][j][ni][i] = 1.0; } } #endif //--------------------------------------------------------------------- // Computes the left hand side for the three z-factors //--------------------------------------------------------------------- //--------------------------------------------------------------------- // first fill the lhs for the u-eigenvalue //--------------------------------------------------------------------- size_t kernel_z_solve_1_off[3] = { 0, 1, 1 }; size_t kernel_z_solve_1_idx[3] = { nz2 + 2, nx2, ny2 }; brisbane_kernel kernel_z_solve_1; brisbane_kernel_create("z_solve_1", &kernel_z_solve_1); brisbane_kernel_setmem(kernel_z_solve_1, 0, mem_rho_i, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_1, 1, mem_rhosZ, brisbane_wr); brisbane_kernel_setarg(kernel_z_solve_1, 2, sizeof(double), &dz1); brisbane_kernel_setarg(kernel_z_solve_1, 3, sizeof(double), &dz4); brisbane_kernel_setarg(kernel_z_solve_1, 4, sizeof(double), &dz5); brisbane_kernel_setarg(kernel_z_solve_1, 5, sizeof(double), &dzmax); brisbane_kernel_setarg(kernel_z_solve_1, 6, sizeof(double), &c1c5); brisbane_kernel_setarg(kernel_z_solve_1, 7, sizeof(double), &c3c4); brisbane_kernel_setarg(kernel_z_solve_1, 8, sizeof(double), &con43); brisbane_task task1; brisbane_task_create(&task1); brisbane_task_kernel(task1, kernel_z_solve_1, 3, kernel_z_solve_1_off, kernel_z_solve_1_idx); brisbane_task_submit(task1, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,ru1) collapse(2) #else #pragma omp target teams distribute parallel for simd private(ru1) collapse(3) #endif for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif 
for (k = 0; k <= nz2+1; k++) { ru1 = c3c4*rho_i[k][j][i]; rhosZ[j][i][k] = max(max(dz4+con43*ru1, dz5+c1c5*ru1), max(dzmax+ru1, dz1)); } } } #endif size_t kernel_z_solve_2_off[3] = { 1, 1, 1 }; size_t kernel_z_solve_2_idx[3] = { nz2, nx2, ny2 }; brisbane_kernel kernel_z_solve_2; brisbane_kernel_create("z_solve_2", &kernel_z_solve_2); brisbane_kernel_setmem(kernel_z_solve_2, 0, mem_lhsZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_2, 1, mem_ws, brisbane_wr); brisbane_kernel_setmem(kernel_z_solve_2, 2, mem_rhosZ, brisbane_wr); brisbane_kernel_setarg(kernel_z_solve_2, 3, sizeof(double), &dttz1); brisbane_kernel_setarg(kernel_z_solve_2, 4, sizeof(double), &dttz2); brisbane_kernel_setarg(kernel_z_solve_2, 5, sizeof(double), &c2dttz1); brisbane_task task2; brisbane_task_create(&task2); brisbane_task_kernel(task2, kernel_z_solve_2, 3, kernel_z_solve_2_off, kernel_z_solve_2_idx); brisbane_task_submit(task2, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (k = 1; k <= nz2; k++) { lhsZ[0][j][k][i] = 0.0; lhsZ[1][j][k][i] = -dttz2 * ws[k-1][j][i] - dttz1 * rhosZ[j][i][k-1]; lhsZ[2][j][k][i] = 1.0 + c2dttz1 * rhosZ[j][i][k]; lhsZ[3][j][k][i] = dttz2 * ws[k+1][j][i] - dttz1 * rhosZ[j][i][k+1]; lhsZ[4][j][k][i] = 0.0; } } } #endif //--------------------------------------------------------------------- // add fourth order dissipation //--------------------------------------------------------------------- size_t kernel_z_solve_3_off[2] = { 1, 1 }; size_t kernel_z_solve_3_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_3; brisbane_kernel_create("z_solve_3", &kernel_z_solve_3); brisbane_kernel_setmem(kernel_z_solve_3, 0, mem_lhsZ, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_3, 1, 
sizeof(double), &comz1); brisbane_kernel_setarg(kernel_z_solve_3, 2, sizeof(double), &comz4); brisbane_kernel_setarg(kernel_z_solve_3, 3, sizeof(double), &comz5); brisbane_kernel_setarg(kernel_z_solve_3, 4, sizeof(double), &comz6); brisbane_task task3; brisbane_task_create(&task3); brisbane_task_kernel(task3, kernel_z_solve_3, 2, kernel_z_solve_3_off, kernel_z_solve_3_idx); brisbane_task_submit(task3, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) #else #pragma omp target teams distribute parallel for simd private(k) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { k = 1; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz5; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; lhsZ[4][j][k][i] = lhsZ[4][j][k][i] + comz1; k = 2; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz6; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; lhsZ[4][j][k][i] = lhsZ[4][j][k][i] + comz1; } } #endif size_t kernel_z_solve_4_off[3] = { 1, 3, 1 }; size_t kernel_z_solve_4_idx[3] = { nx2, nz2 - 4, ny2 }; brisbane_kernel kernel_z_solve_4; brisbane_kernel_create("z_solve_4", &kernel_z_solve_4); brisbane_kernel_setmem(kernel_z_solve_4, 0, mem_lhsZ, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_4, 1, sizeof(double), &comz1); brisbane_kernel_setarg(kernel_z_solve_4, 2, sizeof(double), &comz4); brisbane_kernel_setarg(kernel_z_solve_4, 3, sizeof(double), &comz6); brisbane_task task4; brisbane_task_create(&task4); brisbane_task_kernel(task4, kernel_z_solve_4, 3, kernel_z_solve_4_off, kernel_z_solve_4_idx); brisbane_task_submit(task4, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (j = 1; j <= ny2; j++) { for (k = 3; k <= nz2-2; k++) { #ifdef 
SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { lhsZ[0][j][k][i] = lhsZ[0][j][k][i] + comz1; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz6; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; lhsZ[4][j][k][i] = lhsZ[4][j][k][i] + comz1; } } } #endif size_t kernel_z_solve_5_off[2] = { 1, 1 }; size_t kernel_z_solve_5_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_5; brisbane_kernel_create("z_solve_5", &kernel_z_solve_5); brisbane_kernel_setmem(kernel_z_solve_5, 0, mem_lhsZ, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_5, 1, sizeof(double), &comz1); brisbane_kernel_setarg(kernel_z_solve_5, 2, sizeof(double), &comz4); brisbane_kernel_setarg(kernel_z_solve_5, 3, sizeof(double), &comz5); brisbane_kernel_setarg(kernel_z_solve_5, 4, sizeof(double), &comz6); brisbane_task task5; brisbane_task_create(&task5); brisbane_task_kernel(task5, kernel_z_solve_5, 3, kernel_z_solve_5_off, kernel_z_solve_5_idx); brisbane_task_submit(task5, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) #else #pragma omp target teams distribute parallel for simd private(k) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { k = nz2-1; lhsZ[0][j][k][i] = lhsZ[0][j][k][i] + comz1; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz6; lhsZ[3][j][k][i] = lhsZ[3][j][k][i] - comz4; k = nz2; lhsZ[0][j][k][i] = lhsZ[0][j][k][i] + comz1; lhsZ[1][j][k][i] = lhsZ[1][j][k][i] - comz4; lhsZ[2][j][k][i] = lhsZ[2][j][k][i] + comz5; } } #endif //--------------------------------------------------------------------- // subsequently, fill the other factors (u+c), (u-c) //--------------------------------------------------------------------- size_t kernel_z_solve_6_off[3] = { 1, 1, 1 }; size_t kernel_z_solve_6_idx[3] = { nx2, nz2, ny2 }; brisbane_kernel 
kernel_z_solve_6; brisbane_kernel_create("z_solve_6", &kernel_z_solve_6); brisbane_kernel_setmem(kernel_z_solve_6, 0, mem_lhsZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_6, 1, mem_lhspZ, brisbane_wr); brisbane_kernel_setmem(kernel_z_solve_6, 2, mem_lhsmZ, brisbane_wr); brisbane_kernel_setmem(kernel_z_solve_6, 3, mem_speed, brisbane_rd); brisbane_kernel_setarg(kernel_z_solve_6, 4, sizeof(double), &dttz2); brisbane_task task6; brisbane_task_create(&task6); brisbane_task_kernel(task6, kernel_z_solve_6, 3, kernel_z_solve_6_off, kernel_z_solve_6_idx); brisbane_task_submit(task6, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k) collapse(2) #else #pragma omp target teams distribute parallel for simd collapse(3) #endif for (j = 1; j <= ny2; j++) { for (k = 1; k <= nz2; k++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd #endif for (i = 1; i <= nx2; i++) { lhspZ[0][j][k][i] = lhsZ[0][j][k][i]; lhspZ[1][j][k][i] = lhsZ[1][j][k][i] - dttz2 * speed[k-1][j][i]; lhspZ[2][j][k][i] = lhsZ[2][j][k][i]; lhspZ[3][j][k][i] = lhsZ[3][j][k][i] + dttz2 * speed[k+1][j][i]; lhspZ[4][j][k][i] = lhsZ[4][j][k][i]; lhsmZ[0][j][k][i] = lhsZ[0][j][k][i]; lhsmZ[1][j][k][i] = lhsZ[1][j][k][i] + dttz2 * speed[k-1][j][i]; lhsmZ[2][j][k][i] = lhsZ[2][j][k][i]; lhsmZ[3][j][k][i] = lhsZ[3][j][k][i] - dttz2 * speed[k+1][j][i]; lhsmZ[4][j][k][i] = lhsZ[4][j][k][i]; } } } #endif //--------------------------------------------------------------------- // FORWARD ELIMINATION //--------------------------------------------------------------------- size_t kernel_z_solve_7_off[2] = { 1, 1 }; size_t kernel_z_solve_7_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_7; brisbane_kernel_create("z_solve_7", &kernel_z_solve_7); brisbane_kernel_setmem(kernel_z_solve_7, 0, mem_lhsZ, brisbane_rw); brisbane_kernel_setmem(kernel_z_solve_7, 1, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_7, 2, sizeof(int), &gp23); 
brisbane_task task7; brisbane_task_create(&task7); brisbane_task_kernel(task7, kernel_z_solve_7, 2, kernel_z_solve_7_off, kernel_z_solve_7_idx); brisbane_task_submit(task7, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(k,m,fac1,k1,k2) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(fac1,k1,k2) #endif for (i = 1; i <= nx2; i++) { for (k = 0; k <= gp23; k++) { k1 = k + 1; k2 = k + 2; fac1 = 1.0/lhsZ[2][j][k][i]; lhsZ[3][j][k][i] = fac1*lhsZ[3][j][k][i]; lhsZ[4][j][k][i] = fac1*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsZ[2][j][k1][i] = lhsZ[2][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[3][j][k][i]; lhsZ[3][j][k1][i] = lhsZ[3][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsZ[1][j][k1][i]*rhs[m][k][j][i]; } lhsZ[1][j][k2][i] = lhsZ[1][j][k2][i] - lhsZ[0][j][k2][i]*lhsZ[3][j][k][i]; lhsZ[2][j][k2][i] = lhsZ[2][j][k2][i] - lhsZ[0][j][k2][i]*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k2][j][i] = rhs[m][k2][j][i] - lhsZ[0][j][k2][i]*rhs[m][k][j][i]; } } } } #endif //--------------------------------------------------------------------- // The last two rows in this grid block are a bit different, // since they for (not have two more rows available for the // elimination of off-diagonal entries //--------------------------------------------------------------------- k = gp22; k1 = gp21; size_t kernel_z_solve_8_off[2] = { 1, 1 }; size_t kernel_z_solve_8_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_8; brisbane_kernel_create("z_solve_8", &kernel_z_solve_8); brisbane_kernel_setmem(kernel_z_solve_8, 0, mem_lhsZ, brisbane_rw); brisbane_kernel_setmem(kernel_z_solve_8, 1, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_8, 2, sizeof(int), &k); 
brisbane_kernel_setarg(kernel_z_solve_8, 3, sizeof(int), &k1); brisbane_task task8; brisbane_task_create(&task8); brisbane_task_kernel(task8, kernel_z_solve_8, 2, kernel_z_solve_8_off, kernel_z_solve_8_idx); brisbane_task_submit(task8, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(m,fac1,fac2) collapse(2) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { fac1 = 1.0/lhsZ[2][j][k][i]; lhsZ[3][j][k][i] = fac1*lhsZ[3][j][k][i]; lhsZ[4][j][k][i] = fac1*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; } lhsZ[2][j][k1][i] = lhsZ[2][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[3][j][k][i]; lhsZ[3][j][k1][i] = lhsZ[3][j][k1][i] - lhsZ[1][j][k1][i]*lhsZ[4][j][k][i]; for (m = 0; m < 3; m++) { rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsZ[1][j][k1][i]*rhs[m][k][j][i]; } //--------------------------------------------------------------------- // scale the last row immediately //--------------------------------------------------------------------- fac2 = 1.0/lhsZ[2][j][k1][i]; for (m = 0; m < 3; m++) { rhs[m][k1][j][i] = fac2*rhs[m][k1][j][i]; } } } #endif //--------------------------------------------------------------------- // for (the u+c and the u-c factors //--------------------------------------------------------------------- size_t kernel_z_solve_9_off[2] = { 1, 1 }; size_t kernel_z_solve_9_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_9; brisbane_kernel_create("z_solve_9", &kernel_z_solve_9); brisbane_kernel_setmem(kernel_z_solve_9, 0, mem_lhspZ, brisbane_rw); brisbane_kernel_setmem(kernel_z_solve_9, 1, mem_lhsmZ, brisbane_rw); brisbane_kernel_setmem(kernel_z_solve_9, 2, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_9, 3, sizeof(int), &gp23); brisbane_task task9; brisbane_task_create(&task9); brisbane_task_kernel(task9, kernel_z_solve_9, 2, kernel_z_solve_9_off, kernel_z_solve_9_idx); brisbane_task_submit(task9, brisbane_default, NULL, true); #if 0 #ifdef 
SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(k,m,fac1,k1,k2) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(fac1,k1,k2) #endif for (i = 1; i <= nx2; i++) { for (k = 0; k <= gp23; k++) { k1 = k + 1; k2 = k + 2; m = 3; fac1 = 1.0/lhspZ[2][j][k][i]; lhspZ[3][j][k][i] = fac1*lhspZ[3][j][k][i]; lhspZ[4][j][k][i] = fac1*lhspZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspZ[2][j][k1][i] = lhspZ[2][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[3][j][k][i]; lhspZ[3][j][k1][i] = lhspZ[3][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhspZ[1][j][k1][i]*rhs[m][k][j][i]; lhspZ[1][j][k2][i] = lhspZ[1][j][k2][i] - lhspZ[0][j][k2][i]*lhspZ[3][j][k][i]; lhspZ[2][j][k2][i] = lhspZ[2][j][k2][i] - lhspZ[0][j][k2][i]*lhspZ[4][j][k][i]; rhs[m][k2][j][i] = rhs[m][k2][j][i] - lhspZ[0][j][k2][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmZ[2][j][k][i]; lhsmZ[3][j][k][i] = fac1*lhsmZ[3][j][k][i]; lhsmZ[4][j][k][i] = fac1*lhsmZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmZ[2][j][k1][i] = lhsmZ[2][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[3][j][k][i]; lhsmZ[3][j][k1][i] = lhsmZ[3][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsmZ[1][j][k1][i]*rhs[m][k][j][i]; lhsmZ[1][j][k2][i] = lhsmZ[1][j][k2][i] - lhsmZ[0][j][k2][i]*lhsmZ[3][j][k][i]; lhsmZ[2][j][k2][i] = lhsmZ[2][j][k2][i] - lhsmZ[0][j][k2][i]*lhsmZ[4][j][k][i]; rhs[m][k2][j][i] = rhs[m][k2][j][i] - lhsmZ[0][j][k2][i]*rhs[m][k][j][i]; } } } #endif //--------------------------------------------------------------------- // And again the last two rows separately //--------------------------------------------------------------------- k = gp22; k1 = gp21; size_t kernel_z_solve_10_off[2] = { 1, 1 }; size_t kernel_z_solve_10_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_10; 
brisbane_kernel_create("z_solve_10", &kernel_z_solve_10); brisbane_kernel_setmem(kernel_z_solve_10, 0, mem_lhspZ, brisbane_rw); brisbane_kernel_setmem(kernel_z_solve_10, 1, mem_lhsmZ, brisbane_rw); brisbane_kernel_setmem(kernel_z_solve_10, 2, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_10, 3, sizeof(int), &k); brisbane_kernel_setarg(kernel_z_solve_10, 4, sizeof(int), &k1); brisbane_task task10; brisbane_task_create(&task10); brisbane_task_kernel(task10, kernel_z_solve_10, 2, kernel_z_solve_10_off, kernel_z_solve_10_idx); brisbane_task_submit(task10, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,m,fac1) #else #pragma omp target teams distribute parallel for simd private(m,fac1) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(m, fac1) #endif for (i = 1; i <= nx2; i++) { m = 3; fac1 = 1.0/lhspZ[2][j][k][i]; lhspZ[3][j][k][i] = fac1*lhspZ[3][j][k][i]; lhspZ[4][j][k][i] = fac1*lhspZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhspZ[2][j][k1][i] = lhspZ[2][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[3][j][k][i]; lhspZ[3][j][k1][i] = lhspZ[3][j][k1][i] - lhspZ[1][j][k1][i]*lhspZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhspZ[1][j][k1][i]*rhs[m][k][j][i]; m = 4; fac1 = 1.0/lhsmZ[2][j][k][i]; lhsmZ[3][j][k][i] = fac1*lhsmZ[3][j][k][i]; lhsmZ[4][j][k][i] = fac1*lhsmZ[4][j][k][i]; rhs[m][k][j][i] = fac1*rhs[m][k][j][i]; lhsmZ[2][j][k1][i] = lhsmZ[2][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[3][j][k][i]; lhsmZ[3][j][k1][i] = lhsmZ[3][j][k1][i] - lhsmZ[1][j][k1][i]*lhsmZ[4][j][k][i]; rhs[m][k1][j][i] = rhs[m][k1][j][i] - lhsmZ[1][j][k1][i]*rhs[m][k][j][i]; //--------------------------------------------------------------------- // Scale the last row immediately (some of this is overkill // if this is the last cell) //--------------------------------------------------------------------- rhs[3][k1][j][i] = 
rhs[3][k1][j][i]/lhspZ[2][j][k1][i]; rhs[4][k1][j][i] = rhs[4][k1][j][i]/lhsmZ[2][j][k1][i]; } } #endif //--------------------------------------------------------------------- // BACKSUBSTITUTION //--------------------------------------------------------------------- k = gp22; k1 = gp21; size_t kernel_z_solve_11_off[2] = { 1, 1 }; size_t kernel_z_solve_11_idx[2] = { nx2, ny2 }; brisbane_kernel kernel_z_solve_11; brisbane_kernel_create("z_solve_11", &kernel_z_solve_11); brisbane_kernel_setmem(kernel_z_solve_11, 0, mem_lhsZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_11, 1, mem_lhspZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_11, 2, mem_lhsmZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_11, 3, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_11, 4, sizeof(int), &k); brisbane_kernel_setarg(kernel_z_solve_11, 5, sizeof(int), &k1); brisbane_task task11; brisbane_task_create(&task11); brisbane_task_kernel(task11, kernel_z_solve_11, 2, kernel_z_solve_11_off, kernel_z_solve_11_idx); brisbane_task_submit(task11, brisbane_default, NULL, true); #if 0 #pragma omp target teams distribute parallel for private(i,j,m) collapse(2) for (j = 1; j <= ny2; j++) { for (i = 1; i <= nx2; i++) { for (m = 0; m < 3; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - lhsZ[3][j][k][i]*rhs[m][k1][j][i]; } rhs[3][k][j][i] = rhs[3][k][j][i] - lhspZ[3][j][k][i]*rhs[3][k1][j][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmZ[3][j][k][i]*rhs[4][k1][j][i]; } } #endif //--------------------------------------------------------------------- // Whether or not this is the last processor, we always have // to complete the back-substitution //--------------------------------------------------------------------- //--------------------------------------------------------------------- // The first three factors //--------------------------------------------------------------------- size_t kernel_z_solve_12_off[2] = { 1, 1 }; size_t kernel_z_solve_12_idx[2] = { nx2, ny2 }; 
brisbane_kernel kernel_z_solve_12; brisbane_kernel_create("z_solve_12", &kernel_z_solve_12); brisbane_kernel_setmem(kernel_z_solve_12, 0, mem_lhsZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_12, 1, mem_lhspZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_12, 2, mem_lhsmZ, brisbane_rd); brisbane_kernel_setmem(kernel_z_solve_12, 3, mem_rhs, brisbane_rw); brisbane_kernel_setarg(kernel_z_solve_12, 4, sizeof(int), &gp23); brisbane_task task12; brisbane_task_create(&task12); brisbane_task_kernel(task12, kernel_z_solve_12, 2, kernel_z_solve_12_off, kernel_z_solve_12_idx); brisbane_task_submit(task12, brisbane_default, NULL, true); #if 0 #ifdef SPEC_USE_INNER_SIMD #pragma omp target teams distribute parallel for private(i,j,k,m) #else #pragma omp target teams distribute parallel for simd private(k,m,k1,k2) collapse(2) #endif for (j = 1; j <= ny2; j++) { #ifdef SPEC_USE_INNER_SIMD #pragma omp simd private(k1,k2) #endif for (i = 1; i <= nx2; i++) { for (k = gp23; k >= 0; k--) { k1 = k + 1; k2 = k + 2; for (m = 0; m < 3; m++) { rhs[m][k][j][i] = rhs[m][k][j][i] - lhsZ[3][j][k][i]*rhs[m][k1][j][i] - lhsZ[4][j][k][i]*rhs[m][k2][j][i]; } //------------------------------------------------------------------- // And the remaining two //------------------------------------------------------------------- rhs[3][k][j][i] = rhs[3][k][j][i] - lhspZ[3][j][k][i]*rhs[3][k1][j][i] - lhspZ[4][j][k][i]*rhs[3][k2][j][i]; rhs[4][k][j][i] = rhs[4][k][j][i] - lhsmZ[3][j][k][i]*rhs[4][k1][j][i] - lhsmZ[4][j][k][i]*rhs[4][k2][j][i]; } } } #endif }/* end omp target data */ brisbane_mem_release(mem_lhsZ); brisbane_mem_release(mem_lhspZ); brisbane_mem_release(mem_lhsmZ); brisbane_mem_release(mem_rhosZ); tzetar(); }
staticChunk.c
#include <omp.h>
#include <stdio.h>

/*
 * Demo of OpenMP static scheduling: fill a[0..n-1] with i/2 in parallel,
 * then print a[878].
 *
 * Fixes vs. original:
 *  - "#pragma omp for" outside a parallel region is an orphaned worksharing
 *    construct and executes on a single thread; "#pragma omp parallel for"
 *    is required to actually distribute the static chunks.
 *  - scanf's result was unchecked, and n was not validated: n > 1000
 *    overflowed a[] and n <= 878 made the printf read an uninitialized
 *    element (undefined behavior in both cases).
 */
int main(void)
{
  enum { N_MAX = 1000, PROBE = 878 };
  double a[N_MAX];
  int n;

  if (scanf("%d", &n) != 1 || n < 0) {
    fprintf(stderr, "expected a non-negative element count\n");
    return 1;
  }
  if (n > N_MAX)
    n = N_MAX;  /* clamp: a[] holds at most N_MAX elements */

  #pragma omp parallel for schedule(static)
  for (int i = 0; i < n; i++)
    a[i] = (double)i / 2.0;

  if (n > PROBE)
    printf("a[878]=%f\n", a[PROBE]);
  else
    printf("a[878] was not written: only %d elements initialized\n", n);
  return 0;
}
Layer_Conv2D.h
/* * Layers.h * rl * * Created by Guido Novati on 11.02.16. * Copyright 2016 ETH Zurich. All rights reserved. * */ #pragma once #include "Layers.h" template < int InX, int InY, int InC, //input image: x:width, y:height, c:color channels int KnX, int KnY, int KnC, //filter: x:width, y:height, c:color channels int OpX, int OpY //output img: x:width, y:height, same color channels as KnC > struct Conv2DLayer: public Layer { Params* allocate_params() const override { //number of kernel parameters: // 2d kernel size * number of inp channels * number of out channels const int nParams = KnY * KnX * InC * KnC; const int nBiases = KnC; return new Params(nParams, nBiases); } Conv2DLayer(const int _ID) : Layer(OpX * OpY * KnC, _ID) { static_assert(InX>0 && InY>0 && InC>0, "Invalid input"); static_assert(KnX>0 && KnY>0 && KnC>0, "Invalid kernel"); static_assert(OpX>0 && OpY>0, "Invalid outpus"); print(); } void print() { printf("(%d) Conv: In:[%d %d %d %d %d] F:[%d %d %d %d] Out:[%d %d %d]\n", ID, OpY,OpX,KnY,KnX,InC, KnY,KnX,InC,KnC, OpX,OpY,KnC); } void forward(const std::vector<Activation*>& act, const std::vector<Params*>& param) const override { assert(act[ID]->layersSize == OpY * OpX * KnC); assert(act[ID-1]->layersSize == OpY * OpX * KnY * KnX * InC ); assert(param[ID]->nWeights == KnY * KnX * InC * KnC); assert(param[ID]->nBiases == KnC); const int batchSize = act[ID]->batchSize; const Real* const INP = act[ID-1]->output; Real* const OUT = act[ID]->output; // printf("TO CHECK: Conv2DLayer::forward\n"); const Real* const weight= param[ID]->weights; const Real* const bias= param[ID]->biases; #pragma omp parallel for schedule(static) for (int b= 0; b < batchSize * OpY * OpX; b++) for (int n= 0; n < KnC; n++) OUT[b * KnC + n]= bias[n]; gemm(CblasRowMajor, CblasNoTrans, CblasNoTrans, batchSize * OpY * OpX, KnC, KnY * KnX * InC, (Real)1.0, INP, KnY * KnX * InC, weight, KnC, (Real)1.0, OUT, KnC); } void bckward(const std::vector<Activation*>& act, const std::vector<Params*>& 
param, const std::vector<Params*>& grad) const override { const int batchSize = act[ID]->batchSize; const Real* const dEdO = act[ID]->dError_dOutput; // printf("TO CHECK: Conv2DLayer::bckward\n"); const Real* const INP = act[ID-1]->output; // const Real* const weight = param[ID]->weights; // // TO CHECK: Implement BackProp to compute bias gradient: dError / dBias { Real* const grad_B = grad[ID]->biases; // size KnC std::fill(grad_B, grad_B + KnC, 0); #pragma omp parallel for schedule(static, 64/sizeof(Real)) for (int n= 0; n < KnC; n++) for (int b= 0; b < batchSize * OpY * OpX; b++) grad_B[n] += dEdO[b * KnC + n]; } // TO CHECK: Implement BackProp to compute weight gradient: dError / dWeights { Real* const grad_W = grad[ID]->weights; // KnY*KnX*InC * KnC gemm(CblasRowMajor, CblasTrans, CblasNoTrans, KnY * KnX * InC, KnC, batchSize * OpY * OpX, (Real)1.0, INP, KnY * KnX * InC, dEdO, KnC, (Real)0.0, grad_W, KnC); } // TO CHECK: Implement BackProp to compute dEdO of previous layer { Real* const errinp = act[ID-1]->dError_dOutput; gemm(CblasRowMajor, CblasNoTrans, CblasTrans, batchSize * OpY * OpX, KnY * KnX * InC, KnC, (Real)1.0, dEdO, KnC, weight, KnC, (Real)0.0, errinp, KnY * KnX * InC); } } void init(std::mt19937& gen, const std::vector<Params*>& param) const override { // get pointers to layer's weights and bias Real *const W = param[ID]->weights, *const B = param[ID]->biases; // initialize weights with Xavier initialization const int nAdded = KnX * KnY * InC, nW = param[ID]->nWeights; const Real scale = std::sqrt(6.0 / (nAdded + KnC)); std::uniform_real_distribution < Real > dis(-scale, scale); std::generate(W, W + nW, [&]() {return dis( gen );}); std::fill(B, B + KnC, 0); } };
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colorspace-private.h" #include "magick/composite.h" #include "magick/distort.h" #include "magick/draw.h" #include "magick/effect.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/image.h" #include "magick/memory_.h" #include "magick/layer.h" #include "magick/list.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/pixel-private.h" #include "magick/resource_.h" #include "magick/resize.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/transform.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *orient_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  orient_image=(Image *) NULL;
  /*
    Apply the flip/flop/rotate/transpose that undoes the recorded EXIF
    orientation, producing a top-left-oriented image.
  */
  switch(orientation)
  {
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      orient_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
    case TopRightOrientation:
    {
      orient_image=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      orient_image=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      orient_image=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      orient_image=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      orient_image=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      orient_image=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      orient_image=RotateImage(image,270.0,exception);
      break;
    }
  }
  if (orient_image != (Image *) NULL)
    orient_image->orientation=TopLeftOrientation;
  return(orient_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C h o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
  ExceptionInfo *exception)
{
#define ChopImageTag  "Chop/Image"

  CacheView
    *chop_view,
    *image_view;

  Image
    *chop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    extent;

  ssize_t
    y;

  /*
    Check chop geometry.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  assert(chop_info != (RectangleInfo *) NULL);
  if (((chop_info->x+(ssize_t) chop_info->width) < 0) ||
      ((chop_info->y+(ssize_t) chop_info->height) < 0) ||
      (chop_info->x > (ssize_t) image->columns) ||
      (chop_info->y > (ssize_t) image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /* Clamp the requested region so it lies entirely inside the image. */
  extent=(*chop_info);
  if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns)
    extent.width=(size_t) ((ssize_t) image->columns-extent.x);
  if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows)
    extent.height=(size_t) ((ssize_t) image->rows-extent.y);
  if (extent.x < 0)
    {
      extent.width-=(size_t) (-extent.x);
      extent.x=0;
    }
  if (extent.y < 0)
    {
      extent.height-=(size_t) (-extent.y);
      extent.y=0;
    }
  /* The result is the source minus the chopped band in each dimension. */
  chop_image=CloneImage(image,image->columns-extent.width,image->rows-
    extent.height,MagickTrue,exception);
  if (chop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Extract chop image: first the rows above the chopped band, copying only
    the columns outside [extent.x, extent.x+extent.width).
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  chop_view=AcquireAuthenticCacheView(chop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,chop_image,extent.y,1)
#endif
  for (y=0; y < (ssize_t) extent.y; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict chop_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  /*
    Extract chop image: then the rows below the chopped band, shifted up by
    the band's height.
  */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict chop_indexes,
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y,
      image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns,
      1,exception);
    if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    chop_indexes=GetCacheViewAuthenticIndexQueue(chop_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width)))
        {
          *q=(*p);
          if (indexes != (IndexPacket *) NULL)
            {
              if (chop_indexes != (IndexPacket *) NULL)
                *chop_indexes++=GetPixelIndex(indexes+x);
            }
          q++;
        }
      p++;
    }
    if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ChopImage)
#endif
        proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  chop_view=DestroyCacheView(chop_view);
  image_view=DestroyCacheView(image_view);
  chop_image->type=image->type;
  if (status == MagickFalse)
    chop_image=DestroyImage(chop_image);
  return(chop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   C o n s o l i d a t e C M Y K I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into
a
%  single image.
%
%  The format of the ConsolidateCMYKImage method is:
%
%      Image *ConsolidateCMYKImages(const Image *images,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ConsolidateCMYKImages(const Image *images,
  ExceptionInfo *exception)
{
  CacheView
    *cmyk_view,
    *image_view;

  Image
    *cmyk_image,
    *cmyk_images;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Consolidate separate C, M, Y, and K planes into a single image.  The
    input list is consumed four images at a time; intensity of each plane
    becomes one channel of the CMYK result.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  cmyk_images=NewImageList();
  for (i=0; i < (ssize_t) GetImageListLength(images); i+=4)
  {
    cmyk_image=CloneImage(images,0,0,MagickTrue,exception);
    if (cmyk_image == (Image *) NULL)
      break;
    if (SetImageStorageClass(cmyk_image,DirectClass) == MagickFalse)
      break;
    (void) SetImageColorspace(cmyk_image,CMYKColorspace);
    /* First plane (cyan) -> red channel. */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelRed(q,ClampToQuantum(QuantumRange-GetPixelIntensity(images,p)));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /* Second plane (magenta) -> green channel. */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->green=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /* Third plane (yellow) -> blue channel. */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        q->blue=ClampToQuantum(QuantumRange-GetPixelIntensity(images,p));
        p++;
        q++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
    /* Fourth plane (black) -> colormap index (K channel). */
    image_view=AcquireVirtualCacheView(images,exception);
    cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception);
    for (y=0; y < (ssize_t) images->rows; y++)
    {
      register const PixelPacket
        *magick_restrict p;

      register IndexPacket
        *magick_restrict indexes;

      register ssize_t
        x;

      register PixelPacket
        *magick_restrict q;

      p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception);
      q=GetCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1,
        exception);
      if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
        break;
      indexes=GetCacheViewAuthenticIndexQueue(cmyk_view);
      for (x=0; x < (ssize_t) images->columns; x++)
      {
        SetPixelIndex(indexes+x,ClampToQuantum(QuantumRange-
          GetPixelIntensity(images,p)));
        p++;
      }
      if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse)
        break;
    }
    cmyk_view=DestroyCacheView(cmyk_view);
    image_view=DestroyCacheView(image_view);
    AppendImageToList(&cmyk_images,cmyk_image);
    images=GetNextImageInList(images);
    if (images == (Image *) NULL)
      break;
  }
  return(cmyk_images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImage() extracts a region of the image starting at the offset defined
%  by geometry.  Region must be fully defined, and no special handling of
%  geometry flags is performed.
%
%  The format of the CropImage method is:
%
%      Image *CropImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to crop with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry,
  ExceptionInfo *exception)
{
#define CropImageTag  "Crop/Image"

  CacheView
    *crop_view,
    *image_view;

  Image
    *crop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    bounding_box,
    page;

  ssize_t
    y;

  /*
    Check crop geometry.  The crop rectangle is interpreted relative to the
    image's virtual canvas (page), not just the pixel raster.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  bounding_box=image->page;
  if ((bounding_box.width == 0) || (bounding_box.height == 0))
    {
      bounding_box.width=image->columns;
      bounding_box.height=image->rows;
    }
  page=(*geometry);
  if (page.width == 0)
    page.width=bounding_box.width;
  if (page.height == 0)
    page.height=bounding_box.height;
  if (((bounding_box.x-page.x) >= (ssize_t) page.width) ||
      ((bounding_box.y-page.y) >= (ssize_t) page.height) ||
      ((page.x-bounding_box.x) > (ssize_t) image->columns) ||
      ((page.y-bounding_box.y) > (ssize_t) image->rows))
    {
      /*
        Crop is not within virtual canvas, return 1 pixel transparent image.
      */
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=bounding_box;
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      if (crop_image->dispose == BackgroundDispose)
        crop_image->dispose=NoneDispose;
      return(crop_image);
    }
  /* Translate the request into raster coordinates, clamping at the edges. */
  if ((page.x < 0) && (bounding_box.x >= 0))
    {
      page.width+=page.x-bounding_box.x;
      page.x=0;
    }
  else
    {
      page.width-=bounding_box.x-page.x;
      page.x-=bounding_box.x;
      if (page.x < 0)
        page.x=0;
    }
  if ((page.y < 0) && (bounding_box.y >= 0))
    {
      page.height+=page.y-bounding_box.y;
      page.y=0;
    }
  else
    {
      page.height-=bounding_box.y-page.y;
      page.y-=bounding_box.y;
      if (page.y < 0)
        page.y=0;
    }
  if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns)
    page.width=image->columns-page.x;
  if ((geometry->width != 0) && (page.width > geometry->width))
    page.width=geometry->width;
  if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows)
    page.height=image->rows-page.y;
  if ((geometry->height != 0) && (page.height > geometry->height))
    page.height=geometry->height;
  bounding_box.x+=page.x;
  bounding_box.y+=page.y;
  if ((page.width == 0) || (page.height == 0))
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning,
        "GeometryDoesNotContainImage","`%s'",image->filename);
      return((Image *) NULL);
    }
  /*
    Initialize crop image attributes.
  */
  crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception);
  if (crop_image == (Image *) NULL)
    return((Image *) NULL);
  crop_image->page.width=image->page.width;
  crop_image->page.height=image->page.height;
  if (((ssize_t) (bounding_box.x+bounding_box.width) > (ssize_t)
       image->page.width) ||
      ((ssize_t) (bounding_box.y+bounding_box.height) > (ssize_t)
       image->page.height))
    {
      crop_image->page.width=bounding_box.width;
      crop_image->page.height=bounding_box.height;
    }
  crop_image->page.x=bounding_box.x;
  crop_image->page.y=bounding_box.y;
  /*
    Crop image: copy the region row by row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  crop_view=AcquireAuthenticCacheView(crop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,crop_image,crop_image->rows,1)
#endif
  for (y=0; y < (ssize_t) crop_image->rows; y++)
  {
    register const IndexPacket
      *magick_restrict indexes;

    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict crop_indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns,
      1,exception);
    q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    crop_indexes=GetCacheViewAuthenticIndexQueue(crop_view);
    (void) memcpy(q,p,(size_t) crop_image->columns*sizeof(*p));
    if ((indexes != (IndexPacket *) NULL) &&
        (crop_indexes != (IndexPacket *) NULL))
      (void) memcpy(crop_indexes,indexes,(size_t) crop_image->columns*
        sizeof(*crop_indexes));
    if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_CropImage)
#endif
        proceed=SetImageProgress(image,CropImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  crop_view=DestroyCacheView(crop_view);
  image_view=DestroyCacheView(image_view);
  crop_image->type=image->type;
  if (status == MagickFalse)
    crop_image=DestroyImage(crop_image);
  return(crop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C r o p I m a g e T o T i l e s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CropImageToTiles() crops a single image, into a possible list of tiles.
%  This may include a single sub-region of the image.  This basically applies
%  all the normal geometry flags for Crop.
%
%      Image *CropImageToTiles(const Image *image,
%         const RectangleInfo *crop_geometry, ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image The transformed image is returned as this parameter.
%
%    o crop_geometry: A crop geometry string.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline double MagickRound(double x)
{
  /*
    Round the fraction to nearest integer.
  */
  if ((x-floor(x)) < (ceil(x)-x))
    return(floor(x));
  return(ceil(x));
}

MagickExport Image *CropImageToTiles(const Image *image,
  const char *crop_geometry,ExceptionInfo *exception)
{
  Image
    *next,
    *crop_image;

  MagickStatusType
    flags;

  RectangleInfo
    geometry;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  crop_image=NewImageList();
  next=NewImageList();
  flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception);
  if ((flags & AreaValue) != 0)
    {
      PointInfo
        delta,
        offset;

      RectangleInfo
        crop;

      size_t
        height,
        width;

      /*
        Crop into NxM tiles (@ flag): geometry gives the tile grid, so each
        tile spans width/geometry.width by height/geometry.height pixels.
      */
      width=image->columns;
      height=image->rows;
      if (geometry.width == 0)
        geometry.width=1;
      if (geometry.height == 0)
        geometry.height=1;
      if ((flags & AspectValue) == 0)
        {
          width-=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height-=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      else
        {
          width+=(geometry.x < 0 ? -1 : 1)*geometry.x;
          height+=(geometry.y < 0 ? -1 : 1)*geometry.y;
        }
      delta.x=(double) width/geometry.width;
      delta.y=(double) height/geometry.height;
      if (delta.x < 1.0)
        delta.x=1.0;
      if (delta.y < 1.0)
        delta.y=1.0;
      for (offset.y=0; offset.y < (double) height; )
      {
        if ((flags & AspectValue) == 0)
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? 0 : geometry.y)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? 0 : geometry.y)));
          }
        else
          {
            crop.y=(ssize_t) MagickRound((MagickRealType) (offset.y-
              (geometry.y > 0 ? geometry.y : 0)));
            offset.y+=delta.y;   /* increment now to find width */
            crop.height=(size_t) MagickRound((MagickRealType) (offset.y+
              (geometry.y < 0 ? geometry.y : 0)));
          }
        crop.height-=crop.y;
        crop.y+=image->page.y;
        for (offset.x=0; offset.x < (double) width; )
        {
          if ((flags & AspectValue) == 0)
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? 0 : geometry.x)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? 0 : geometry.x)));
            }
          else
            {
              crop.x=(ssize_t) MagickRound((MagickRealType) (offset.x-
                (geometry.x > 0 ? geometry.x : 0)));
              offset.x+=delta.x;  /* increment now to find height */
              crop.width=(size_t) MagickRound((MagickRealType) (offset.x+
                (geometry.x < 0 ? geometry.x : 0)));
            }
          crop.width-=crop.x;
          crop.x+=image->page.x;
          next=CropImage(image,&crop,exception);
          if (next != (Image *) NULL)
            AppendImageToList(&crop_image,next);
        }
      }
      ClearMagickException(exception);
      return(crop_image);
    }
  if (((geometry.width == 0) && (geometry.height == 0)) ||
      ((flags & XValue) != 0) || ((flags & YValue) != 0))
    {
      /*
        Crop a single region at +X+Y.
      */
      crop_image=CropImage(image,&geometry,exception);
      if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0))
        {
          crop_image->page.width=geometry.width;
          crop_image->page.height=geometry.height;
          crop_image->page.x-=geometry.x;
          crop_image->page.y-=geometry.y;
        }
      return(crop_image);
    }
  if ((image->columns > geometry.width) || (image->rows > geometry.height))
    {
      RectangleInfo
        page;

      size_t
        height,
        width;

      ssize_t
        x,
        y;

      /*
        Crop into tiles of fixed size WxH.
      */
      page=image->page;
      if (page.width == 0)
        page.width=image->columns;
      if (page.height == 0)
        page.height=image->rows;
      width=geometry.width;
      if (width == 0)
        width=page.width;
      height=geometry.height;
      if (height == 0)
        height=page.height;
      next=NewImageList();
      for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height)
      {
        for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width)
        {
          geometry.width=width;
          geometry.height=height;
          geometry.x=x;
          geometry.y=y;
          next=CropImage(image,&geometry,exception);
          if (next == (Image *) NULL)
            break;
          AppendImageToList(&crop_image,next);
        }
        if (next == (Image *) NULL)
          break;
      }
      return(crop_image);
    }
  /* Geometry covers the whole image: return an unmodified copy. */
  return(CloneImage(image,0,0,MagickTrue,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x c e r p t I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExcerptImage() returns a excerpt of the image as defined by the geometry.
%
%  The format of the ExcerptImage method is:
%
%      Image *ExcerptImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.  Unlike CropImage(), the geometry is taken
    verbatim: no virtual-canvas (page) handling is performed.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register IndexPacket
      *magick_restrict excerpt_indexes,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    (void) memcpy(q,p,(size_t) excerpt_image->columns*sizeof(*q));
    indexes=GetCacheViewAuthenticIndexQueue(image_view);
    if (indexes != (IndexPacket *) NULL)
      {
        excerpt_indexes=GetCacheViewAuthenticIndexQueue(excerpt_view);
        if (excerpt_indexes != (IndexPacket *) NULL)
          (void) memcpy(excerpt_indexes,indexes,(size_t)
            excerpt_image->columns*sizeof(*excerpt_indexes));
      }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  MagickBooleanType
    status;

  /*
    Allocate extent image: a background-filled canvas of the requested size
    with the source composited at (-x,-y).
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageBackgroundColor(extent_image);
  if (status == MagickFalse)
    {
      InheritException(exception,&extent_image->exception);
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  status=CompositeImage(extent_image,image->compose,image,-geometry->x,
    -geometry->y);
  if (status == MagickFalse)
    {
      InheritException(exception,&extent_image->exception);
      extent_image=DestroyImage(extent_image);
      return((Image *) NULL);
    }
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception) { #define FlipImageTag "Flip/Image" CacheView *flip_view, *image_view; Image *flip_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flip_image=CloneImage(image,0,0,MagickTrue,exception); if (flip_image == (Image *) NULL) return((Image *) NULL); /* Flip image. */ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flip_view=AcquireAuthenticCacheView(flip_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flip_image,flip_image->rows,1) #endif for (y=0; y < (ssize_t) flip_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict flip_indexes; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y- 1),flip_image->columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } (void) memcpy(q,p,(size_t) image->columns*sizeof(*q)); indexes=GetCacheViewVirtualIndexQueue(image_view); if (indexes != (const IndexPacket *) NULL) { flip_indexes=GetCacheViewAuthenticIndexQueue(flip_view); if (flip_indexes != (IndexPacket *) NULL) (void) memcpy(flip_indexes,indexes,(size_t) image->columns* sizeof(*flip_indexes)); } if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse) status=MagickFalse; if 
(image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FlipImage) #endif proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flip_view=DestroyCacheView(flip_view); image_view=DestroyCacheView(image_view); flip_image->type=image->type; if (page.height != 0) page.y=(ssize_t) (page.height-flip_image->rows-page.y); flip_image->page=page; if (status == MagickFalse) flip_image=DestroyImage(flip_image); return(flip_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % F l o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FlopImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis. % % The format of the FlopImage method is: % % Image *FlopImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception) { #define FlopImageTag "Flop/Image" CacheView *flop_view, *image_view; Image *flop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); flop_image=CloneImage(image,0,0,MagickTrue,exception); if (flop_image == (Image *) NULL) return((Image *) NULL); /* Flop each row. 
*/ status=MagickTrue; progress=0; page=image->page; image_view=AcquireVirtualCacheView(image,exception); flop_view=AcquireAuthenticCacheView(flop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,flop_image,flop_image->rows,1) #endif for (y=0; y < (ssize_t) flop_image->rows; y++) { register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict flop_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1, exception); if ((p == (PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } q+=flop_image->columns; indexes=GetCacheViewVirtualIndexQueue(image_view); flop_indexes=GetCacheViewAuthenticIndexQueue(flop_view); for (x=0; x < (ssize_t) flop_image->columns; x++) { (*--q)=(*p++); if ((indexes != (const IndexPacket *) NULL) && (flop_indexes != (IndexPacket *) NULL)) SetPixelIndex(flop_indexes+flop_image->columns-x-1, GetPixelIndex(indexes+x)); } if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_FlopImage) #endif proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } flop_view=DestroyCacheView(flop_view); image_view=DestroyCacheView(image_view); flop_image->type=image->type; if (page.width != 0) page.x=(ssize_t) (page.width-flop_image->columns-page.x); flop_image->page=page; if (status == MagickFalse) flop_image=DestroyImage(flop_image); return(flop_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R o l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RollImage() offsets an image as defined by x_offset and y_offset. % % The format of the RollImage method is: % % Image *RollImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset: the number of columns to roll in the horizontal direction. % % o y_offset: the number of rows to roll in the vertical direction. % % o exception: return any errors or warnings in this structure. % */ static MagickBooleanType CopyImageRegion(Image *destination,const Image *source, const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy, const ssize_t dx,const ssize_t dy,ExceptionInfo *exception) { CacheView *source_view, *destination_view; MagickBooleanType status; ssize_t y; if (columns == 0) return(MagickTrue); status=MagickTrue; source_view=AcquireVirtualCacheView(source,exception); destination_view=AcquireAuthenticCacheView(destination,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(source,destination,rows,1) #endif for (y=0; y < (ssize_t) rows; y++) { MagickBooleanType sync; register const IndexPacket *magick_restrict indexes; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict destination_indexes; register PixelPacket *magick_restrict q; /* Transfer scanline. 
*/ if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception); q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(source_view); (void) memcpy(q,p,(size_t) columns*sizeof(*p)); if (indexes != (IndexPacket *) NULL) { destination_indexes=GetCacheViewAuthenticIndexQueue(destination_view); if (destination_indexes != (IndexPacket *) NULL) (void) memcpy(destination_indexes,indexes,(size_t) columns*sizeof(*indexes)); } sync=SyncCacheViewAuthenticPixels(destination_view,exception); if (sync == MagickFalse) status=MagickFalse; } destination_view=DestroyCacheView(destination_view); source_view=DestroyCacheView(source_view); return(status); } MagickExport Image *RollImage(const Image *image,const ssize_t x_offset, const ssize_t y_offset,ExceptionInfo *exception) { #define RollImageTag "Roll/Image" Image *roll_image; MagickStatusType status; RectangleInfo offset; /* Initialize roll image attributes. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); roll_image=CloneImage(image,0,0,MagickTrue,exception); if (roll_image == (Image *) NULL) return((Image *) NULL); offset.x=x_offset; offset.y=y_offset; while (offset.x < 0) offset.x+=(ssize_t) image->columns; while (offset.x >= (ssize_t) image->columns) offset.x-=(ssize_t) image->columns; while (offset.y < 0) offset.y+=(ssize_t) image->rows; while (offset.y >= (ssize_t) image->rows) offset.y-=(ssize_t) image->rows; /* Roll image. 
*/ status=CopyImageRegion(roll_image,image,(size_t) offset.x, (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows- offset.y,0,0,exception); (void) SetImageProgress(image,RollImageTag,0,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x, (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0, exception); (void) SetImageProgress(image,RollImageTag,1,3); status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows- offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception); (void) SetImageProgress(image,RollImageTag,2,3); status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows- offset.y,0,0,offset.x,offset.y,exception); (void) SetImageProgress(image,RollImageTag,3,3); roll_image->type=image->type; if (status == MagickFalse) roll_image=DestroyImage(roll_image); return(roll_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S h a v e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ShaveImage() shaves pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the ShaveImage method is: % % Image *ShaveImage(const Image *image,const RectangleInfo *shave_info, % ExceptionInfo *exception) % % A description of each parameter follows: % % o shave_image: Method ShaveImage returns a pointer to the shaved % image. A null image is returned if there is a memory shortage or % if the image width or height is zero. % % o image: the image. % % o shave_info: Specifies a pointer to a RectangleInfo which defines the % region of the image to crop. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Refuse shave amounts that would consume the entire image: the width is
    removed from both the left and right edges (and likewise for the height).
  */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Crop the interior region left after removing shave_info->width columns
    from each side and shave_info->height rows from top and bottom.
  */
  SetGeometry(image,&geometry);
  geometry.width-=2*shave_info->width;
  geometry.height-=2*shave_info->height;
  geometry.x=(ssize_t) shave_info->width+image->page.x;
  geometry.y=(ssize_t) shave_info->height+image->page.y;
  shave_image=CropImage(image,&geometry,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual page canvas too, so the shaved image is not treated
    as a sub-region of the original page.
  */
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e   I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
%
%  The format of the SpliceImage method is:
%
%      Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to splice with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass) == MagickFalse) { InheritException(exception,&splice_image->exception); splice_image=DestroyImage(splice_image); return((Image *) NULL); } (void) SetImageBackgroundColor(splice_image); /* Respect image geometry. 
*/
  switch (image->gravity)
  {
    default:
    case UndefinedGravity:
    case NorthWestGravity:
      break;
    case NorthGravity:
    {
      /* top edge: center the splice horizontally */
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      break;
    }
    case NorthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      break;
    }
    case WestGravity:
    {
      /*
        Bug fix: vertical centering on the west edge must offset y by half
        the splice HEIGHT, not half its width (mirrors EastGravity below).
      */
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case StaticGravity:
    case CenterGravity:
    {
      /* center in both directions */
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case EastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height/2;
      break;
    }
    case SouthWestGravity:
    {
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width/2;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
    case SouthEastGravity:
    {
      splice_geometry.x+=(ssize_t) splice_geometry.width;
      splice_geometry.y+=(ssize_t) splice_geometry.height;
      break;
    }
  }
  /*
    Splice image.
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_geometry.y,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes, *magick_restrict splice_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != 
(MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,splice_image,splice_image->rows,1) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict indexes, *magick_restrict splice_indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); splice_indexes=GetCacheViewAuthenticIndexQueue(splice_view); for (x=0; x < columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q++; for ( ; x < (ssize_t) splice_image->columns; x++) { SetPixelRed(q,GetPixelRed(p)); SetPixelGreen(q,GetPixelGreen(p)); SetPixelBlue(q,GetPixelBlue(p)); SetPixelOpacity(q,OpaqueOpacity); if (image->matte != MagickFalse) SetPixelOpacity(q,GetPixelOpacity(p)); if (image->colorspace == CMYKColorspace) 
SetPixelIndex(splice_indexes+x,GetPixelIndex(indexes)); indexes++; p++; q++; } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % */ /* DANGER: This function destroys what it assumes to be a single image list. If the input image is part of a larger list, all other images in that list will be simply 'lost', not destroyed. Also if the crop generates a list of images only the first image is resized. 
And finally if the crop succeeds and the resize failed, you will get a cropped image, as well as a 'false' or 'failed' report. This function and should probably be deprecated in favor of direct calls to CropImageToTiles() or ResizeImage(), as appropriate. */ MagickExport MagickBooleanType TransformImage(Image **image, const char *crop_geometry,const char *image_geometry) { Image *resize_image, *transform_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); transform_image=(*image); if (crop_geometry != (const char *) NULL) { Image *crop_image; /* Crop image to a user specified size. */ crop_image=CropImageToTiles(*image,crop_geometry,&(*image)->exception); if (crop_image == (Image *) NULL) transform_image=CloneImage(*image,0,0,MagickTrue,&(*image)->exception); else { transform_image=DestroyImage(transform_image); transform_image=GetFirstImageInList(crop_image); } *image=transform_image; } if (image_geometry == (const char *) NULL) return(MagickTrue); /* Scale image to a user specified size. 
*/ flags=ParseRegionGeometry(transform_image,image_geometry,&geometry, &(*image)->exception); (void) flags; if ((transform_image->columns == geometry.width) && (transform_image->rows == geometry.height)) return(MagickTrue); resize_image=ResizeImage(transform_image,geometry.width,geometry.height, transform_image->filter,transform_image->blur,&(*image)->exception); if (resize_image == (Image *) NULL) return(MagickFalse); transform_image=DestroyImage(transform_image); transform_image=resize_image; *image=transform_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImages() calls TransformImage() on each image of a sequence. % % The format of the TransformImage method is: % % MagickBooleanType TransformImages(Image **image, % const char *crop_geometry,const char *image_geometry) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. 
% */
MagickExport MagickBooleanType TransformImages(Image **images,
  const char *crop_geometry,const char *image_geometry)
{
  Image
    *image,
    **image_list,
    *transform_images;

  MagickStatusType
    status;

  register ssize_t
    i;

  assert(images != (Image **) NULL);
  assert((*images)->signature == MagickCoreSignature);
  if ((*images)->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",
      (*images)->filename);
  /*
    Flatten the list into a NULL-terminated array so each image can be
    transformed (and possibly replaced) independently of list linkage.
  */
  image_list=ImageListToArray(*images,&(*images)->exception);
  if (image_list == (Image **) NULL)
    return(MagickFalse);
  status=MagickTrue;
  transform_images=NewImageList();
  for (i=0; image_list[i] != (Image *) NULL; i++)
  {
    image=image_list[i];
    /*
      TransformImage() may replace the handle in place; accumulate failures
      across the sequence instead of aborting on the first one.
    */
    status&=TransformImage(&image,crop_geometry,image_geometry);
    AppendImageToList(&transform_images,image);
  }
  /* hand the rebuilt list back to the caller and release the scratch array */
  *images=transform_images;
  image_list=(Image **) RelinquishMagickMemory(image_list);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s p o s e   I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransposeImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis while rotating them by 90 degrees.
%
%  The format of the TransposeImage method is:
%
%      Image *TransposeImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception) { #define TransposeImageTag "Transpose/Image" CacheView *image_view, *transpose_view; Image *transpose_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transpose_image == (Image *) NULL) return((Image *) NULL); /* Transpose image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transpose_view=AcquireAuthenticCacheView(transpose_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transpose_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict transpose_indexes, *magick_restrict indexes; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1), 0,1,transpose_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } (void) memcpy(q,p,(size_t) image->columns*sizeof(*q)); indexes=GetCacheViewAuthenticIndexQueue(image_view); if (indexes != (IndexPacket *) NULL) { transpose_indexes=GetCacheViewAuthenticIndexQueue(transpose_view); if (transpose_indexes != (IndexPacket *) NULL) (void) memcpy(transpose_indexes,indexes,(size_t) image->columns*sizeof(*transpose_indexes)); } if 
(SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,TransposeImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transpose_view=DestroyCacheView(transpose_view); image_view=DestroyCacheView(image_view); transpose_image->type=image->type; page=transpose_image->page; Swap(page.width,page.height); Swap(page.x,page.y); transpose_image->page=page; if (status == MagickFalse) transpose_image=DestroyImage(transpose_image); return(transpose_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s v e r s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransverseImage() creates a vertical mirror image by reflecting the pixels % around the central x-axis while rotating them by 270 degrees. % % The format of the TransverseImage method is: % % Image *TransverseImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception) { #define TransverseImageTag "Transverse/Image" CacheView *image_view, *transverse_view; Image *transverse_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo page; ssize_t y; assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue, exception); if (transverse_image == (Image *) NULL) return((Image *) NULL); /* Transverse image. */ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); transverse_view=AcquireAuthenticCacheView(transverse_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,transverse_image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register const PixelPacket *magick_restrict p; register IndexPacket *magick_restrict transverse_indexes, *magick_restrict indexes; register ssize_t x; register PixelPacket *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y- 1),0,1,transverse_image->rows,exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) { status=MagickFalse; continue; } q+=image->columns; for (x=0; x < (ssize_t) image->columns; x++) *--q=(*p++); indexes=GetCacheViewAuthenticIndexQueue(image_view); if (indexes != (IndexPacket *) NULL) { transverse_indexes=GetCacheViewAuthenticIndexQueue(transverse_view); if (transverse_indexes != (IndexPacket *) NULL) for (x=0; x < (ssize_t) image->columns; x++) 
SetPixelIndex(transverse_indexes+image->columns-x-1, GetPixelIndex(indexes+x)); } sync=SyncCacheViewAuthenticPixels(transverse_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransverseImage) #endif proceed=SetImageProgress(image,TransverseImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } transverse_view=DestroyCacheView(transverse_view); image_view=DestroyCacheView(image_view); transverse_image->type=image->type; page=transverse_image->page; Swap(page.width,page.height); Swap(page.x,page.y); if (page.width != 0) page.x=(ssize_t) (page.width-transverse_image->columns-page.x); if (page.height != 0) page.y=(ssize_t) (page.height-transverse_image->rows-page.y); transverse_image->page=page; if (status == MagickFalse) transverse_image=DestroyImage(transverse_image); return(transverse_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r i m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TrimImage() trims pixels from the image edges. It allocates the memory % necessary for the new Image structure and returns a pointer to the new % image. % % The format of the TrimImage method is: % % Image *TrimImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  RectangleInfo
    geometry;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Compute the smallest bounding box that excludes the border color.
  */
  geometry=GetImageBoundingBox(image,exception);
  if ((geometry.width == 0) || (geometry.height == 0))
    {
      Image
        *crop_image;

      /*
        Degenerate case: the whole image matches the border color.  Return a
        single transparent pixel instead of an empty image.
      */
      crop_image=CloneImage(image,1,1,MagickTrue,exception);
      if (crop_image == (Image *) NULL)
        return((Image *) NULL);
      crop_image->background_color.opacity=(Quantum) TransparentOpacity;
      (void) SetImageBackgroundColor(crop_image);
      crop_image->page=image->page;
      /* page offset (-1,-1) flags the "all background" result */
      crop_image->page.x=(-1);
      crop_image->page.y=(-1);
      return(crop_image);
    }
  /*
    Translate the bounding box into page coordinates before cropping.
  */
  geometry.x+=image->page.x;
  geometry.y+=image->page.y;
  return(CropImage(image,&geometry,exception));
}
/* ==== file boundary: H2Pack_gen_proxy_point.c ==== */
#include <stdio.h> #include <string.h> #include <stdlib.h> #include <assert.h> #include <math.h> #include <time.h> #include <omp.h> #include "H2Pack_config.h" #include "H2Pack_typedef.h" #include "H2Pack_aux_structs.h" #include "H2Pack_ID_compress.h" #include "H2Pack_gen_proxy_point.h" #include "H2Pack_utils.h" #include "utils.h" typedef enum { GEN_PP_KRNL_TIMER_IDX = 0, GEN_PP_SPMM_TIMER_IDX, GEN_PP_ID_TIMER_IDX, GEN_PP_MISC_TIMER_IDX } gen_pp_timer_idx_t; struct H2P_gen_pp_param_ { int alg; // Algorithm for selecting Yp proxy points // 0 : Uniform candidate point distribution, one QR // 1 : Nonuniform candidate point distribution, one QR // 2 : Nonuniform candidate point distribution, multiple QR int X0_size; // Number of candidate points in X int Y0_lsize; // Number of candidate points in Y per layer int L3_nlayer; // Y box exterior boundary size factor int max_layer; // Maximum number of layers in domain Y int print_timers; // If we need to print internal timings }; static struct H2P_gen_pp_param_ gen_pp_param; // Generate proxy points with two domains specified as // X = [-L1/2, L1/2]^pt_dim, Y = [-L3/2, L3/2]^pt_dim \ [-L2/2, L2/2]^pt_dim // generated proxy points are in domain Y // Input parameters: // pt_dim : Dimension of point coordinate // krnl_dim : Dimension of kernel's return // reltol : Proxy point selection relative error tolerance // krnl_param : Pointer to kernel function parameter array // krnl_eval : Pointer to kernel matrix evaluation function // L1, L2, L3 : Box sizes of X and Y // alg : Algorithm for selecting Yp proxy points // X0_size : Number of candidate points in X // Y0_lsize : Number of candidate points in Y per layer // max_layer : Maximum number of layers in domain Y // Output parameters: // pp : Generated proxy points, pp should have been initialized // timers : Size 4, timers for different parts void H2P_generate_proxy_point_nlayer( const int pt_dim, const int krnl_dim, const DTYPE reltol, const void *krnl_param, kernel_eval_fptr 
krnl_eval, const DTYPE L1, const DTYPE L2, const DTYPE L3, const int alg, const int X0_size, const int Y0_lsize, const int max_layer, H2P_dense_mat_p pp, double *timers ) { // 1. Initialize working arrays and parameters double st, et; int n_thread = omp_get_max_threads(); int n_layer = (alg == 0) ? 1 : DROUND((L3 - L2) / L1); if (n_layer > max_layer) n_layer = max_layer; int Y0_size = n_layer * Y0_lsize; H2P_dense_mat_p X0_coord, Y0_coord, tmp_coord, Yp_coord; H2P_dense_mat_p tmpA, min_dist, QR_buff; H2P_int_vec_p skel_idx, ID_buff; st = get_wtime_sec(); H2P_dense_mat_init(&X0_coord, pt_dim, X0_size); H2P_dense_mat_init(&Y0_coord, pt_dim, Y0_size); H2P_dense_mat_init(&tmp_coord, pt_dim, Y0_size); H2P_dense_mat_init(&Yp_coord, pt_dim, Y0_size); H2P_dense_mat_init(&tmpA, X0_size * krnl_dim, Y0_size * krnl_dim); H2P_dense_mat_init(&min_dist, X0_size, 1); H2P_dense_mat_init(&QR_buff, 2 * Y0_size, 1); H2P_int_vec_init(&skel_idx, X0_size); H2P_int_vec_init(&ID_buff, 4 * Y0_size); et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; // 2. Generate initial candidate points in X and Y // For Y0, we generate it layer by layer. Each layer has the same number of candidate // points but different volume. Therefore a inner layer has a higher point density. st = get_wtime_sec(); H2P_gen_coord_in_ring(X0_size, pt_dim, 0.0, L1, X0_coord->data, X0_coord->ld); DTYPE Y0_layer_width = (L3 - L2) / (DTYPE) n_layer; for (int i = 0; i < n_layer; i++) { DTYPE layer_L0 = L2 + Y0_layer_width * (DTYPE) i; DTYPE layer_L1 = L2 + Y0_layer_width * (DTYPE) (i + 1); H2P_gen_coord_in_ring(Y0_lsize, pt_dim, layer_L0, layer_L1, Y0_coord->data + i * Y0_lsize, Y0_coord->ld); } et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; // 3. 
Select skeleton points in domain X // Use sparsity + randomize to reduce the ID cost // (1) Generate the kernel matrix st = get_wtime_sec(); H2P_eval_kernel_matrix_OMP(krnl_param, krnl_eval, krnl_dim, X0_coord, Y0_coord, tmpA); et = get_wtime_sec(); timers[GEN_PP_KRNL_TIMER_IDX] += et - st; // (2) Generate sparse random matrix and multiply with the kernel matrix to get a reduced matrix H2P_int_vec_p rndmat_idx = ID_buff; H2P_dense_mat_p rndmat_val = QR_buff; H2P_dense_mat_p tmpA1 = min_dist; st = get_wtime_sec(); int max_nnz_col = 32; H2P_gen_rand_sparse_mat_trans(max_nnz_col, tmpA->ncol, tmpA->nrow, rndmat_val, rndmat_idx); H2P_dense_mat_resize(tmpA1, tmpA->nrow, tmpA->nrow); H2P_calc_sparse_mm_trans( tmpA->nrow, tmpA->nrow, tmpA->ncol, rndmat_val, rndmat_idx, tmpA->data, tmpA->ld, tmpA1->data, tmpA1->ld ); et = get_wtime_sec(); timers[GEN_PP_KRNL_TIMER_IDX] += et - st; // (3) Calculate ID approximation on the reduced matrix and select skeleton points in X st = get_wtime_sec(); if (krnl_dim == 1) { H2P_dense_mat_resize(QR_buff, tmpA1->nrow, 1); } else { int QR_buff_size = (2 * krnl_dim + 2) * tmpA1->ncol + (krnl_dim + 1) * tmpA1->nrow; H2P_dense_mat_resize(QR_buff, QR_buff_size, 1); } H2P_int_vec_set_capacity(ID_buff, 4 * tmpA1->nrow); et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; st = get_wtime_sec(); DTYPE reltol_ = reltol * 1e-2; H2P_ID_compress( tmpA1, QR_REL_NRM, &reltol_, NULL, skel_idx, n_thread, QR_buff->data, ID_buff->data, krnl_dim ); et = get_wtime_sec(); timers[GEN_PP_ID_TIMER_IDX] += et - st; st = get_wtime_sec(); H2P_dense_mat_select_columns(X0_coord, skel_idx); H2P_dense_mat_p Xp_coord = X0_coord; et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; if (alg == 0 || alg == 1) { // 4. 
Select proxy points in domain Y // (1) Generate the kernel matrix st = get_wtime_sec(); // Be careful, Y0_coord should be placed before Xp_coord H2P_eval_kernel_matrix_OMP(krnl_param, krnl_eval, krnl_dim, Y0_coord, Xp_coord, tmpA1); et = get_wtime_sec(); timers[GEN_PP_KRNL_TIMER_IDX] += et - st; // (2) Calculate ID approximation on the kernel matrix and select new proxy points in Y st = get_wtime_sec(); if (krnl_dim == 1) { H2P_dense_mat_resize(QR_buff, tmpA1->nrow, 1); } else { int QR_buff_size = (2 * krnl_dim + 2) * tmpA1->ncol + (krnl_dim + 1) * tmpA1->nrow; H2P_dense_mat_resize(QR_buff, QR_buff_size, 1); } H2P_int_vec_set_capacity(ID_buff, 4 * tmpA1->nrow); et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; st = get_wtime_sec(); DTYPE reltol2 = reltol * 1e-2; H2P_ID_compress( tmpA1, QR_REL_NRM, &reltol2, NULL, skel_idx, n_thread, QR_buff->data, ID_buff->data, krnl_dim ); et = get_wtime_sec(); timers[GEN_PP_ID_TIMER_IDX] += et - st; st = get_wtime_sec(); H2P_dense_mat_select_columns(Y0_coord, skel_idx); H2P_dense_mat_resize(Yp_coord, pt_dim, Y0_coord->ncol); copy_matrix_block(sizeof(DTYPE), pt_dim, Y0_coord->ncol, Y0_coord->data, Y0_coord->ld, Yp_coord->data, Yp_coord->ld); et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; } // End of "if (alg == 1)" if (alg == 2) { // 4. 
Select proxy points in domain Y layer by layer H2P_dense_mat_resize(Yp_coord, pt_dim, 0); for (int i = 0; i < n_layer; i++) { // (1) Put selected proxy points and i-th layer candidate points together st = get_wtime_sec(); H2P_dense_mat_resize(tmp_coord, pt_dim, Yp_coord->ncol + Y0_lsize); DTYPE *Yp_coord_ptr = Yp_coord->data; DTYPE *Y0_layer_i_ptr = Y0_coord->data + i * Y0_lsize; DTYPE *tmp_coord_Yp_ptr = tmp_coord->data; DTYPE *tmp_coord_li_ptr = tmp_coord->data + Yp_coord->ncol; copy_matrix_block(sizeof(DTYPE), pt_dim, Yp_coord->ncol, Yp_coord_ptr, Yp_coord->ld, tmp_coord_Yp_ptr, tmp_coord->ld); copy_matrix_block(sizeof(DTYPE), pt_dim, Y0_lsize, Y0_layer_i_ptr, Y0_coord->ld, tmp_coord_li_ptr, tmp_coord->ld); et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; // (2) Generate kernel matrix for this layer st = get_wtime_sec(); // Be careful, tmp_coord should be placed before Xp_coord H2P_eval_kernel_matrix_OMP(krnl_param, krnl_eval, krnl_dim, tmp_coord, Xp_coord, tmpA1); et = get_wtime_sec(); // (3) Calculate ID approximation on the new kernel matrix and select new proxy points in Y st = get_wtime_sec(); if (krnl_dim == 1) { H2P_dense_mat_resize(QR_buff, tmpA1->nrow, 1); } else { int QR_buff_size = (2 * krnl_dim + 2) * tmpA1->ncol + (krnl_dim + 1) * tmpA1->nrow; H2P_dense_mat_resize(QR_buff, QR_buff_size, 1); } H2P_int_vec_set_capacity(ID_buff, 4 * tmpA1->nrow); et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et - st; st = get_wtime_sec(); DTYPE reltol2 = reltol * 1e-2; H2P_ID_compress( tmpA1, QR_REL_NRM, &reltol2, NULL, skel_idx, n_thread, QR_buff->data, ID_buff->data, krnl_dim ); et = get_wtime_sec(); timers[GEN_PP_ID_TIMER_IDX] += et - st; st = get_wtime_sec(); H2P_dense_mat_select_columns(tmp_coord, skel_idx); H2P_dense_mat_resize(Yp_coord, pt_dim, tmp_coord->ncol); copy_matrix_block(sizeof(DTYPE), pt_dim, tmp_coord->ncol, tmp_coord->data, tmp_coord->ld, Yp_coord->data, Yp_coord->ld); et = get_wtime_sec(); timers[GEN_PP_MISC_TIMER_IDX] += et 
- st; } // End of i loop } // End of "if (alg == 2)" // 5. Increase the density of selected proxy points if necessary if (reltol >= 1e-12) { // No need to increase the density, just copy it H2P_dense_mat_resize(pp, pt_dim, Yp_coord->ncol); copy_matrix_block(sizeof(DTYPE), pt_dim, Yp_coord->ncol, Yp_coord->data, Yp_coord->ld, pp->data, pp->ld); } else { int Yp_size = Yp_coord->ncol; H2P_dense_mat_resize(min_dist, Yp_size, 1); DTYPE *coord_i = tmpA->data; for (int i = 0; i < Yp_size; i++) min_dist->data[i] = 1e99; for (int i = 0; i < Yp_size; i++) { for (int k = 0; k < pt_dim; k++) coord_i[k] = Yp_coord->data[i + k * Yp_coord->ld]; for (int j = 0; j < i; j++) { DTYPE dist_ij = 0.0; for (int k = 0; k < pt_dim; k++) { DTYPE diff = coord_i[k] - Yp_coord->data[j + k * Yp_coord->ld]; dist_ij += diff * diff; } dist_ij = DSQRT(dist_ij); min_dist->data[i] = MIN(min_dist->data[i], dist_ij); min_dist->data[j] = MIN(min_dist->data[j], dist_ij); } } const int Yp_size2 = Yp_size * 2; H2P_dense_mat_resize(pp, pt_dim, Yp_size2); for (int i = 0; i < Yp_size; i++) { DTYPE *tmp_coord0 = tmpA->data; DTYPE *tmp_coord1 = tmpA->data + pt_dim; for (int j = 0; j < pt_dim; j++) tmp_coord0[j] = Yp_coord->data[i + j * Yp_coord->ld]; DTYPE radius_i_scale = min_dist->data[i] * 0.33; int flag = 1; while (flag == 1) { DTYPE radius_1 = 0.0; for (int j = 0; j < pt_dim; j++) { tmp_coord1[j] = drand48() - 0.5; radius_1 += tmp_coord1[j] * tmp_coord1[j]; } DTYPE inv_radius_1 = 1.0 / DSQRT(radius_1); for (int j = 0; j < pt_dim; j++) { tmp_coord1[j] *= inv_radius_1; tmp_coord1[j] *= radius_i_scale; tmp_coord1[j] += tmp_coord0[j]; } if ((H2P_point_in_box(pt_dim, tmp_coord1, L2) == 0) && (H2P_point_in_box(pt_dim, tmp_coord1, L3) == 1)) flag = 0; } // End of "while (flag == 1)" DTYPE *coord_0 = pp->data + (2 * i); DTYPE *coord_1 = pp->data + (2 * i + 1); for (int j = 0; j < pt_dim; j++) { coord_0[j * Yp_size2] = tmp_coord0[j]; coord_1[j * Yp_size2] = tmp_coord1[j]; } } // End of i loop } // End of "if 
(reltol >= 1e-12)" // 6. Free working arrays H2P_dense_mat_destroy(&X0_coord); H2P_dense_mat_destroy(&Y0_coord); H2P_dense_mat_destroy(&tmp_coord); H2P_dense_mat_destroy(&Yp_coord); H2P_dense_mat_destroy(&tmpA); H2P_dense_mat_destroy(&min_dist); H2P_dense_mat_destroy(&QR_buff); H2P_int_vec_destroy(&skel_idx); H2P_int_vec_destroy(&ID_buff); free(X0_coord); free(Y0_coord); free(tmp_coord); free(Yp_coord); free(tmpA); free(min_dist); free(QR_buff); free(skel_idx); free(ID_buff); } // ----- Note: "radius" in this file == 0.5 * length of a cubic box ----- // // Calculate the enclosing box of a given set of points and adjust it if the proxy point file is provided void H2P_calc_enclosing_box(const int pt_dim, const int n_point, const DTYPE *coord, const char *fname, DTYPE **enbox_) { // Calculate the center of points in this box DTYPE *center = (DTYPE*) malloc(sizeof(DTYPE) * pt_dim); memset(center, 0, sizeof(DTYPE) * pt_dim); for (int j = 0; j < pt_dim; j++) { const DTYPE *coord_dim_j = coord + j * n_point; for (int i = 0; i < n_point; i++) center[j] += coord_dim_j[i]; } for (int j = 0; j < pt_dim; j++) center[j] /= (DTYPE) n_point; // Calculate enclosing box radius DTYPE radius = 0.0; for (int j = 0; j < pt_dim; j++) { const DTYPE *coord_dim_j = coord + j * n_point; DTYPE center_j = center[j]; for (int i = 0; i < n_point; i++) { DTYPE tmp = DABS(coord_dim_j[i] - center_j); radius = MAX(radius, tmp); } } // Adjust enclosing box radius if proxy point file is available FILE *inf = NULL; if (fname != NULL) inf = fopen(fname, "r"); if (inf != NULL) { int pt_dim_, L3_nlayer, num_pp; DTYPE reltol, minL; const char *fmt_str = (DTYPE_SIZE == 8) ? 
"%d %lf %d %lf %d" : "%d %f %d %f %d"; fscanf(inf, fmt_str, &pt_dim_, &reltol, &L3_nlayer, &minL, &num_pp); if (pt_dim == pt_dim_) { DTYPE k = DCEIL(DLOG2(radius / minL)); radius = minL * DPOW(2.0, k); } else { ERROR_PRINTF("File %s point dimension (%d) != current point dimension (%d)\n", fname, pt_dim_, pt_dim); } } if (inf != NULL) fclose(inf); // Generate the enclosing box DTYPE *enbox = (DTYPE*) malloc(sizeof(DTYPE) * 2 * pt_dim); for (int j = 0; j < pt_dim; j++) { enbox[j] = center[j] - radius; enbox[pt_dim + j] = 2 * radius; } *enbox_ = enbox; free(center); } // Write a set of proxy points to a text file void H2P_write_proxy_point_file( const char *fname, const int pt_dim, const DTYPE reltol, const int L3_nlayer, const DTYPE minL, const int num_pp, H2P_dense_mat_p *pp ) { FILE *ouf = fopen(fname, "w"); // Line 1: parameters fprintf(ouf, "%d %.3e %d %16.12f %d\n", pt_dim, reltol, L3_nlayer, minL, num_pp); // Line 2: number of proxy points in each proxy point set for (int i = 0; i < num_pp; i++) fprintf(ouf, "%d ", pp[i]->ncol); fprintf(ouf, "\n"); // Rest part: proxy point coordinates for (int i_pp = 0; i_pp < num_pp; i_pp++) { DTYPE *pp_i_coord = pp[i_pp]->data; const int pp_i_npt = pp[i_pp]->ncol; const int pp_i_ld = pp[i_pp]->ld; for (int i = 0; i < pp_i_npt; i++) { for (int j = 0; j < pt_dim; j++) fprintf(ouf, "% 16.12f ", pp_i_coord[j * pp_i_ld + i]); fprintf(ouf, "\n"); } } fclose(ouf); } // Generate proxy points for constructing H2 projection and skeleton matrices using // ID compress, also try to load proxy points from a file and update this file void H2P_generate_proxy_point_ID_file( H2Pack_p h2pack, const void *krnl_param, kernel_eval_fptr krnl_eval, const char *fname, H2P_dense_mat_p **pp_ ) { int pt_dim = h2pack->pt_dim; int krnl_dim = h2pack->krnl_dim; int n_level = h2pack->max_level + 1; DTYPE reltol = h2pack->QR_stop_tol; DTYPE pt_maxL = h2pack->root_enbox[pt_dim] * 0.5; DTYPE pt_minL = pt_maxL * DPOW(0.5, (DTYPE) h2pack->max_level); // Root box 
and level 1 box do not have admissible pairs --> don't need proxy points pt_maxL *= 0.25; // These are from proxy point file int pt_dim0, L3_nlayer0, num_pp0; DTYPE reltol0, minL0, maxL0; GET_ENV_INT_VAR(gen_pp_param.alg, "H2P_GEN_PP_ALG", "alg", 2, 0, 2); GET_ENV_INT_VAR(gen_pp_param.X0_size, "H2P_GEN_PP_X0_SIZE", "X0_size", 2000, 500, 5000); GET_ENV_INT_VAR(gen_pp_param.Y0_lsize, "H2P_GEN_PP_Y0_LSIZE", "Y0_lsize", 4000, 1000, 20000); GET_ENV_INT_VAR(gen_pp_param.L3_nlayer, "H2P_GEN_PP_L3_NLAYER", "L3_nlayer", 8, 8, 32); GET_ENV_INT_VAR(gen_pp_param.max_layer, "H2P_GEN_PP_MAX_LAYER", "max_layer", 8, 4, 32); GET_ENV_INT_VAR(gen_pp_param.print_timers, "H2P_PRINT_TIMERS", "print_timers", 0, 0, 1); // Determine min & max box radius in the file & for current points FILE *inf = NULL; if (fname != NULL) inf = fopen(fname, "r"); if (inf != NULL) { const char *fmt_str = (DTYPE_SIZE == 8) ? "%d %lf %d %lf %d" : "%d %f %d %f %d"; fscanf(inf, fmt_str, &pt_dim0, &reltol0, &L3_nlayer0, &minL0, &num_pp0); maxL0 = minL0 * DPOW(2.0, (DTYPE) (num_pp0 - 1)); DTYPE k = DLOG2(pt_maxL / minL0); DTYPE rk = (DTYPE) DROUND(k); int aligned_L = (DABS(rk - k) > 1e-10) ? 
0 : 1; if (pt_dim0 != pt_dim || reltol0 > reltol || L3_nlayer0 != gen_pp_param.L3_nlayer || aligned_L == 0) { WARNING_PRINTF("Proxy point file parameters are inconsistent with current point set parameters, calculate all proxy points\n"); pt_dim0 = pt_dim; L3_nlayer0 = gen_pp_param.L3_nlayer; num_pp0 = 0; reltol0 = reltol; minL0 = pt_minL; maxL0 = pt_minL * 0.5; // Make maxL0 invalid } } else { pt_dim0 = pt_dim; L3_nlayer0 = gen_pp_param.L3_nlayer; num_pp0 = 0; reltol0 = reltol; minL0 = pt_minL; maxL0 = pt_minL * 0.5; // Make maxL0 invalid } // End of "if (inf != NULL)" DTYPE curr_minL = MIN(pt_minL, minL0); DTYPE curr_maxL = MAX(pt_maxL, maxL0); int curr_num_pp = DROUND(DLOG2(curr_maxL / curr_minL)) + 1; int file_idx_s = DROUND(DLOG2(minL0 / curr_minL)); int file_idx_e = DROUND(DLOG2(maxL0 / curr_minL)); int pt_idx_s = DROUND(DLOG2(pt_minL / curr_minL)); int pt_idx_e = DROUND(DLOG2(pt_maxL / curr_minL)); if (h2pack->print_dbginfo) { DEBUG_PRINTF( "pt_minL, pt_maxL, minL0, maxL0, curr_minL, curr_maxL = %.3lf %.3lf %.3lf %.3lf %.3lf %.3lf\n", pt_minL, pt_maxL, minL0, maxL0, curr_minL, curr_maxL ); DEBUG_PRINTF( "curr_num_pp, file_idx_s, file_idx_e, pt_idx_s, pt_idx_e = %d %d %d %d %d\n", curr_num_pp, file_idx_s, file_idx_e, pt_idx_s, pt_idx_e ); } // Note: radius of pp0[i] == 0.5 * radius of pp0[i+1], need to reverse it for pp_ H2P_dense_mat_p *pp0 = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * curr_num_pp); ASSERT_PRINTF(pp0 != NULL, "Failed to allocate %d arrays for storing proxy points", curr_num_pp); for (int i = 0; i < curr_num_pp; i++) { H2P_dense_mat_init(&pp0[i], pt_dim, 0); pp0[i]->ncol = 0; } // Read proxy points from file if (inf != NULL) { int *pp_sizes = (int*) malloc(sizeof(int) * num_pp0); for (int i = 0; i < num_pp0; i++) fscanf(inf, "%d", pp_sizes + i); for (int pp_i = file_idx_s; pp_i <= file_idx_e; pp_i++) { H2P_dense_mat_p pp0_i = pp0[pp_i]; int pp0_i_npt = pp_sizes[pp_i - file_idx_s]; H2P_dense_mat_resize(pp0_i, pt_dim, pp0_i_npt); DTYPE 
*pp0_i_coord = pp0_i->data; for (int j = 0; j < pp0_i_npt; j++) { for (int i = 0; i < pt_dim; i++) fscanf(inf, DTYPE_FMTSTR, &pp0_i_coord[i * pp0_i_npt + j]); } } free(pp_sizes); fclose(inf); } // End of if (inf != NULL) // Calculate other proxy points double timers[4]; DTYPE L3_nlayer_ = (DTYPE) gen_pp_param.L3_nlayer; timers[GEN_PP_KRNL_TIMER_IDX] = 0.0; timers[GEN_PP_KRNL_TIMER_IDX] = 0.0; timers[GEN_PP_ID_TIMER_IDX] = 0.0; timers[GEN_PP_MISC_TIMER_IDX] = 0.0; for (int pp_i = 0; pp_i < curr_num_pp; pp_i++) { // Note: curr_minL is the radius, L1 is the edge length, need to * 2 DTYPE L1 = 2.0 * curr_minL * DPOW(2.0, (DTYPE) pp_i); DTYPE L2 = (1.0 + 2.0 * ALPHA_H2) * L1; DTYPE L3 = (1.0 + L3_nlayer_ * ALPHA_H2) * L1; int Y0_lsize_ = gen_pp_param.Y0_lsize; if (gen_pp_param.alg == 0) // Only one ring, multiple Y0_lsize_ by the number of rings { int n_layer = DROUND((L3 - L2) / L1); if (n_layer > gen_pp_param.max_layer) n_layer = gen_pp_param.max_layer; Y0_lsize_ *= n_layer; } if (pp_i >= file_idx_s && pp_i <= file_idx_e) continue; H2P_generate_proxy_point_nlayer( pt_dim, krnl_dim, reltol, krnl_param, krnl_eval, L1, L2, L3, gen_pp_param.alg, gen_pp_param.X0_size, Y0_lsize_, gen_pp_param.max_layer, pp0[pp_i], &timers[0] ); } // End of pp_i loop if (gen_pp_param.print_timers == 1) { INFO_PRINTF( "Proxy point generation: kernel, SpMM, ID, other time = %.3lf, %.3lf, %.3lf, %.3lf sec\n", timers[GEN_PP_KRNL_TIMER_IDX], timers[GEN_PP_KRNL_TIMER_IDX], timers[GEN_PP_ID_TIMER_IDX], timers[GEN_PP_MISC_TIMER_IDX] ); } // Write current proxy points to file if (fname != NULL) H2P_write_proxy_point_file(fname, pt_dim, reltol0, L3_nlayer0, curr_minL, curr_num_pp, pp0); // Copy pp0 to output pp_ and free pp0 H2P_dense_mat_p *pp = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * n_level); ASSERT_PRINTF(pp != NULL, "Failed to allocate %d arrays for storing proxy points", n_level); for (int i = 0; i < n_level; i++) { H2P_dense_mat_init(&pp[i], pt_dim, 0); pp[i]->ncol = 0; } for (int i 
= pt_idx_s; i <= pt_idx_e; i++) { int level = (n_level - 1) - (i - pt_idx_s); H2P_dense_mat_resize(pp[level], pp0[i]->nrow, pp0[i]->ncol); copy_matrix_block(sizeof(DTYPE), pp0[i]->nrow, pp0[i]->ncol, pp0[i]->data, pp0[i]->ld, pp[level]->data, pp[level]->ld); } for (int i = 0; i < curr_num_pp; i++) H2P_dense_mat_destroy(&pp0[i]); free(pp0); *pp_ = pp; } // Generate uniformly distributed proxy points on a box surface for constructing // H2 projection and skeleton matrices for SOME kernel function void H2P_generate_proxy_point_surface( const int pt_dim, const int xpt_dim, const int min_npt, const int max_level, const int min_level, DTYPE max_L, H2P_dense_mat_p **pp_ ) { if (pt_dim < 2 || pt_dim > 3) { ERROR_PRINTF("Only 2D and 3D systems are supported in this function.\n"); return; } H2P_dense_mat_p *pp = (H2P_dense_mat_p*) malloc(sizeof(H2P_dense_mat_p) * (max_level + 1)); ASSERT_PRINTF(pp != NULL, "Failed to allocate %d H2P_dense_mat structues for storing proxy points", max_level + 1); for (int i = 0; i <= max_level; i++) pp[i] = NULL; int npt_axis, npt; if (pt_dim == 2) { npt_axis = (min_npt + 3) / 4; npt = npt_axis * 4; } else { DTYPE n_point_face = (DTYPE) min_npt / 6.0; npt_axis = (int) ceil(sqrt(n_point_face)); npt = npt_axis * npt_axis * 6; } DTYPE h = 2.0 / (DTYPE) (npt_axis + 1); // Generate proxy points on the surface of [-1,1]^pt_dim box H2P_dense_mat_p unit_pp; H2P_dense_mat_init(&unit_pp, xpt_dim, npt); int index = 0; if (pt_dim == 3) { DTYPE *x = unit_pp->data; DTYPE *y = unit_pp->data + npt; DTYPE *z = unit_pp->data + npt * 2; DTYPE h_i = -1.0; for (int i = 0; i < npt_axis; i++) { h_i += h; DTYPE h_j = -1.0; for (int j = 0; j < npt_axis; j++) { h_j += h; x[index + 0] = h_i; y[index + 0] = h_j; z[index + 0] = -1.0; x[index + 1] = h_i; y[index + 1] = h_j; z[index + 1] = 1.0; x[index + 2] = h_i; y[index + 2] = -1.0; z[index + 2] = h_j; x[index + 3] = h_i; y[index + 3] = 1.0; z[index + 3] = h_j; x[index + 4] = -1.0; y[index + 4] = h_i; z[index + 4] = h_j; 
x[index + 5] = 1.0; y[index + 5] = h_i; z[index + 5] = h_j; index += 6; } } } // End of "if (pt_dim == 3)" if (pt_dim == 2) { DTYPE *x = unit_pp->data; DTYPE *y = unit_pp->data + npt; DTYPE h_i = -1.0; for (int i = 0; i < npt_axis; i++) { h_i += h; x[index + 0] = h_i; y[index + 0] = -1.0; x[index + 1] = h_i; y[index + 1] = 1.0; x[index + 2] = -1.0; y[index + 2] = h_i; x[index + 3] = 1.0; y[index + 3] = h_i; index += 4; } } // End of "if (pt_dim == 2)" if (xpt_dim > pt_dim) { DTYPE *ext = unit_pp->data + npt * pt_dim; memset(ext, 0, sizeof(DTYPE) * npt); } // Scale proxy points on unit box surface to different size as // proxy points on different levels DTYPE pow_2_level = 0.5; for (int level = 0; level < min_level; level++) pow_2_level *= 2.0; for (int level = min_level; level <= max_level; level++) { pow_2_level *= 2.0; H2P_dense_mat_init(&pp[level], xpt_dim, npt); DTYPE box_width = max_L / pow_2_level * 0.5; DTYPE adm_width = (1.0 + 2.0 * ALPHA_H2) * box_width; DTYPE *pp_level = pp[level]->data; #pragma omp simd for (int i = 0; i < xpt_dim * npt; i++) pp_level[i] = adm_width * unit_pp->data[i]; } H2P_dense_mat_destroy(&unit_pp); *pp_ = pp; }
jacobi-ompacc-opt2.c
// Jacobi iterative solver for the Helmholtz equation, OpenMP accelerator version.
// Liao, 7/9/2014, add collapse() inside jacobi()
// Liao, 1/22/2015, test nested map() clauses supported by device data environment reuse.
#include <stdio.h>
#include <math.h>
#include <assert.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#endif

// Timing support
#include <sys/time.h>

// Current wall-clock time in seconds
double time_stamp()
{
    struct timeval tv;
    gettimeofday(&tv, (struct timezone *) NULL);
    return (double) tv.tv_sec + 1.0e-6 * (double) tv.tv_usec;
}

double time1, time2;

void driver(void);
void initialize(void);
void jacobi(void);
void error_check(void);

/************************************************************
 * Solves a finite difference discretization of the Helmholtz
 * equation   (d2/dx2)u + (d2/dy2)u - alpha u = f
 * using the Jacobi iterative method.
 *
 * Modified: Sanjiv Shah, Kuck and Associates, Inc. (KAI), 1998
 * Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998
 * C translation: Chunhua Liao, University of Houston, Jan, 2005
 *
 * Input : n, m   - grid dimensions in x / y direction
 *         alpha  - Helmholtz constant (always greater than 0.0)
 *         tol    - error tolerance for the iterative solver
 *         relax  - successive over-relaxation parameter
 *         mits   - maximum iterations for the iterative solver
 * Output: u(n,m) - dependent variable (solution)
 *         f(n,m) - right hand side function
 *************************************************************/
#define MSIZE 512
int n, m, mits;
#define REAL float  // flexible between float and double
// Reference checksums; these values depend on MSIZE!!
REAL error_ref = 9.212767E-04, resid_ref = 2.355429E-08;
REAL tol, relax = 1.0, alpha = 0.0543;
REAL u[MSIZE][MSIZE], f[MSIZE][MSIZE], uold[MSIZE][MSIZE];
REAL dx, dy;

int main(void)
{
    // Problem setup is hard-coded (interactive scanf input removed from the
    // original benchmark to keep runs reproducible).
    n = MSIZE;
    m = MSIZE;
    tol = 0.0000000001;
    mits = 5000;
#if 0 // Not yet support concurrent CPU and GPU threads
#ifdef _OPENMP
#pragma omp parallel
    {
#pragma omp single
        printf("Running using %d threads...\n", omp_get_num_threads());
    }
#endif
#endif
    driver();
    return 0;
}

/*************************************************************
 * Subroutine driver()
 * Initializes the data, runs and times the solver, and
 * verifies the computed solution.
 *************************************************************/
void driver()
{
    initialize();

    time1 = time_stamp();
    jacobi();   /* Solve Helmholtz equation */
    time2 = time_stamp();

    printf("------------------------\n");
    printf("Execution time = %f\n", time2 - time1);

    error_check();
}

/******************************************************
 * Initializes the RHS f and the initial guess u.
 * Assumes the exact solution is u(x,y) = (1-x^2)*(1-y^2).
 * Note: xx and yy are truncated to int exactly as in the
 * reference code; the reference checksums depend on it.
 ******************************************************/
void initialize()
{
    int i, j, xx, yy;

    dx = 2.0 / (n - 1);
    dy = 2.0 / (m - 1);

    /* Initialize initial condition and RHS */
    for (i = 0; i < n; i++)
    {
        for (j = 0; j < m; j++)
        {
            xx = (int) (-1.0 + dx * (i - 1));
            yy = (int) (-1.0 + dy * (j - 1));
            u[i][j] = 0.0;
            f[i][j] = -1.0 * alpha * (1.0 - xx * xx) * (1.0 - yy * yy)
                      - 2.0 * (1.0 - xx * xx) - 2.0 * (1.0 - yy * yy);
        }
    }
}

/******************************************************************
 * Jacobi relaxation for the Helmholtz equation on a rectangular
 * grid with uniform spacing and Dirichlet boundary conditions.
 * The "target data" region keeps u/f/uold resident on the device
 * across the whole iteration loop (optimization over mapping them
 * on every kernel launch).
 *
 * Output : u(n,m) - Solution
 *****************************************************************/
void jacobi()
{
    REAL omega;
    int i, j, k;
    REAL error, resid, ax, ay, b;

    omega = relax;

    /* Stencil coefficients */
    ax = 1.0 / (dx * dx);                            /* X-direction coef */
    ay = 1.0 / (dy * dy);                            /* Y-direction coef */
    b = -2.0 / (dx * dx) - 2.0 / (dy * dy) - alpha;  /* Central coeff */

    error = 10.0 * tol;
    k = 1;

    // An optimization on top of naive coding: promoting data handling outside the while loop
    // data properties may change since the scope is bigger:
#pragma omp target data map(to:n, m, omega, ax, ay, b, f[0:n][0:m]) map(tofrom:u[0:n][0:m]) map(alloc:uold[0:n][0:m])
    while ((k <= mits) && (error > tol))
    {
        error = 0.0;

        /* Copy new solution into old */
#pragma omp target map(to:n, m, u[0:n][0:m]) map(from:uold[0:n][0:m])
#pragma omp parallel for private(j,i) collapse(2)
        for (i = 0; i < n; i++)
            for (j = 0; j < m; j++)
                uold[i][j] = u[i][j];

        /* One Jacobi sweep over the interior points */
#pragma omp target map(to:n, m, omega, ax, ay, b, f[0:n][0:m], uold[0:n][0:m]) map(from:u[0:n][0:m])
#pragma omp parallel for private(resid,j,i) reduction(+:error) collapse(2)
        for (i = 1; i < (n - 1); i++)
            for (j = 1; j < (m - 1); j++)
            {
                resid = (ax * (uold[i - 1][j] + uold[i + 1][j])
                       + ay * (uold[i][j - 1] + uold[i][j + 1])
                       + b * uold[i][j] - f[i][j]) / b;
                u[i][j] = uold[i][j] - omega * resid;
                error += resid * resid;
            }

        /* Error check */
        if (k % 500 == 0)
            printf("Finished %d iteration with error =%f\n", k, error);
        error = sqrt(error) / (n * m);
        k++;
    } /* End iteration loop */

    printf("Total Number of Iterations:%d\n", k);
    printf("Residual:%E\n", error);
    printf("Residual_ref :%E\n", resid_ref);
    printf("Diff ref=%E\n", fabs(error - resid_ref));
    assert(fabs(error - resid_ref) < 1E-14);
}

/************************************************************
 * Checks the error between the numerical and exact solution.
 ************************************************************/
void error_check()
{
    int i, j;
    REAL xx, yy, temp, error;

    dx = 2.0 / (n - 1);
    dy = 2.0 / (m - 1);
    error = 0.0;

    for (i = 0; i < n; i++)
        for (j = 0; j < m; j++)
        {
            xx = -1.0 + dx * (i - 1);
            yy = -1.0 + dy * (j - 1);
            temp = u[i][j] - (1.0 - xx * xx) * (1.0 - yy * yy);
            error += temp * temp;
        }
    error = sqrt(error) / (n * m);

    printf("Solution Error :%E \n", error);
    printf("Solution Error Ref :%E \n", error_ref);
    printf("Diff ref=%E\n", fabs(error - error_ref));
    assert(fabs(error - error_ref) < 1E-14);
}
GB_binop__iseq_fc64.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__iseq_fc64 // A.*B function (eWiseMult): GB_AemultB__iseq_fc64 // A*D function (colscale): (none) // D*A function (rowscale): (node) // C+=B function (dense accum): GB_Cdense_accumB__iseq_fc64 // C+=b function (dense accum): GB_Cdense_accumb__iseq_fc64 // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__iseq_fc64 // C=scalar+B GB_bind1st__iseq_fc64 // C=scalar+B' GB_bind1st_tran__iseq_fc64 // C=A+scalar GB_bind2nd__iseq_fc64 // C=A'+scalar GB_bind2nd_tran__iseq_fc64 // C type: GxB_FC64_t // A type: GxB_FC64_t // B,b type: GxB_FC64_t // BinaryOp: cij = GB_FC64_iseq (aij, bij) #define GB_ATYPE \ GxB_FC64_t #define GB_BTYPE \ GxB_FC64_t #define GB_CTYPE \ GxB_FC64_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ GxB_FC64_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ GxB_FC64_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ GxB_FC64_t t // cij = Ax [pA] 
#define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = GB_FC64_iseq (x, y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ISEQ || GxB_NO_FC64 || GxB_NO_ISEQ_FC64) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__iseq_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__iseq_fc64 ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__iseq_fc64 ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type GxB_FC64_t GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (none) ( GrB_Matrix C, const GrB_Matrix A, 
bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ #if 0 GrB_Info (node) ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *GB_RESTRICT Cx = (GxB_FC64_t *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B //------------------------------------------------------------------------------ #undef GB_FREE_ALL #define GB_FREE_ALL \ { \ GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \ GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \ GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \ } GrB_Info GB_AaddB__iseq_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, 
*kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_add_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__iseq_fc64 ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ; int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ; int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ; #include "GB_emult_template.c" GB_FREE_ALL ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__iseq_fc64 ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *GB_RESTRICT Bb, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ; GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Bb, p)) continue ; GxB_FC64_t bij = Bx [p] ; Cx [p] = GB_FC64_iseq (x, bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): 
apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__iseq_fc64 ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *GB_RESTRICT Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ; GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ; GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; GxB_FC64_t aij = Ax [p] ; Cx [p] = GB_FC64_iseq (aij, y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_iseq (x, aij) ; \ } GrB_Info GB_bind1st_tran__iseq_fc64 ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ GxB_FC64_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ GxB_FC64_t aij = Ax [pA] ; \ Cx [pC] = GB_FC64_iseq (aij, y) ; \ } GrB_Info GB_bind2nd_tran__iseq_fc64 ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
/* ===== file: GB_unaryop__ainv_int32_uint8.c ===== */
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_int32_uint8
// op(A') function:  GB_tran__ainv_int32_uint8

// C type:   int32_t
// A type:   uint8_t
// cast:     int32_t cij = (int32_t) aij
// unaryop:  cij = -aij

// type of the A matrix entries
#define GB_ATYPE \
    uint8_t

// type of the C matrix entries
#define GB_CTYPE \
    int32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

// address of the C entry at position p
#define GB_CX(p) Cx [p]

// unary operator: additive inverse (applied AFTER the cast to int32_t,
// so the negation is done in signed 32-bit arithmetic, not in uint8_t)
#define GB_OP(z, x) \
    z = -x ;

// casting: widen the uint8_t input to the signed int32_t output type
#define GB_CASTING(z, aij) \
    int32_t z = (int32_t) aij ;

// cij = op (cast (aij)): load, cast, apply the operator, store
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT32 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = -((int32_t) aij) element-wise over anz entries, in parallel.
// Returns GrB_NO_VALUE when this kernel is compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_int32_uint8
(
    int32_t *Cx,        // Cx and Ax may be aliased (safe: each p is read
                        // before it is written, independently per entry)
    uint8_t *Ax,
    int64_t anz,        // number of entries to process
    int nthreads        // number of OpenMP threads to use
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // embarrassingly parallel: one independent cast+negate per entry
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual transpose loop lives in the shared template
// GB_unaryop_transpose.c, specialized here via the GB_* macros above.
GrB_Info GB_tran__ainv_int32_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,    // per-slice row counts workspace
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice, // partition of A for parallelism
    int naslice                         // number of slices of A
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
/* ===== file: segment.c (ImageMagick MagickCore) ===== */
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % SSSSS EEEEE GGGG M M EEEEE N N TTTTT % % SS E G MM MM E NN N T % % SSS EEE G GGG M M M EEE N N N T % % SS E G G M M E N NN T % % SSSSS EEEEE GGGG M M EEEEE N N T % % % % % % MagickCore Methods to Segment an Image with Thresholding Fuzzy c-Means % % % % Software Design % % John Cristy % % April 1993 % % % % % % Copyright 1999-2012 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Segment segments an image by analyzing the histograms of the color % components and identifying units that are homogeneous with the fuzzy % c-means technique. The scale-space filter analyzes the histograms of % the three color components of the image and identifies a set of % classes. The extents of each class is used to coarsely segment the % image with thresholding. The color associated with each class is % determined by the mean color of all pixels within the extents of a % particular class. Finally, any unclassified pixels are assigned to % the closest class with the fuzzy c-means technique. % % The fuzzy c-Means algorithm can be summarized as follows: % % o Build a histogram, one for each color component of the image. 
% % o For each histogram, successively apply the scale-space filter and % build an interval tree of zero crossings in the second derivative % at each scale. Analyze this scale-space ``fingerprint'' to % determine which peaks and valleys in the histogram are most % predominant. % % o The fingerprint defines intervals on the axis of the histogram. % Each interval contains either a minima or a maxima in the original % signal. If each color component lies within the maxima interval, % that pixel is considered ``classified'' and is assigned an unique % class number. % % o Any pixel that fails to be classified in the above thresholding % pass is classified using the fuzzy c-Means technique. It is % assigned to one of the classes discovered in the histogram analysis % phase. % % The fuzzy c-Means technique attempts to cluster a pixel by finding % the local minima of the generalized within group sum of squared error % objective function. A pixel is assigned to the closest class of % which the fuzzy membership has a maximum value. % % Segment is strongly based on software written by Andy Gallo, % University of Delaware. % % The following reference was used in creating this program: % % Young Won Lim, Sang Uk Lee, "On The Color Image Segmentation % Algorithm Based on the Thresholding and the Fuzzy c-Means % Techniques", Pattern Recognition, Volume 23, Number 9, pages % 935-952, 1990. % % */ #include "magick/studio.h" #include "magick/cache.h" #include "magick/color.h" #include "magick/colormap.h" #include "magick/colorspace.h" #include "magick/colorspace-private.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/memory_.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/quantize.h" #include "magick/quantum.h" #include "magick/quantum-private.h" #include "magick/segment.h" #include "magick/string_.h" /* Define declarations. 
*/ #define MaxDimension 3 #define DeltaTau 0.5f #if defined(FastClassify) #define WeightingExponent 2.0 #define SegmentPower(ratio) (ratio) #else #define WeightingExponent 2.5 #define SegmentPower(ratio) pow(ratio,(double) (1.0/(weighting_exponent-1.0))); #endif #define Tau 5.2f /* Typedef declarations. */ typedef struct _ExtentPacket { MagickRealType center; ssize_t index, left, right; } ExtentPacket; typedef struct _Cluster { struct _Cluster *next; ExtentPacket red, green, blue; ssize_t count, id; } Cluster; typedef struct _IntervalTree { MagickRealType tau; ssize_t left, right; MagickRealType mean_stability, stability; struct _IntervalTree *sibling, *child; } IntervalTree; typedef struct _ZeroCrossing { MagickRealType tau, histogram[256]; short crossings[256]; } ZeroCrossing; /* Constant declarations. */ static const int Blue = 2, Green = 1, Red = 0, SafeMargin = 3, TreeLength = 600; /* Method prototypes. */ static MagickRealType OptimalTau(const ssize_t *,const double,const double,const double, const double,short *); static ssize_t DefineRegion(const short *,ExtentPacket *); static void InitializeHistogram(const Image *,ssize_t **,ExceptionInfo *), ScaleSpace(const ssize_t *,const MagickRealType,MagickRealType *), ZeroCrossHistogram(MagickRealType *,const MagickRealType,short *); /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C l a s s i f y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Classify() defines one or more classes. Each pixel is thresholded to % determine which class it belongs to. If the class is not identified it is % assigned to the closest class based on the fuzzy c-Means technique. % % The format of the Classify method is: % % MagickBooleanType Classify(Image *image,short **extrema, % const MagickRealType cluster_threshold, % const MagickRealType weighting_exponent, % const MagickBooleanType verbose) % % A description of each parameter follows. 
% % o image: the image. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o cluster_threshold: This MagickRealType represents the minimum number of % pixels contained in a hexahedra before it can be considered valid % (expressed as a percentage). % % o weighting_exponent: Specifies the membership weighting exponent. % % o verbose: A value greater than zero prints detailed information about % the identified classes. % */ static MagickBooleanType Classify(Image *image,short **extrema, const MagickRealType cluster_threshold, const MagickRealType weighting_exponent,const MagickBooleanType verbose) { #define SegmentImageTag "Segment/Image" CacheView *image_view; Cluster *cluster, *head, *last_cluster, *next_cluster; ExceptionInfo *exception; ExtentPacket blue, green, red; MagickOffsetType progress; MagickRealType *free_squares; MagickStatusType status; register ssize_t i; register MagickRealType *squares; size_t number_clusters; ssize_t count, y; /* Form clusters. */ cluster=(Cluster *) NULL; head=(Cluster *) NULL; (void) ResetMagickMemory(&red,0,sizeof(red)); (void) ResetMagickMemory(&green,0,sizeof(green)); (void) ResetMagickMemory(&blue,0,sizeof(blue)); while (DefineRegion(extrema[Red],&red) != 0) { green.index=0; while (DefineRegion(extrema[Green],&green) != 0) { blue.index=0; while (DefineRegion(extrema[Blue],&blue) != 0) { /* Allocate a new class. */ if (head != (Cluster *) NULL) { cluster->next=(Cluster *) AcquireMagickMemory( sizeof(*cluster->next)); cluster=cluster->next; } else { cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); head=cluster; } if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. 
*/ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; } } } if (head == (Cluster *) NULL) { /* No classes were identified-- create one. */ cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster)); if (cluster == (Cluster *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Initialize a new class. */ cluster->count=0; cluster->red=red; cluster->green=green; cluster->blue=blue; cluster->next=(Cluster *) NULL; head=cluster; } /* Count the pixels for each cluster. */ status=MagickTrue; count=0; progress=0; exception=(&image->exception); image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *p; register ssize_t x; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <= (cluster->blue.right+SafeMargin))) { /* Count this pixel. 
*/ count++; cluster->red.center+=(MagickRealType) ScaleQuantumToChar(GetPixelRed(p)); cluster->green.center+=(MagickRealType) ScaleQuantumToChar(GetPixelGreen(p)); cluster->blue.center+=(MagickRealType) ScaleQuantumToChar(GetPixelBlue(p)); cluster->count++; break; } p++; } if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++, 2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); /* Remove clusters that do not meet minimum cluster threshold. */ count=0; last_cluster=head; next_cluster=head; for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; if ((cluster->count > 0) && (cluster->count >= (count*cluster_threshold/100.0))) { /* Initialize cluster. */ cluster->id=count; cluster->red.center/=cluster->count; cluster->green.center/=cluster->count; cluster->blue.center/=cluster->count; count++; last_cluster=cluster; continue; } /* Delete cluster. */ if (cluster == head) head=next_cluster; else last_cluster->next=next_cluster; cluster=(Cluster *) RelinquishMagickMemory(cluster); } number_clusters=(size_t) count; if (verbose != MagickFalse) { /* Print cluster statistics. */ (void) FormatLocaleFile(stdout,"Fuzzy C-means Statistics\n"); (void) FormatLocaleFile(stdout,"===================\n\n"); (void) FormatLocaleFile(stdout,"\tCluster Threshold = %g\n",(double) cluster_threshold); (void) FormatLocaleFile(stdout,"\tWeighting Exponent = %g\n",(double) weighting_exponent); (void) FormatLocaleFile(stdout,"\tTotal Number of Clusters = %.20g\n\n", (double) number_clusters); /* Print the total number of points per cluster. 
*/ (void) FormatLocaleFile(stdout,"\n\nNumber of Vectors Per Cluster\n"); (void) FormatLocaleFile(stdout,"=============================\n\n"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) (void) FormatLocaleFile(stdout,"Cluster #%.20g = %.20g\n",(double) cluster->id,(double) cluster->count); /* Print the cluster extents. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Extents: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"================"); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout, "%.20g-%.20g %.20g-%.20g %.20g-%.20g\n",(double) cluster->red.left,(double) cluster->red.right,(double) cluster->green.left,(double) cluster->green.right,(double) cluster->blue.left,(double) cluster->blue.right); } /* Print the cluster center values. */ (void) FormatLocaleFile(stdout, "\n\n\nCluster Center Values: (Vector Size: %d)\n",MaxDimension); (void) FormatLocaleFile(stdout,"====================="); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { (void) FormatLocaleFile(stdout,"\n\nCluster #%.20g\n\n",(double) cluster->id); (void) FormatLocaleFile(stdout,"%g %g %g\n",(double) cluster->red.center,(double) cluster->green.center,(double) cluster->blue.center); } (void) FormatLocaleFile(stdout,"\n"); } if (number_clusters > 256) ThrowBinaryException(ImageError,"TooManyClusters",image->filename); /* Speed up distance calculations. */ squares=(MagickRealType *) AcquireQuantumMemory(513UL,sizeof(*squares)); if (squares == (MagickRealType *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); squares+=255; for (i=(-255); i <= 255; i++) squares[i]=(MagickRealType) i*(MagickRealType) i; /* Allocate image colormap. 
*/ if (AcquireImageColormap(image,number_clusters) == MagickFalse) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); i=0; for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { image->colormap[i].red=ScaleCharToQuantum((unsigned char) (cluster->red.center+0.5)); image->colormap[i].green=ScaleCharToQuantum((unsigned char) (cluster->green.center+0.5)); image->colormap[i].blue=ScaleCharToQuantum((unsigned char) (cluster->blue.center+0.5)); i++; } /* Do course grain classes. */ exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) #endif for (y=0; y < (ssize_t) image->rows; y++) { Cluster *cluster; register const PixelPacket *restrict p; register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { SetPixelIndex(indexes+x,0); for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next) { if (((ssize_t) ScaleQuantumToChar(q->red) >= (cluster->red.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->red) <= (cluster->red.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) >= (cluster->green.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->green) <= (cluster->green.right+SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) >= (cluster->blue.left-SafeMargin)) && ((ssize_t) ScaleQuantumToChar(q->blue) <= (cluster->blue.right+SafeMargin))) { /* Classify this pixel. */ SetPixelIndex(indexes+x,cluster->id); break; } } if (cluster == (Cluster *) NULL) { MagickRealType distance_squared, local_minima, numerator, ratio, sum; register ssize_t j, k; /* Compute fuzzy membership. 
*/ local_minima=0.0; for (j=0; j < (ssize_t) image->colors; j++) { sum=0.0; p=image->colormap+j; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; numerator=distance_squared; for (k=0; k < (ssize_t) image->colors; k++) { p=image->colormap+k; distance_squared=squares[(ssize_t) ScaleQuantumToChar(q->red)- (ssize_t) ScaleQuantumToChar(GetPixelRed(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->green)- (ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]+ squares[(ssize_t) ScaleQuantumToChar(q->blue)- (ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]; ratio=numerator/distance_squared; sum+=SegmentPower(ratio); } if ((sum != 0.0) && ((1.0/sum) > local_minima)) { /* Classify this pixel. */ local_minima=1.0/sum; SetPixelIndex(indexes+x,j); } } } q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_Classify) #endif proceed=SetImageProgress(image,SegmentImageTag,progress++, 2*image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); status&=SyncImage(image); /* Relinquish resources. 
*/ for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster) { next_cluster=cluster->next; cluster=(Cluster *) RelinquishMagickMemory(cluster); } squares-=255; free_squares=squares; free_squares=(MagickRealType *) RelinquishMagickMemory(free_squares); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C r o s s i n g s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCrossings() guarantees that an even number of zero crossings % always lie between two crossings. % % The format of the ConsolidateCrossings method is: % % ConsolidateCrossings(ZeroCrossing *zero_crossing, % const size_t number_crossings) % % A description of each parameter follows. % % o zero_crossing: Specifies an array of structures of type ZeroCrossing. % % o number_crossings: This size_t specifies the number of elements % in the zero_crossing array. % */ static inline ssize_t MagickAbsoluteValue(const ssize_t x) { if (x < 0) return(-x); return(x); } static inline ssize_t MagickMax(const ssize_t x,const ssize_t y) { if (x > y) return(x); return(y); } static inline ssize_t MagickMin(const ssize_t x,const ssize_t y) { if (x < y) return(x); return(y); } static void ConsolidateCrossings(ZeroCrossing *zero_crossing, const size_t number_crossings) { register ssize_t i, j, k, l; ssize_t center, correct, count, left, right; /* Consolidate zero crossings. */ for (i=(ssize_t) number_crossings-1; i >= 0; i--) for (j=0; j <= 255; j++) { if (zero_crossing[i].crossings[j] == 0) continue; /* Find the entry that is closest to j and still preserves the property that there are an even number of crossings between intervals. 
*/ for (k=j-1; k > 0; k--) if (zero_crossing[i+1].crossings[k] != 0) break; left=MagickMax(k,0); center=j; for (k=j+1; k < 255; k++) if (zero_crossing[i+1].crossings[k] != 0) break; right=MagickMin(k,255); /* K is the zero crossing just left of j. */ for (k=j-1; k > 0; k--) if (zero_crossing[i].crossings[k] != 0) break; if (k < 0) k=0; /* Check center for an even number of crossings between k and j. */ correct=(-1); if (zero_crossing[i+1].crossings[j] != 0) { count=0; for (l=k+1; l < center; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (center != k)) correct=center; } /* Check left for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < left; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (left != k)) correct=left; } /* Check right for an even number of crossings between k and j. */ if (correct == -1) { count=0; for (l=k+1; l < right; l++) if (zero_crossing[i+1].crossings[l] != 0) count++; if (((count % 2) == 0) && (right != k)) correct=right; } l=(ssize_t) zero_crossing[i].crossings[j]; zero_crossing[i].crossings[j]=0; if (correct != -1) zero_crossing[i].crossings[correct]=(short) l; } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e f i n e R e g i o n % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DefineRegion() defines the left and right boundaries of a peak region. % % The format of the DefineRegion method is: % % ssize_t DefineRegion(const short *extrema,ExtentPacket *extents) % % A description of each parameter follows. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. % % o extents: This pointer to an ExtentPacket represent the extends % of a particular peak or valley of a color component. 
%
*/

/*
  DefineRegion() defines the left and right boundaries of a peak region.

  Scans extrema[] starting at extents->index: the region begins at the first
  positive entry (a maximum) and ends just before the first following negative
  entry (a minimum).  extents->index is advanced past the region so repeated
  calls enumerate successive regions.  Returns MagickTrue when a region was
  found, MagickFalse when no region remains.

    o extrema: array of 256 shorts marking histogram peaks (>0) and
      valleys (<0).

    o extents: in/out; left/right bounds of the region are stored here.
*/
static ssize_t DefineRegion(const short *extrema,ExtentPacket *extents)
{
  /*
    Initialize to default values.
  */
  extents->left=0;
  extents->center=0.0;
  extents->right=255;
  /*
    Find the left side (maxima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] > 0)
      break;
  if (extents->index > 255)
    return(MagickFalse);  /* no left side - no region exists */
  extents->left=extents->index;
  /*
    Find the right side (minima).
  */
  for ( ; extents->index <= 255; extents->index++)
    if (extrema[extents->index] < 0)
      break;
  extents->right=extents->index-1;
  return(MagickTrue);
}

/*
  DerivativeHistogram() determines the derivative of the 256-entry histogram
  using central differencing; the two endpoints are filled in with a
  second-order polynomial (one-sided) estimate.

    o histogram: input array of 256 MagickRealType bin counts.

    o derivative: output array of 256 MagickRealType derivative values.
*/
static void DerivativeHistogram(const MagickRealType *histogram,
  MagickRealType *derivative)
{
  register ssize_t
    i,
    n;

  /*
    Compute endpoints using second order polynomial interpolation.
  */
  n=255;
  derivative[0]=(-1.5*histogram[0]+2.0*histogram[1]-0.5*histogram[2]);
  derivative[n]=(0.5*histogram[n-2]-2.0*histogram[n-1]+1.5*histogram[n]);
  /*
    Compute derivative using central differencing.
  */
  for (i=1; i < n; i++)
    derivative[i]=(histogram[i+1]-histogram[i-1])/2.0;
  return;
}

/*
  GetImageDynamicThreshold() returns the dynamic threshold for an image.

  Builds per-channel histograms, finds their scale-space extrema via
  OptimalTau(), forms one candidate cluster per (red,green,blue) region
  triple, assigns every pixel to the first matching cluster, discards
  clusters below cluster_threshold, and finally stores the midpoint between
  the smallest (object) and largest (background) surviving clusters in
  *pixel as the threshold color.

    o image: the image.

    o cluster_threshold: minimum pixel population (percent) for a cluster
      to survive.

    o smooth_threshold: noise floor applied to the histogram's second
      derivative (0.0 is promoted to 1.0 below).

    o pixel: receives the threshold color.

    o exception: receives errors or warnings.
*/
MagickExport MagickBooleanType GetImageDynamicThreshold(const Image *image,
  const double cluster_threshold,const double smooth_threshold,
  MagickPixelPacket *pixel,ExceptionInfo *exception)
{
  Cluster
    *background,
    *cluster,
    *object,
    *head,
    *last_cluster,
    *next_cluster;

  ExtentPacket
    blue,
    green,
    red;

  MagickBooleanType
    proceed;

  MagickRealType
    threshold;

  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  short
    *extrema[MaxDimension];

  ssize_t
    count,
    *histogram[MaxDimension],
    y;

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  GetMagickPixelPacket(image,pixel);
  for (i=0; i < MaxDimension; i++)
  {
    /* NOTE(review): extrema[] is sized with sizeof(**histogram) (ssize_t),
       not sizeof(**extrema) (short) -- over-allocation, harmless but
       probably unintended; confirm before changing. */
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256UL,sizeof(**histogram));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the channels already allocated before failing. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        (void) ThrowMagickException(exception,GetMagickModule(),
          ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
        return(MagickFalse);
      }
  }
  /*
    Initialize histogram.
  */
  InitializeHistogram(image,histogram,exception);
  (void) OptimalTau(histogram[Red],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2f,DeltaTau,
    (smooth_threshold == 0.0f ? 1.0f : smooth_threshold),extrema[Blue]);
  /*
    Form clusters: one candidate per (red,green,blue) region triple.
  */
  cluster=(Cluster *) NULL;
  head=(Cluster *) NULL;
  (void) ResetMagickMemory(&red,0,sizeof(red));
  (void) ResetMagickMemory(&green,0,sizeof(green));
  (void) ResetMagickMemory(&blue,0,sizeof(blue));
  while (DefineRegion(extrema[Red],&red) != 0)
  {
    green.index=0;
    while (DefineRegion(extrema[Green],&green) != 0)
    {
      blue.index=0;
      while (DefineRegion(extrema[Blue],&blue) != 0)
      {
        /*
          Allocate a new class.
        */
        if (head != (Cluster *) NULL)
          {
            cluster->next=(Cluster *) AcquireMagickMemory(
              sizeof(*cluster->next));
            cluster=cluster->next;
          }
        else
          {
            cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
            head=cluster;
          }
        if (cluster == (Cluster *) NULL)
          {
            /* NOTE(review): this path leaks histogram[], extrema[] and any
               clusters already on the list -- confirm and fix upstream. */
            (void) ThrowMagickException(exception,GetMagickModule(),
              ResourceLimitError,"MemoryAllocationFailed","`%s'",
              image->filename);
            return(MagickFalse);
          }
        /*
          Initialize a new class.
        */
        cluster->count=0;
        cluster->red=red;
        cluster->green=green;
        cluster->blue=blue;
        cluster->next=(Cluster *) NULL;
      }
    }
  }
  if (head == (Cluster *) NULL)
    {
      /*
        No classes were identified-- create one.
      */
      cluster=(Cluster *) AcquireMagickMemory(sizeof(*cluster));
      if (cluster == (Cluster *) NULL)
        {
          (void) ThrowMagickException(exception,GetMagickModule(),
            ResourceLimitError,"MemoryAllocationFailed","`%s'",
            image->filename);
          return(MagickFalse);
        }
      /*
        Initialize a new class.
      */
      cluster->count=0;
      cluster->red=red;
      cluster->green=green;
      cluster->blue=blue;
      cluster->next=(Cluster *) NULL;
      head=cluster;
    }
  /*
    Count the pixels for each cluster.  A pixel joins the FIRST cluster whose
    (slightly widened, +/-SafeMargin) RGB box contains it.
  */
  count=0;
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (cluster=head; cluster != (Cluster *) NULL; cluster=cluster->next)
        if (((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) >=
             (cluster->red.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelRed(p)) <=
             (cluster->red.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) >=
             (cluster->green.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelGreen(p)) <=
             (cluster->green.right+SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) >=
             (cluster->blue.left-SafeMargin)) &&
            ((ssize_t) ScaleQuantumToChar(GetPixelBlue(p)) <=
             (cluster->blue.right+SafeMargin)))
          {
            /*
              Count this pixel.
            */
            count++;
            cluster->red.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelRed(p));
            cluster->green.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelGreen(p));
            cluster->blue.center+=(MagickRealType)
              ScaleQuantumToChar(GetPixelBlue(p));
            cluster->count++;
            break;
          }
      p++;
    }
    /* presumably 2*rows because counting is half the overall work --
       TODO confirm against the progress-reporting convention. */
    proceed=SetImageProgress(image,SegmentImageTag,(MagickOffsetType) y,
      2*image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /*
    Remove clusters that do not meet minimum cluster threshold.
  */
  count=0;
  last_cluster=head;
  next_cluster=head;
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    if ((cluster->count > 0) &&
        (cluster->count >= (count*cluster_threshold/100.0)))
      {
        /*
          Initialize cluster: convert accumulated sums to mean centers.
        */
        cluster->id=count;
        cluster->red.center/=cluster->count;
        cluster->green.center/=cluster->count;
        cluster->blue.center/=cluster->count;
        count++;
        last_cluster=cluster;
        continue;
      }
    /*
      Delete cluster.
    */
    if (cluster == head)
      head=next_cluster;
    else
      last_cluster->next=next_cluster;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  /*
    Pick the smallest surviving cluster as the object and the largest as the
    background; the threshold is the midpoint of their centers per channel.
  */
  object=head;
  background=head;
  if (count > 1)
    {
      object=head->next;
      for (cluster=object; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count < object->count)
          object=cluster;
        cluster=cluster->next;
      }
      background=head->next;
      for (cluster=background; cluster->next != (Cluster *) NULL; )
      {
        if (cluster->count > background->count)
          background=cluster;
        cluster=cluster->next;
      }
    }
  threshold=(background->red.center+object->red.center)/2.0;
  pixel->red=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->green.center+object->green.center)/2.0;
  pixel->green=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  threshold=(background->blue.center+object->blue.center)/2.0;
  pixel->blue=(MagickRealType) ScaleCharToQuantum((unsigned char)
    (threshold+0.5));
  /*
    Relinquish resources.
  */
  for (cluster=head; cluster != (Cluster *) NULL; cluster=next_cluster)
  {
    next_cluster=cluster->next;
    cluster=(Cluster *) RelinquishMagickMemory(cluster);
  }
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(MagickTrue);
}

/*
  InitializeHistogram() computes the per-channel 256-bin histogram for an
  image.

    o image: the image, read via the virtual pixel cache.

    o histogram: histogram[Red|Green|Blue][0..255] receive the bin counts.

    o exception: receives errors from the pixel cache.
*/
static void InitializeHistogram(const Image *image,ssize_t **histogram,
  ExceptionInfo *exception)
{
  register const PixelPacket
    *p;

  register ssize_t
    i,
    x;

  ssize_t
    y;

  /*
    Initialize histogram.
  */
  for (i=0; i <= 255; i++)
  {
    histogram[Red][i]=0;
    histogram[Green][i]=0;
    histogram[Blue][i]=0;
  }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetVirtualPixels(image,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      histogram[Red][(ssize_t) ScaleQuantumToChar(GetPixelRed(p))]++;
      histogram[Green][(ssize_t) ScaleQuantumToChar(GetPixelGreen(p))]++;
      histogram[Blue][(ssize_t) ScaleQuantumToChar(GetPixelBlue(p))]++;
      p++;
    }
  }
}

/*
  InitializeIntervalTree() initializes an interval tree from the lists of
  zero crossings.

  The format of the InitializeIntervalTree method is:

    InitializeIntervalTree(IntervalTree **list,ssize_t *number_nodes,
      IntervalTree *node)

  A description of each parameter follows.

    o zero_crossing: Specifies an array of structures of type ZeroCrossing.

    o number_crossings: This size_t specifies the number of elements
      in the zero_crossing array.
% */ static void InitializeList(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) list[(*number_nodes)++]=node; InitializeList(list,number_nodes,node->sibling); InitializeList(list,number_nodes,node->child); } static void MeanStability(IntervalTree *node) { register IntervalTree *child; if (node == (IntervalTree *) NULL) return; node->mean_stability=0.0; child=node->child; if (child != (IntervalTree *) NULL) { register ssize_t count; register MagickRealType sum; sum=0.0; count=0; for ( ; child != (IntervalTree *) NULL; child=child->sibling) { sum+=child->stability; count++; } node->mean_stability=sum/(MagickRealType) count; } MeanStability(node->sibling); MeanStability(node->child); } static void Stability(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->child == (IntervalTree *) NULL) node->stability=0.0; else node->stability=node->tau-(node->child)->tau; Stability(node->sibling); Stability(node->child); } static IntervalTree *InitializeIntervalTree(const ZeroCrossing *zero_crossing, const size_t number_crossings) { IntervalTree *head, **list, *node, *root; register ssize_t i; ssize_t j, k, left, number_nodes; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return((IntervalTree *) NULL); /* The root is the entire histogram. */ root=(IntervalTree *) AcquireMagickMemory(sizeof(*root)); root->child=(IntervalTree *) NULL; root->sibling=(IntervalTree *) NULL; root->tau=0.0; root->left=0; root->right=255; for (i=(-1); i < (ssize_t) number_crossings; i++) { /* Initialize list with all nodes with no children. */ number_nodes=0; InitializeList(list,&number_nodes,root); /* Split list. 
*/ for (j=0; j < number_nodes; j++) { head=list[j]; left=head->left; node=head; for (k=head->left+1; k < head->right; k++) { if (zero_crossing[i+1].crossings[k] != 0) { if (node == head) { node->child=(IntervalTree *) AcquireMagickMemory( sizeof(*node->child)); node=node->child; } else { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; } node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=k; left=k; } } if (left != head->left) { node->sibling=(IntervalTree *) AcquireMagickMemory( sizeof(*node->sibling)); node=node->sibling; node->tau=zero_crossing[i+1].tau; node->child=(IntervalTree *) NULL; node->sibling=(IntervalTree *) NULL; node->left=left; node->right=head->right; } } } /* Determine the stability: difference between a nodes tau and its child. */ Stability(root->child); MeanStability(root->child); list=(IntervalTree **) RelinquishMagickMemory(list); return(root); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + O p t i m a l T a u % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % OptimalTau() finds the optimal tau for each band of the histogram. % % The format of the OptimalTau method is: % % MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, % const double min_tau,const double delta_tau, % const double smooth_threshold,short *extrema) % % A description of each parameter follows. % % o histogram: Specifies an array of integers representing the number % of pixels for each intensity of a particular color component. % % o extrema: Specifies a pointer to an array of integers. They % represent the peaks and valleys of the histogram for each color % component. 
% */ static void ActiveNodes(IntervalTree **list,ssize_t *number_nodes, IntervalTree *node) { if (node == (IntervalTree *) NULL) return; if (node->stability >= node->mean_stability) { list[(*number_nodes)++]=node; ActiveNodes(list,number_nodes,node->sibling); } else { ActiveNodes(list,number_nodes,node->sibling); ActiveNodes(list,number_nodes,node->child); } } static void FreeNodes(IntervalTree *node) { if (node == (IntervalTree *) NULL) return; FreeNodes(node->sibling); FreeNodes(node->child); node=(IntervalTree *) RelinquishMagickMemory(node); } static MagickRealType OptimalTau(const ssize_t *histogram,const double max_tau, const double min_tau,const double delta_tau,const double smooth_threshold, short *extrema) { IntervalTree **list, *node, *root; MagickBooleanType peak; MagickRealType average_tau, *derivative, *second_derivative, tau, value; register ssize_t i, x; size_t count, number_crossings; ssize_t index, j, k, number_nodes; ZeroCrossing *zero_crossing; /* Allocate interval tree. */ list=(IntervalTree **) AcquireQuantumMemory((size_t) TreeLength, sizeof(*list)); if (list == (IntervalTree **) NULL) return(0.0); /* Allocate zero crossing list. */ count=(size_t) ((max_tau-min_tau)/delta_tau)+2; zero_crossing=(ZeroCrossing *) AcquireQuantumMemory((size_t) count, sizeof(*zero_crossing)); if (zero_crossing == (ZeroCrossing *) NULL) return(0.0); for (i=0; i < (ssize_t) count; i++) zero_crossing[i].tau=(-1.0); /* Initialize zero crossing list. 
*/ derivative=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*derivative)); second_derivative=(MagickRealType *) AcquireQuantumMemory(256, sizeof(*second_derivative)); if ((derivative == (MagickRealType *) NULL) || (second_derivative == (MagickRealType *) NULL)) ThrowFatalException(ResourceLimitFatalError, "UnableToAllocateDerivatives"); i=0; for (tau=max_tau; tau >= min_tau; tau-=delta_tau) { zero_crossing[i].tau=tau; ScaleSpace(histogram,tau,zero_crossing[i].histogram); DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); i++; } /* Add an entry for the original histogram. */ zero_crossing[i].tau=0.0; for (j=0; j <= 255; j++) zero_crossing[i].histogram[j]=(MagickRealType) histogram[j]; DerivativeHistogram(zero_crossing[i].histogram,derivative); DerivativeHistogram(derivative,second_derivative); ZeroCrossHistogram(second_derivative,smooth_threshold, zero_crossing[i].crossings); number_crossings=(size_t) i; derivative=(MagickRealType *) RelinquishMagickMemory(derivative); second_derivative=(MagickRealType *) RelinquishMagickMemory(second_derivative); /* Ensure the scale-space fingerprints form lines in scale-space, not loops. */ ConsolidateCrossings(zero_crossing,number_crossings); /* Force endpoints to be included in the interval. */ for (i=0; i <= (ssize_t) number_crossings; i++) { for (j=0; j < 255; j++) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[0]=(-zero_crossing[i].crossings[j]); for (j=255; j > 0; j--) if (zero_crossing[i].crossings[j] != 0) break; zero_crossing[i].crossings[255]=(-zero_crossing[i].crossings[j]); } /* Initialize interval tree. */ root=InitializeIntervalTree(zero_crossing,number_crossings); if (root == (IntervalTree *) NULL) return(0.0); /* Find active nodes: stability is greater (or equal) to the mean stability of its children. 
*/ number_nodes=0; ActiveNodes(list,&number_nodes,root->child); /* Initialize extrema. */ for (i=0; i <= 255; i++) extrema[i]=0; for (i=0; i < number_nodes; i++) { /* Find this tau in zero crossings list. */ k=0; node=list[i]; for (j=0; j <= (ssize_t) number_crossings; j++) if (zero_crossing[j].tau == node->tau) k=j; /* Find the value of the peak. */ peak=zero_crossing[k].crossings[node->right] == -1 ? MagickTrue : MagickFalse; index=node->left; value=zero_crossing[k].histogram[index]; for (x=node->left; x <= node->right; x++) { if (peak != MagickFalse) { if (zero_crossing[k].histogram[x] > value) { value=zero_crossing[k].histogram[x]; index=x; } } else if (zero_crossing[k].histogram[x] < value) { value=zero_crossing[k].histogram[x]; index=x; } } for (x=node->left; x <= node->right; x++) { if (index == 0) index=256; if (peak != MagickFalse) extrema[x]=(short) index; else extrema[x]=(short) (-index); } } /* Determine the average tau. */ average_tau=0.0; for (i=0; i < number_nodes; i++) average_tau+=list[i]->tau; average_tau/=(MagickRealType) number_nodes; /* Relinquish resources. */ FreeNodes(root); zero_crossing=(ZeroCrossing *) RelinquishMagickMemory(zero_crossing); list=(IntervalTree **) RelinquishMagickMemory(list); return(average_tau); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S c a l e S p a c e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ScaleSpace() performs a scale-space filter on the 1D histogram. % % The format of the ScaleSpace method is: % % ScaleSpace(const ssize_t *histogram,const MagickRealType tau, % MagickRealType *scale_histogram) % % A description of each parameter follows. % % o histogram: Specifies an array of MagickRealTypes representing the number % of pixels for each intensity of a particular color component. 
%
*/

/*
  ScaleSpace() smooths the 1-D histogram by convolving it with a Gaussian of
  standard deviation tau, writing the 256 filtered values to
  scale_histogram.  The Gaussian kernel is tabulated once per call and
  truncated where it falls below MagickEpsilon.

    o histogram: 256 integer bin counts for one color component.

    o tau: Gaussian standard deviation (the scale).

    o scale_histogram: receives the 256 smoothed values.
*/
static void ScaleSpace(const ssize_t *histogram,const MagickRealType tau,
  MagickRealType *scale_histogram)
{
  MagickRealType
    alpha,
    beta,
    *kernel,
    sum;

  register ssize_t
    i,
    j;

  kernel=(MagickRealType *) AcquireQuantumMemory(256,sizeof(*kernel));
  if (kernel == (MagickRealType *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAllocateGammaMap");
  alpha=1.0/(tau*sqrt(2.0*MagickPI));   /* Gaussian normalization factor */
  beta=(-1.0/(2.0*tau*tau));            /* exponent coefficient */
  /*
    Tabulate exp(beta*i*i) for i = 0..255; once a value drops below
    MagickEpsilon the remainder of the table stays zero (truncated tail).
  */
  for (i=0; i <= 255; i++)
    kernel[i]=0.0;
  for (i=0; i <= 255; i++)
  {
    kernel[i]=exp((double) beta*i*i);
    if (kernel[i] < MagickEpsilon)
      break;
  }
  /*
    Convolve: each output bin is the kernel-weighted sum over all inputs,
    indexed by the absolute distance |i-j|.
  */
  for (i=0; i <= 255; i++)
  {
    sum=0.0;
    for (j=0; j <= 255; j++)
      sum+=(MagickRealType) histogram[j]*kernel[MagickAbsoluteValue(i-j)];
    scale_histogram[i]=alpha*sum;
  }
  kernel=(MagickRealType *) RelinquishMagickMemory(kernel);
}

/*
  SegmentImage() segment an image by analyzing the histograms of the color
  components and identifying units that are homogeneous with the fuzzy
  C-means technique.

  The format of the SegmentImage method is:

    MagickBooleanType SegmentImage(Image *image,
      const ColorspaceType colorspace,const MagickBooleanType verbose,
      const double cluster_threshold,const double smooth_threshold)

  A description of each parameter follows.

    o image: the image.

    o colorspace: Indicate the colorspace.

    o verbose: Set to MagickTrue to print detailed information about the
      identified classes.

    o cluster_threshold: This represents the minimum number of pixels
      contained in a hexahedra before it can be considered valid (expressed
      as a percentage).

    o smooth_threshold: the smoothing threshold eliminates noise in the
      second derivative of the histogram.  As the value is increased, you
      can expect a smoother second derivative.
%
*/
MagickExport MagickBooleanType SegmentImage(Image *image,
  const ColorspaceType colorspace,const MagickBooleanType verbose,
  const double cluster_threshold,const double smooth_threshold)
{
  MagickBooleanType
    status;

  register ssize_t
    i;

  short
    *extrema[MaxDimension];

  ssize_t
    *histogram[MaxDimension];

  /*
    Allocate histogram and extrema.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  for (i=0; i < MaxDimension; i++)
  {
    histogram[i]=(ssize_t *) AcquireQuantumMemory(256,sizeof(**histogram));
    extrema[i]=(short *) AcquireQuantumMemory(256,sizeof(**extrema));
    if ((histogram[i] == (ssize_t *) NULL) || (extrema[i] == (short *) NULL))
      {
        /* Unwind the channels already allocated before failing. */
        for (i-- ; i >= 0; i--)
        {
          extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
          histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
        }
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          image->filename)
      }
  }
  if (IsRGBColorspace(colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,colorspace);
  /*
    Initialize histogram and locate each channel's scale-space extrema.
  */
  InitializeHistogram(image,histogram,&image->exception);
  (void) OptimalTau(histogram[Red],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Red]);
  (void) OptimalTau(histogram[Green],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Green]);
  (void) OptimalTau(histogram[Blue],Tau,0.2,DeltaTau,
    smooth_threshold == 0.0 ? 1.0 : smooth_threshold,extrema[Blue]);
  /*
    Classify using the fuzzy c-Means technique.
  */
  status=Classify(image,extrema,cluster_threshold,WeightingExponent,verbose);
  /* NOTE(review): this second conversion re-applies `colorspace` rather
     than converting back to RGB -- looks like it should restore the
     original colorspace; confirm intended behavior before changing. */
  if (IsRGBColorspace(colorspace) == MagickFalse)
    (void) TransformImageColorspace(image,colorspace);
  /*
    Relinquish resources.
  */
  for (i=0; i < MaxDimension; i++)
  {
    extrema[i]=(short *) RelinquishMagickMemory(extrema[i]);
    histogram[i]=(ssize_t *) RelinquishMagickMemory(histogram[i]);
  }
  return(status);
}

/*
  ZeroCrossHistogram() finds the zero crossings in a histogram and marks
  directions as: 1 is negative to positive; 0 is zero crossing; and -1
  is positive to negative.

    o second_derivative: 256 MagickRealType values; entries inside
      [-smooth_threshold, smooth_threshold) are zeroed IN PLACE to
      suppress noise.

    o crossings: receives -1, 0, or 1 per intensity as described above.
*/
static void ZeroCrossHistogram(MagickRealType *second_derivative,
  const MagickRealType smooth_threshold,short *crossings)
{
  register ssize_t
    i;

  ssize_t
    parity;

  /*
    Merge low numbers to zero to help prevent noise.
  */
  for (i=0; i <= 255; i++)
    if ((second_derivative[i] < smooth_threshold) &&
        (second_derivative[i] >= -smooth_threshold))
      second_derivative[i]=0.0;
  /*
    Mark zero crossings: parity tracks the sign of the last nonzero sample,
    so a sign flip is recorded at the first sample of the new sign.
  */
  parity=0;
  for (i=0; i <= 255; i++)
  {
    crossings[i]=0;
    if (second_derivative[i] < 0.0)
      {
        if (parity > 0)
          crossings[i]=(-1);
        parity=1;
      }
    else
      if (second_derivative[i] > 0.0)
        {
          if (parity < 0)
            crossings[i]=1;
          parity=(-1);
        }
  }
}
mysql_fmt_plug.c
/* MYSQL_half_fmt.c
 *
 * Copyright (c) 2008 by <earthquake at rycon.hu>
 *
 * John the ripper MYSQL-fast module
 *
 *
 * Note: The mysql hash's first 8byte is relevant,
 * the another ones depends on the first 8. Maybe
 * the passwords after 9-10character have collision
 * in the first 8byte, so we have to check the full
 * hash.
 *
 * Unbelievable good optimization by Péter Kasza
 *
 * http://rycon.hu/
 *
 * OpenMP support and other assorted hacks by Solar Designer
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_MYSQL_fast;
#elif FMT_REGISTERS_H
john_register_one(&fmt_MYSQL_fast);
#else

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* This format is too fast to benefit from OpenMP unless explicitly
   requested; disable it otherwise. */
#if !FAST_FORMATS_OMP
#undef _OPENMP
#endif

#ifdef _OPENMP
#include <omp.h>
#ifdef __MIC__
#ifndef OMP_SCALE
#define OMP_SCALE 2048
#endif
#else
#ifndef OMP_SCALE
#define OMP_SCALE 81920
#endif
#endif
#endif

#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "memdbg.h"

#define FORMAT_LABEL "mysql"
#define FORMAT_NAME "MySQL pre-4.1"
#define ALGORITHM_NAME "32/" ARCH_BITS_STR
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 32
#define CIPHERTEXT_LENGTH 16
/* Only the first 32 bits of the 64-bit hash are kept per candidate; the
   full hash is recomputed in cmp_exact(). */
#define BINARY_SIZE 4
#define SALT_SIZE 0
#define BINARY_ALIGN sizeof(uint32_t)
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 8

static struct fmt_tests tests[] = {
	// ciphertext, plaintext
	{"445ff82636a7ba59", "probe"},
	{"60671c896665c3fa", "a"},
	{"1acbed4a27b20da3", "hash"},
	{"77ff75006118bab8", "hacker"},
	{"1b38cd9c2f809809", "hacktivity2008"},
	{"1b38cd9c2f809809", "hacktivity 2008"},
	{"6fc81597422015a8", "johnmodule"},
	{"30f098972cc8924d", "http://guh.nu"},
	{"3fc56f6037218993", "Andrew Hintz"},
	{"697a7de87c5390b2", "drew"},
	{"1eb71cf460712b3e", "http://4tphi.net"},
	{"28ff8d49159ffbaf", "http://violating.us"},
	{"5d2e19393cc5ef67", "password"},
	{"5030573512345671", ""},
	{"723d80f65bf9d670", "UPPERCASE"},
	{NULL}
};

/* Per-candidate plaintexts and their 32-bit partial hashes. */
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static uint32_t (*crypt_key)[BINARY_SIZE / 4];

/* Scale the keys-per-crypt by the thread count and allocate the
   key/hash arrays. */
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
	int omp_t = omp_get_max_threads();

	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*saved_key));
	crypt_key = mem_calloc(self->params.max_keys_per_crypt,
	                       sizeof(*crypt_key));
}

static void done(void)
{
	MEM_FREE(crypt_key);
	MEM_FREE(saved_key);
}

/* A valid ciphertext is exactly 16 hex digits. */
static int valid(char* ciphertext, struct fmt_main *self)
{
	unsigned int i;

	if (strnlen(ciphertext, CIPHERTEXT_LENGTH + 1) != CIPHERTEXT_LENGTH)
		return 0;

	for (i = 0; i < CIPHERTEXT_LENGTH; i++)
		if (atoi16[ARCH_INDEX(ciphertext[i])] > 15)
			return 0;

	return 1;
}

/* Canonicalize the ciphertext to lowercase (FMT_SPLIT_UNIFIES_CASE). */
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
	static char out[CIPHERTEXT_LENGTH + 1];

	memcpy(out, ciphertext, CIPHERTEXT_LENGTH);
	out[CIPHERTEXT_LENGTH] = 0;
	strlwr(out);
	return out;
}

/* Decode `size` bytes of hex ciphertext into 32-bit words; on little-endian
   hosts bytes are swizzled within each word so that word compares match the
   big-endian hex representation.  `size` may exceed BINARY_SIZE for use
   from cmp_exact(). */
static void *get_binary_size(char *ciphertext, int size)
{
	/* maybe bigger than BINARY_SIZE for use from cmp_exact() */
	static uint32_t buff_[8];
	unsigned char *buff = (unsigned char *)buff_;
	unsigned int i;

	for (i = 0; i < size; i++) {
#if ARCH_LITTLE_ENDIAN
		buff[(i & ~3U) | (3 - (i & 3))] =
		    atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 +
		    atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
#else
		buff[i] =
		    atoi16[ARCH_INDEX(ciphertext[i * 2])] * 16 +
		    atoi16[ARCH_INDEX(ciphertext[i * 2 + 1])];
#endif
	}

	return buff;
}

static void *get_binary(char *ciphertext)
{
	return get_binary_size(ciphertext, BINARY_SIZE);
}

static void set_key(char* key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH + 1);
}

static char* get_key(int index)
{
	return saved_key[index];
}

/* Compare the stored 32-bit partial hash for one candidate. */
static int cmp_one(void* binary, int index)
{
	return *(uint32_t *)binary == crypt_key[index][0];
}

/* Scan all candidates for a partial-hash match; with OpenMP the matches
   are OR-ed into retval under an atomic update. */
static int cmp_all(void* binary, int count)
{
	int i;

#ifdef _OPENMP
	int retval = 0;
#pragma omp parallel for default(none) private(i) shared(count, binary, crypt_key, retval)
	for (i = 0; i < count; i++)
		if (*(uint32_t *)binary == crypt_key[i][0])
#pragma omp atomic
			retval |= 1;
	return retval;
#else
	for (i = 0; i < count; i++)
		if (*(uint32_t *)binary == crypt_key[i][0])
			return 1;
	return 0;
#endif
}

/* Recompute the FULL 64-bit MySQL pre-4.1 hash (both words, each masked to
   31 bits) for the candidate and compare against the whole ciphertext.
   Spaces and tabs in the key are skipped, as in MySQL's algorithm. */
static int cmp_exact(char* source, int index)
{
	register uint32_t nr = 1345345333, add = 7, nr2 = 0x12345671;
	register uint32_t tmp;
	unsigned char *p;

	p = (unsigned char *)saved_key[index];
	for (; *p; p++) {
		if (*p == ' ' || *p == '\t')
			continue;

		tmp = (uint32_t)*p;
		nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
		nr2 += (nr2 << 8) ^ nr;
		add += tmp;
	}

#if 0
	{
		char ctmp[CIPHERTEXT_LENGTH + 1];
		sprintf(ctmp, "%08x%08x", nr & (((uint32_t)1 << 31) - 1), nr2 & (((uint32_t)1 << 31) - 1));
		return !memcmp(source, ctmp, CIPHERTEXT_LENGTH);
	}
#else
	{
		uint32_t *binary = get_binary_size(source, 8);
		return binary[0] == (nr & (((uint32_t)1 << 31) - 1)) &&
		    binary[1] == (nr2 & (((uint32_t)1 << 31) - 1));
	}
#endif
}

/* Compute the first 31 bits of the MySQL pre-4.1 hash for every queued key.
   The first non-blank character is folded into the initial state outside
   the loop as an optimization; keys that are empty (or, note, all blanks --
   see the *p check) take the constant-hash shortcut at the bottom. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	int count = *pcount;
	int i = 0;

#ifdef _OPENMP
#pragma omp parallel for default(none) private(i) shared(count, saved_key, crypt_key)
#endif
#if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP)
	for (i = 0; i < count; i++)
#endif
	{
		unsigned char *p = (unsigned char *)saved_key[i];

		if (*p) {
			uint32_t nr, add;
			uint32_t tmp;

			while (*p == ' ' || *p == '\t')
				p++;

			tmp = (uint32_t) (unsigned char) *p++;
			nr = 1345345333 ^ ((((1345345333 & 63) + 7) * tmp) +
			    (1345345333U << 8));
			add = 7 + tmp;

			for (; *p; p++) {
				if (*p == ' ' || *p == '\t')
					continue;

				tmp = (uint32_t) (unsigned char) *p;
				nr ^= (((nr & 63) + add) * tmp) + (nr << 8);
				add += tmp;
			}

			crypt_key[i][0] = (nr & (((uint32_t)1 << 31) - 1));
#if MAX_KEYS_PER_CRYPT > 1 || defined(_OPENMP)
			continue;
#else
			return count;
#endif
		}

		/* Empty key: hash of the unmodified initial state. */
		crypt_key[i][0] = (1345345333 & (((uint32_t)1 << 31) - 1));
	}

	return count;
}

static int get_hash_0(int index) { return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_key[index][0] & PH_MASK_6; }

/* Format descriptor registered with the John core. */
struct fmt_main fmt_MYSQL_fast = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
#ifdef _OPENMP
		FMT_OMP | FMT_OMP_BAD |
#endif
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE,
		{ NULL },
		{ NULL },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		fmt_default_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		fmt_default_salt_hash,
		NULL,
		fmt_default_set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
			get_hash_0,
			get_hash_1,
			get_hash_2,
			get_hash_3,
			get_hash_4,
			get_hash_5,
			get_hash_6
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
3d7pt_var.c
/* * Order-1, 3D 7 point stencil with variable coefficients * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, m, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+2; Ny = atoi(argv[2])+2; Nz = atoi(argv[3])+2; } if (argc > 4) Nt = atoi(argv[4]); // allocate the arrays double ****A = (double ****) malloc(sizeof(double***)*2); for(m=0; m<2;m++){ A[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } double ****coef = (double ****) malloc(sizeof(double***)*7); for(m=0; m<7;m++){ coef[m] = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ coef[m][i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ coef[m][i][j] = (double*) malloc(sizeof(double)*Nx); } } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 32; tile_size[1] = 32; tile_size[2] = 4; tile_size[3] = 2048; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); } } } for (m=0; m<7; m++) { for (i=1; i<Nz; i++) { for (j=1; j<Ny; j++) { for (k=1; k<Nx; k++) { coef[m][i][j][k] = 1.0 * (rand() % BASE); } } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial execution - Addition: 6 && Multiplication: 2 #pragma scop for (t = 0; t < Nt-1; 
t++) { for (i = 1; i < Nz-1; i++) { for (j = 1; j < Ny-1; j++) { for (k = 1; k < Nx-1; k++) { A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] + coef[1][i][j][k] * A[t%2][i-1][j ][k ] + coef[2][i][j][k] * A[t%2][i ][j-1][k ] + coef[3][i][j][k] * A[t%2][i ][j ][k-1] + coef[4][i][j][k] * A[t%2][i+1][j ][k ] + coef[5][i][j][k] * A[t%2][i ][j+1][k ] + coef[6][i][j][k] * A[t%2][i ][j ][k+1]; } } } } #pragma endscop gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = min(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(1, "variable no-symmetry") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); } free(A[0][i]); free(A[1][i]); } free(A[0]); free(A[1]); for(m=0; m<7;m++){ for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(coef[m][i][j]); } free(coef[m][i]); } free(coef[m]); } return 0; }
msc.h
// Copyright (c) 2017 Francisco Troncoso Pastoriza // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
#pragma once

#include <vector>
#include <limits>
#include <iterator>
#include <type_traits>
#include <stdexcept>

#ifdef _OPENMP
#include <omp.h>
#endif

namespace msc {

/// A cluster produced by mean-shift: the converged mode (dim values) and the
/// indices (into the original input range) of the points that converged to it.
template <class T>
struct Cluster
{
    std::vector<T> mode;
    std::vector<std::size_t> members;

    inline Cluster(const T* mode, int dim)
        : mode(mode, mode + dim), members() {}
};

/// Adapter exposing a container's point as a contiguous array of T.
/// Users must specialize this for their container type; the primary template
/// fails to compile on purpose.
template <class T, class C>
struct Accessor
{
    inline static const T* data(const C& point)
    {
        static_assert(False<T>::value,
            "Accessor not implemented for container");
    }

private:
    template <class> struct False : std::false_type {};
};

/// One mean-shift step: returns the kernel-weighted mean of [first, last)
/// around `point`.  `metric` measures distance, `kernel` maps a scaled
/// distance to a weight, and `estimator` returns the inverse bandwidth.
/// Throws std::invalid_argument if dim <= 0.
template <class T, class ForwardIterator, class Metric, class Kernel,
    class Estimator>
inline std::vector<T> mean_shift(const T* point, ForwardIterator first,
    ForwardIterator last, int dim, Metric metric, Kernel kernel,
    Estimator estimator)
{
    if (dim <= 0)
        throw std::invalid_argument("Dimension must be greater than 0");

    typedef typename std::iterator_traits<ForwardIterator>::value_type C;
    std::vector<T> shifted(dim);    // value-initialized to zero
    const auto ibw = estimator(point, first, last, dim, metric);
    double total_weight = 0;
    for (auto it = first; it != last; it++)
    {
        const T* pt = Accessor<T, C>::data(*it);
        const auto dist = metric(pt, point, dim);
        const auto weight = kernel(dist * ibw);
        for (std::size_t k = 0; k < shifted.size(); k++)
            shifted[k] += pt[k] * weight;
        total_weight += weight;
    }
    // NOTE(review): if every weight is zero this divides by zero; callers
    // must supply a kernel with non-zero support over the data.
    for (std::size_t k = 0; k < shifted.size(); k++)
        shifted[k] /= total_weight;
    return shifted;
}

/// Iterated mean-shift: shifts every point of [first, last) until movement
/// falls below `epsilon` or `max_iter` iterations are reached.  Returns one
/// converged position per input point, in input order.
template <class T, class ForwardIterator, class Metric, class Kernel,
    class Estimator>
inline std::vector<std::vector<T>> mean_shift(
    ForwardIterator first, ForwardIterator last, int dim, Metric metric,
    Kernel kernel, Estimator estimator,
    double epsilon = std::numeric_limits<float>::epsilon(),
    int max_iter = std::numeric_limits<int>::max())
{
    if (dim <= 0)
        throw std::invalid_argument("Dimension must be greater than 0");

    typedef typename std::iterator_traits<ForwardIterator>::value_type C;
    std::vector<std::vector<T>> shifted(std::distance(first, last));
    std::size_t i = 0;
    for (auto it = first; it != last; it++, i++)
    {
        const T* pt = Accessor<T, C>::data(*it);
        shifted[i].resize(dim);
        for (int k = 0; k < dim; k++)
            shifted[i][k] = pt[k];
    }

#pragma omp parallel for
    for (std::size_t i = 0; i < shifted.size(); i++)
    {
        int iter = 0;
        double d = 0;
        do
        {
            // Re-read the data pointer on every iteration: the assignment
            // to shifted[i] below is allowed to reallocate the vector's
            // storage, which previously left the cached pointer dangling.
            const T* pt = shifted[i].data();
            const auto point = mean_shift(
                pt, first, last, dim, metric, kernel, estimator);
            d = metric(pt, point.data(), dim);
            shifted[i] = point;
            iter++;
        } while (d > epsilon && iter < max_iter);
    }
    return shifted;
}

/// Groups already-shifted points into clusters: a point joins the first
/// cluster whose mode lies within `epsilon`, otherwise it founds a new one.
template <class T, class InputIterator, class Metric>
inline std::vector<Cluster<T>> cluster_shifted(
    InputIterator first, InputIterator last, int dim, Metric metric,
    double epsilon = std::numeric_limits<float>::epsilon())
{
    if (dim <= 0)
        throw std::invalid_argument("Dimension must be greater than 0");

    typedef typename std::iterator_traits<InputIterator>::value_type C;
    std::vector<Cluster<T>> clusters;
    std::size_t i = 0;
    for (auto it = first; it != last; it++, i++)
    {
        const T* pt = Accessor<T, C>::data(*it);
        std::size_t c = 0;
        for (; c < clusters.size(); c++)
            if (metric(pt, clusters[c].mode.data(), dim) <= epsilon)
                break;
        if (c == clusters.size())
            clusters.emplace_back(pt, dim);
        clusters[c].members.emplace_back(i);
    }
    return clusters;
}

/// Convenience wrapper: run iterated mean-shift, then cluster the results.
template <class T, class ForwardIterator, class Metric, class Kernel,
    class Estimator>
inline std::vector<Cluster<T>> mean_shift_cluster(
    ForwardIterator first, ForwardIterator last, int dim, Metric metric,
    Kernel kernel, Estimator estimator,
    double epsilon = std::numeric_limits<float>::epsilon(),
    int max_iter = std::numeric_limits<int>::max())
{
    const auto shifted = mean_shift<T>(
        first, last, dim, metric, kernel, estimator, epsilon, max_iter);
    return cluster_shifted<T>(
        std::begin(shifted), std::end(shifted), dim, metric, epsilon);
}

} // namespace msc
fig4.55-critical-region.c
/* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS HEADER. Copyright 2009 Sun Microsystems, Inc. All rights reserved. The contents of this file are subject to the terms of the BSD License("BSD")(the "License"). You can obtain a copy of the License at: http://www.opensparc.net/pubs/t1/licenses/BSD+_License.txt The BSD License Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistribution of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistribution in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Sun Microsystems, Inc. or the names of contributors may be used to endorse or promote products derived from this software without specific prior written permission. This software is provided "AS IS," without a warranty of any kind. ALL EXPRESS OR IMPLIED CONDITIONS, REPRESENTATIONS AND WARRANTIES, INCLUDING ANY IMPLIED WARRANTY OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE OR NON-INFRINGEMENT, ARE HEREBY EXCLUDED. SUN MICROSYSTEMS, INC. ("SUN") AND ITS LICENSORS SHALL NOT BE LIABLE FOR ANY DAMAGES SUFFERED BY LICENSEE AS A RESULT OF USING, MODIFYING OR DISTRIBUTING THIS SOFTWARE OR ITS DERIVATIVES. IN NO EVENT WILL SUN OR ITS LICENSORS BE LIABLE FOR ANY LOST REVENUE, PROFIT OR DATA, OR FOR DIRECT, INDIRECT, SPECIAL, CONSEQUENTIAL, INCIDENTAL OR PUNITIVE DAMAGES, HOWEVER CAUSED AND REGARDLESS OF THE THEORY OF LIABILITY, ARISING OUT OF THE USE OF OR INABILITY TO USE THIS SOFTWARE, EVEN IF SUN HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. You acknowledge that this software is not designed, licensed or intended for use in the design, construction, operation or maintenance of any nuclear facility. 
*/ #include <stdio.h> #include <stdlib.h> #ifdef _OPENMP #include <omp.h> #define TRUE 1 #define FALSE 0 #else #define omp_get_thread_num() 0 #define omp_get_num_threads() 1 #endif #define SUM_INIT 0 int main() { int i, n = 25; int sum, TID, a[n]; int ref = SUM_INIT + (n-1)*n/2; int sumLocal; #ifdef _OPENMP (void) omp_set_dynamic(FALSE); if (omp_get_dynamic()) {printf("Warning: dynamic adjustment of threads has been set\n");} (void) omp_set_num_threads(3); #endif for (i=0; i<n; i++) a[i] = i; #pragma omp parallel { #pragma omp single printf("Number of threads is %d\n",omp_get_num_threads()); } sum = SUM_INIT; printf("Value of sum prior to parallel region: %d\n",sum); #pragma omp parallel default(none) shared(n,a,sum) \ private(TID,sumLocal) { TID = omp_get_thread_num(); sumLocal = 0; #pragma omp for for (i=0; i<n; i++) sumLocal += a[i]; #pragma omp critical (update_sum) { sum += sumLocal; printf("TID=%d: sumLocal = %d sum = %d\n",TID,sumLocal,sum); } } /*-- End of parallel region --*/ printf("Value of sum after parallel region: %d\n",sum); printf("Check results: sum = %d (should be %d)\n",sum,ref); return(0); }
GB_unop__sqrt_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): changes belong in the generator template, not here.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__sqrt_fc64_fc64)
// op(A') function: GB (_unop_tran__sqrt_fc64_fc64)

// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: GxB_FC64_t cij = aij
// unaryop: cij = csqrt (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = csqrt (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = csqrt (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SQRT || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = csqrt(aij) elementwise over the anz entries of Ax, in
// parallel over nthreads.  Ab (if non-NULL) is the bitmap of A: entries with
// Ab[p]==0 are skipped.
GrB_Info GB (_unop_apply__sqrt_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;

    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time

    if (Ab == NULL)
    {
        // dense/full case: every position holds an entry
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csqrt (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = csqrt (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The loop body lives in GB_unop_transpose.c, which expands using the
// GB_CAST_OP macro defined above.
GrB_Info GB (_unop_tran__sqrt_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
quantized_conv2d.h
/* Copyright 2018 The Blueoil Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
#define DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED

#include <vector>
#include <memory>
#include <stdexcept>

#include "tensor_view.h"
#include "tensor_convert.h"
#include "operators.h"
#include "time_measurement.h"
#include "func/impl/quantized_conv2d_tiling.h"
#include "func/impl/quantized_conv2d_kn2row.h"

#ifdef _OPENMP
#include <omp.h>
#endif

// Dispatches a binary/quantized convolution to one of three backends
// (FPGA TCA, NEON/AVX tiling, or generic kn2row) chosen at compile time.
// Only 3x3/pad=1 and 1x1/pad=0 kernels are supported; anything else throws.
// NOTE(review): `p` is taken by value, so the buffer allocated into
// p.device_output_buf below is not propagated back to the caller's struct --
// callers appear to pre-allocate it; confirm ownership before relying on it.
template <typename T, MemoryLayout layout>
void QuantizedConv2D(const TensorView<T, layout>& input,
    const kernel_t& kernel,
    binary_convolution_parameters p) {
  Measurement::Start("QuantizedConv2D");

  constexpr T_UINT TilingInTypeBitWidth = dlk::impl::tiling_input_elem_t::BitCount;
  T_UINT kh = p.normal_conv_params.kernel_height;
  T_UINT kw = p.normal_conv_params.kernel_width;
  T_UINT padding = p.normal_conv_params.padding;
  T_UINT ih = p.normal_conv_params.input_height;
  T_UINT iw = p.normal_conv_params.input_width;
  T_UINT ic = p.normal_conv_params.kernel_depth;
  T_UINT oc = p.normal_conv_params.output_channels;
  auto size = oc * ih * iw;
  // Lazily allocate the output buffer, zero-initialized.
  if (p.device_output_buf == nullptr)
    p.device_output_buf = new BIN_CONV_OUTPUT[size]();

  if ((kh == 3 && kw == 3 && padding == 1) ||
      (kh == 1 && kw == 1 && padding == 0)) {
#ifdef RUN_ON_FPGA
    // FPGA path: repack input channels into QUANTIZED_PACKED words
    // (rounded up) and hand off to the TCA accelerator.
    dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = {
      (ic + QUANTIZED_PACKED::BitCount - 1) / QUANTIZED_PACKED::BitCount,
      ih,
      iw,
      p.bin_input_bitwidth,
      QUANTIZED_PACKED::BitCount
    };
    dlk::impl::kn2row_input_t tmp(p.device_input_buf, shape);
    convert_tensor(input, tmp);
    dlk::impl::TCAConv2d(tmp, kernel, p);
#elif defined USE_NEON || defined USE_AVX
    // SIMD path: tiling layout packed by TilingInTypeBitWidth.
    dlk::impl::tiling_input_t::tensor_info_t<std::size_t> shape = {
      ic / TilingInTypeBitWidth,
      ih,
      iw,
      p.bin_input_bitwidth,
      TilingInTypeBitWidth
    };
    dlk::impl::tiling_input_t tmp(p.device_input_buf, shape);
    convert_tensor(input, tmp);
    dlk::impl::QuantizedConv2DTiling(tmp, kernel, p);
#else
    // Portable fallback: kn2row layout, HWC-packed.
    dlk::impl::kn2row_input_t::tensor_info_t<std::size_t> shape = {
      ih,
      iw,
      ic / QUANTIZED_PACKED::BitCount,
      p.bin_input_bitwidth,
      QUANTIZED_PACKED::BitCount
    };
    dlk::impl::kn2row_input_t tmp(p.device_input_buf, shape);
    convert_tensor(input, tmp);
    dlk::impl::QuantizedConv2DKn2Row(tmp, kernel, p);
#endif
  } else {
    throw std::invalid_argument("Unsupported convolution parameter");
  }

  Measurement::Stop();
}

// Runs the convolution, then converts the blocked device output
// (channel blocks of b=32 laid out as [block][area][32]) into NHWC floats,
// applying a single scalar scaling factor to every channel.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2D(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);

  Measurement::Start("QuantizedConv2D_ApplyScalingFactor");

  unsigned out_elems = p.normal_conv_params.output_height
    * p.normal_conv_params.output_width
    * p.normal_conv_params.output_channels;

  // temporary: (2^n - 1) * (max - min)
  const T_FLOAT post_qtz_factor = 2.0f / 3.0f;
  const T_FLOAT coeff = scaling_factor * post_qtz_factor;
  size_t b = 32;                     // channel block width of the device layout
  auto &ncp(p.normal_conv_params);
  auto true_out_channels = output.get_shape()[3];
  auto channel_blocks = true_out_channels / b;
  size_t area = ncp.output_height * ncp.output_width;
#pragma omp parallel for
  for (size_t hw = 0; hw < area; ++hw) {
    size_t out_index = hw * true_out_channels;
    // Full 32-channel blocks...
    for (size_t s = 0; s < channel_blocks; ++s)
      for (size_t d = 0; d < b; ++d)
        output.data()[out_index++] = coeff *
          p.device_output_buf[hw * b + s * (area * b) + d];
    // ...then the remaining true_out_channels % b channels of the last block.
    for (size_t d = 0; d < true_out_channels - channel_blocks*b; ++d)
      output.data()[out_index++] = coeff *
        p.device_output_buf[hw * b + channel_blocks * (area * b) + d];
  }

  Measurement::Stop();
}

// Same as above, but with one scaling factor per output channel.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2D(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  QuantizedConv2D(input, kernel, p);

  unsigned out_elems = p.normal_conv_params.output_height
    * p.normal_conv_params.output_width;
  unsigned out_channels = p.normal_conv_params.output_channels;
  size_t b = 32;                     // channel block width of the device layout
  auto& ncp(p.normal_conv_params);
  auto true_out_channels = output.get_shape()[3];
  auto channel_blocks = true_out_channels / b;

  // temporary: (2^n - 1) * (max - min)
  T_FLOAT post_qtz_factor = 2.0 / 3.0;

  Measurement::Start("QuantizedConv2D_ApplyScalingFactor");

  size_t area = ncp.output_height * ncp.output_width;
#pragma omp parallel for
  for (size_t hw = 0; hw < area; ++hw) {
    size_t out_index = hw * true_out_channels;
    for (size_t s = 0; s < channel_blocks; ++s)
      for (size_t d = 0; d < b; ++d)
        output.data()[out_index++] = (scaling_factor[s*b + d] * post_qtz_factor) *
          p.device_output_buf[hw * b + s * (area * b) + d];
    for (size_t d = 0; d < true_out_channels - channel_blocks*b; ++d)
      output.data()[out_index++] =
        (scaling_factor[channel_blocks*b + d] * post_qtz_factor) *
        p.device_output_buf[hw * b + channel_blocks * (area * b) + d];
  }

  Measurement::Stop();
}

// Thresholded variant with packed output: the device buffer already holds the
// final packed bits, so this is just a (parallel) memcpy into `output`.
template<typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);

  unsigned out_elems = p.normal_conv_params.output_height
    * p.normal_conv_params.output_width
    * p.normal_conv_params.output_channels;
  const auto bytes = out_elems / 8 * p.n_bit;

  Measurement::Start("Memcpy");

#ifdef _OPENMP
  // Split the copy into one contiguous chunk per thread.
  const int num_blocks = bytes / sizeof(QUANTIZED_PACKED);
  const int num_threads = omp_get_max_threads();
  const int chunk_size = (num_blocks + num_threads - 1) / num_threads;
  // NOTE(review): std::min is used without a direct <algorithm> include;
  // it is presumably pulled in transitively -- confirm.
#pragma omp parallel for
  for (int i = 0; i < num_blocks; i += chunk_size) {
    memcpy(output.data() + i,
        (QUANTIZED_PACKED*)(p.device_output_buf) + i,
        std::min(chunk_size, num_blocks - i) * sizeof(QUANTIZED_PACKED));
  }
#else
  memcpy(output.data(), (void*)p.device_output_buf, bytes);
#endif

  Measurement::Stop();
}

// Thresholded variant with float output: unpacks the n_bit-planes of the
// packed device buffer back into floats in [0, max_value].
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    const T_FLOAT scaling_factor,
    const binary_convolution_parameters& p) {
  QuantizedConv2D(input, kernel, p);

  Measurement::Start("linear_to_float");

  T_FLOAT n = (1 << p.n_bit) - 1;   // max representable quantized value
  const auto& np = p.normal_conv_params;
  const auto out_height = np.output_height;
  const auto out_width = np.output_width;
  const auto out_channels = np.output_channels;
  const auto true_out_channels = output.get_shape()[3];
  QUANTIZED_PACKED::base_t* ptr = (QUANTIZED_PACKED::base_t*)p.device_output_buf;
  for (unsigned r = 0; r < out_height; ++r) {
    for (unsigned c = 0; c < out_width; ++c) {
      for (unsigned d = 0; d < true_out_channels; ++d) {
        // NOTE(review): `i` indexes n_bit consecutive words per pixel, one
        // per bit-plane; channel d selects a bit within each word -- this
        // assumes true_out_channels <= bits per base_t word; confirm.
        const auto i = r * out_width * p.n_bit + c * p.n_bit;
        QUANTIZED_PACKED::base_t bits = 0;
        // Re-assemble the d-th channel's value from its n_bit planes.
        for (unsigned digit = 0; digit < p.n_bit; ++digit) {
          bits |= ((ptr[i + digit] >> d) & 1) << digit;
        }
        T_FLOAT tmp = (T_FLOAT)bits;
        tmp = tmp / n;
        output(0, r, c, d) = tmp * p.max_value;
      }
    }
  }

  Measurement::Stop();
}

// Per-channel-factor overload: thresholded outputs ignore the scaling
// factors, so delegate using the first element only.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<QUANTIZED_PACKED, MemoryLayout::ChHWBCl>& output,
    const T_FLOAT scaling_factor[],
    const binary_convolution_parameters& p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0], p);
}

// Same delegation for the float-output form.
template <typename T, MemoryLayout layout>
void func_QuantizedConv2DWithThreshold(
    const TensorView<T, layout>& input,
    const kernel_t& kernel,
    const TensorView<T_FLOAT, MemoryLayout::NHWC>& output,
    T_FLOAT scaling_factor[],
    binary_convolution_parameters p) {
  func_QuantizedConv2DWithThreshold(input, kernel, output, scaling_factor[0], p);
}

#endif // DLK_FUNC_QUANTIZED_CONV2D_H_INCLUDED
DRB045-doall1-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Simplest one dimension array computation */ #include "omprace.h" #include <omp.h> int a[100]; int main() { omprace_init(); int i; #pragma omp parallel for for (i=0;i<100;i++) a[i]=a[i]+1; omprace_fini(); return 0; }
profile.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR OOO FFFFF IIIII L EEEEE % % P P R R O O F I L E % % PPPP RRRR O O FFF I L EEE % % P R R O O F I L E % % P R R OOO F IIIII LLLLL EEEEE % % % % % % MagickCore Image Profile Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2021 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/image.h" #include "MagickCore/linked-list.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/option-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/profile.h" #include "MagickCore/profile-private.h" #include "MagickCore/property.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resource_.h" #include "MagickCore/splay-tree.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #if defined(MAGICKCORE_LCMS_DELEGATE) #if defined(MAGICKCORE_HAVE_LCMS_LCMS2_H) #include <wchar.h> #include <lcms/lcms2.h> #else #include <wchar.h> #include "lcms2.h" #endif #endif #if defined(MAGICKCORE_XML_DELEGATE) # if defined(MAGICKCORE_WINDOWS_SUPPORT) # if !defined(__MINGW32__) # include <win32config.h> # endif # endif # include <libxml/parser.h> # include <libxml/tree.h> #endif /* Forward declarations */ static MagickBooleanType SetImageProfileInternal(Image *,const char *,const StringInfo *, const MagickBooleanType,ExceptionInfo *); static void WriteTo8BimProfile(Image *,const char*,const StringInfo *); /* Typedef declarations */ struct _ProfileInfo { char *name; size_t length; unsigned char *info; size_t signature; }; typedef struct _CMSExceptionInfo { Image *image; ExceptionInfo *exception; } CMSExceptionInfo; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o n e I m a g e P r o f i l e s % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageProfiles() clones one or more image profiles.
%
%  The format of the CloneImageProfiles method is:
%
%      MagickBooleanType CloneImageProfiles(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageProfiles(Image *image,
  const Image *clone_image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clone_image != (const Image *) NULL);
  assert(clone_image->signature == MagickCoreSignature);
  if (clone_image->profiles != (void *) NULL)
    {
      /* Replace any existing profile map with a deep copy of the clone's. */
      if (image->profiles != (void *) NULL)
        DestroyImageProfiles(image);
      image->profiles=CloneSplayTree((SplayTreeInfo *) clone_image->profiles,
        (void *(*)(void *)) ConstantString,(void *(*)(void *)) CloneStringInfo);
    }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e l e t e I m a g e P r o f i l e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DeleteImageProfile() deletes a profile from the image by its name.
%
%  The format of the DeleteImageProfile method is:
%
%      MagickBooleanType DeleteImageProfile(Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport MagickBooleanType DeleteImageProfile(Image *image,const char *name)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return(MagickFalse);
  /* Mirror the deletion into the 8BIM meta profile before removing it. */
  WriteTo8BimProfile(image,name,(StringInfo *) NULL);
  return(DeleteNodeFromSplayTree((SplayTreeInfo *) image->profiles,name));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   D e s t r o y I m a g e P r o f i l e s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyImageProfiles() releases memory associated with an image profile
%  map.
%
%  The format of the DestroyProfiles method is:
%
%      void DestroyImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport void DestroyImageProfiles(Image *image)
{
  if (image->profiles != (SplayTreeInfo *) NULL)
    image->profiles=DestroySplayTree((SplayTreeInfo *) image->profiles);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t I m a g e P r o f i l e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageProfile() gets a profile associated with an image by name.
%
%  The format of the GetImageProfile method is:
%
%      const StringInfo *GetImageProfile(const Image *image,const char *name)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name.
%
*/
MagickExport const StringInfo *GetImageProfile(const Image *image,
  const char *name)
{
  const StringInfo
    *profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((StringInfo *) NULL);
  profile=(const StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,name);
  return(profile);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   G e t N e x t I m a g e P r o f i l e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetNextImageProfile() gets the next profile name for an image.
%
%  The format of the GetNextImageProfile method is:
%
%      char *GetNextImageProfile(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport char *GetNextImageProfile(const Image *image)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->profiles == (SplayTreeInfo *) NULL)
    return((char *) NULL);
  return((char *) GetNextKeyInSplayTree((SplayTreeInfo *) image->profiles));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   P r o f i l e I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ProfileImage() associates, applies, or removes an ICM, IPTC, or generic
%  profile with / to / from an image.  If the profile is NULL, it is removed
%  from the image otherwise added or applied.  Use a name of '*' and a profile
%  of NULL to remove all profiles from the image.
%
%  ICC and ICM profiles are handled as follows: If the image does not have
%  an associated color profile, the one you provide is associated with the
%  image and the image pixels are not transformed.  Otherwise, the colorspace
%  transform defined by the existing and new profile are applied to the image
%  pixels and the new profile is associated with the image.
%
%  The format of the ProfileImage method is:
%
%      MagickBooleanType ProfileImage(Image *image,const char *name,
%        const void *datum,const size_t length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: Name of profile to add or remove: ICC, IPTC, or generic profile.
%
%    o datum: the profile data.
%
%    o length: the length of the profile.
%
%    o exception: return any errors or warnings in this structure.
%
*/
#if defined(MAGICKCORE_LCMS_DELEGATE)
/*
  Per-transform state shared between the source and target sides of an LCMS
  color transform: colorspace, LCMS pixel-format type, channel count, open
  profile handle, rendering intent, per-channel scale/translate applied when
  marshalling pixels, and one scratch pixel buffer per worker thread.
*/
typedef struct _LCMSInfo
{
  ColorspaceType colorspace;

  cmsUInt32Number type;

  size_t channels;

  cmsHPROFILE profile;

  int intent;

  double
    scale[4],
    translate[4];

  void
    **magick_restrict pixels;
} LCMSInfo;

#if LCMS_VERSION < 2060
/*
  Compatibility shims for Little CMS releases older than 2.6, which lack the
  context API: the "context" is simply the user-data pointer itself.
*/
static void* cmsGetContextUserData(cmsContext ContextID)
{
  return(ContextID);
}

static cmsContext cmsCreateContext(void *magick_unused(Plugin),void *UserData)
{
  magick_unreferenced(Plugin);
  return((cmsContext) UserData);
}

static void cmsSetLogErrorHandlerTHR(cmsContext magick_unused(ContextID),
  cmsLogErrorHandlerFunction Fn)
{
  magick_unreferenced(ContextID);
  cmsSetLogErrorHandler(Fn);
}

static void cmsDeleteContext(cmsContext magick_unused(ContextID))
{
  magick_unreferenced(ContextID);
}
#endif

/*
  Release the per-thread pixel buffers allocated by AcquirePixelThreadSet().
  Safe to call with NULL; always returns NULL for pointer-clearing idiom.
*/
static void **DestroyPixelThreadSet(void **pixels)
{
  ssize_t
    i;

  if (pixels == (void **) NULL)
    return((void **) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (pixels[i] != (void *) NULL)
      pixels[i]=RelinquishMagickMemory(pixels[i]);
  pixels=(void **) RelinquishMagickMemory(pixels);
  return(pixels);
}

/*
  Allocate one scratch row buffer per worker thread: columns*channels
  elements of double (highres) or Quantum (otherwise).  Returns NULL on
  allocation failure (partially allocated buffers are released first).
*/
static void **AcquirePixelThreadSet(const size_t columns,const size_t channels,
  MagickBooleanType highres)
{
  ssize_t
    i;

  size_t
    number_threads;

  size_t
    size;

  void
    **pixels;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  pixels=(void **) AcquireQuantumMemory(number_threads,sizeof(*pixels));
  if (pixels == (void **) NULL)
    return((void **) NULL);
  (void) memset(pixels,0,number_threads*sizeof(*pixels));
  /*
    Element width depends on whether the transform runs in double precision.
  */
  size=sizeof(double);
  if (highres == MagickFalse)
    size=sizeof(Quantum);
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    pixels[i]=AcquireQuantumMemory(columns,channels*size);
    if (pixels[i] == (void *) NULL)
      return(DestroyPixelThreadSet(pixels));
  }
  return(pixels);
}

/*
  Release the per-thread LCMS transform handles created by
  AcquireTransformThreadSet().  Always returns NULL.
*/
static cmsHTRANSFORM *DestroyTransformThreadSet(cmsHTRANSFORM *transform)
{
  ssize_t
    i;

  assert(transform != (cmsHTRANSFORM *) NULL);
  for (i=0; i < (ssize_t) GetMagickResourceLimit(ThreadResource); i++)
    if (transform[i] != (cmsHTRANSFORM) NULL)
      cmsDeleteTransform(transform[i]);
  transform=(cmsHTRANSFORM *) RelinquishMagickMemory(transform);
  return(transform);
}

/*
  Create one LCMS transform handle per worker thread (LCMS transforms are not
  safely shareable across threads without this).  Returns NULL on failure,
  releasing any handles already created.
*/
static cmsHTRANSFORM *AcquireTransformThreadSet(const LCMSInfo *source_info,
  const LCMSInfo *target_info,const cmsUInt32Number flags,
  cmsContext cms_context)
{
  cmsHTRANSFORM
    *transform;

  size_t
    number_threads;

  ssize_t
    i;

  number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
  transform=(cmsHTRANSFORM *) AcquireQuantumMemory(number_threads,
    sizeof(*transform));
  if (transform == (cmsHTRANSFORM *) NULL)
    return((cmsHTRANSFORM *) NULL);
  (void) memset(transform,0,number_threads*sizeof(*transform));
  for (i=0; i < (ssize_t) number_threads; i++)
  {
    transform[i]=cmsCreateTransformTHR(cms_context,source_info->profile,
      source_info->type,target_info->profile,target_info->type,
      target_info->intent,flags);
    if (transform[i] == (cmsHTRANSFORM) NULL)
      return(DestroyTransformThreadSet(transform));
  }
  return(transform);
}

/*
  LCMS error callback: recover the CMSExceptionInfo stashed as context user
  data and forward the LCMS message into the ImageMagick exception, tagged
  with the image filename when one is available.
*/
static void CMSExceptionHandler(cmsContext context,cmsUInt32Number severity,
  const char *message)
{
  CMSExceptionInfo
    *cms_exception;

  ExceptionInfo
    *exception;

  Image
    *image;

  cms_exception=(CMSExceptionInfo *) cmsGetContextUserData(context);
  if (cms_exception == (CMSExceptionInfo *) NULL)
    return;
  exception=cms_exception->exception;
  if (exception == (ExceptionInfo *) NULL)
    return;
  image=cms_exception->image;
  if (image == (Image *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "UnableToTransformColorspace","`%s'","unknown context");
      return;
    }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),"lcms: #%u, %s",
      severity,message != (char *) NULL ? message : "no message");
  (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
    "UnableToTransformColorspace","`%s', %s (#%u)",image->filename,
    message != (char *) NULL ? message : "no message",severity);
}

/*
  Transform one row of pixels through the thread's LCMS transform in double
  precision: gather Quantum channels into the source scratch buffer (scaled
  and translated per LCMSInfo), run cmsDoTransform, then scatter the result
  back into the pixel row (rescaled and clamped to Quantum range).
*/
static void TransformDoublePixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
#define GetLCMSPixel(source_info,pixel,index) \
  (source_info->scale[index]*((QuantumScale*pixel)+source_info->translate[index]))
#define SetLCMSPixel(target_info,pixel,index) \
  ClampToQuantum(target_info->scale[index]*((QuantumRange*pixel)+target_info->translate[index]))

  double
    *p;

  ssize_t
    x;

  /*
    Gather: pack 1, 3, or 4 channels per pixel into the scratch buffer.
  */
  p=(double *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetLCMSPixel(source_info,GetPixelRed(image,q),0);
    if (source_info->channels > 1)
      {
        *p++=GetLCMSPixel(source_info,GetPixelGreen(image,q),1);
        *p++=GetLCMSPixel(source_info,GetPixelBlue(image,q),2);
      }
    if (source_info->channels > 3)
      *p++=GetLCMSPixel(source_info,GetPixelBlack(image,q),3);
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id],
    (unsigned int) image->columns);
  /*
    Scatter: rewind q to the start of the row and write the result back.
  */
  p=(double *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,SetLCMSPixel(target_info,*p,0),q);
    else
      SetPixelRed(image,SetLCMSPixel(target_info,*p,0),q);
    p++;
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,SetLCMSPixel(target_info,*p,1),q);
        p++;
        SetPixelBlue(image,SetLCMSPixel(target_info,*p,2),q);
        p++;
      }
    if (target_info->channels > 3)
      {
        SetPixelBlack(image,SetLCMSPixel(target_info,*p,3),q);
        p++;
      }
    q+=GetPixelChannels(image);
  }
}

/*
  Same gather/transform/scatter as TransformDoublePixels, but in native
  Quantum precision (no scale/translate; LCMS consumes Quantum directly).
*/
static void TransformQuantumPixels(const int id,const Image* image,
  const LCMSInfo *source_info,const LCMSInfo *target_info,
  const cmsHTRANSFORM *transform,Quantum *q)
{
  Quantum
    *p;

  ssize_t
    x;

  p=(Quantum *) source_info->pixels[id];
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    *p++=GetPixelRed(image,q);
    if (source_info->channels > 1)
      {
        *p++=GetPixelGreen(image,q);
        *p++=GetPixelBlue(image,q);
      }
    if (source_info->channels > 3)
      *p++=GetPixelBlack(image,q);
    q+=GetPixelChannels(image);
  }
  cmsDoTransform(transform[id],source_info->pixels[id],target_info->pixels[id],
    (unsigned int) image->columns);
  p=(Quantum *) target_info->pixels[id];
  q-=GetPixelChannels(image)*image->columns;
  for (x=0; x < (ssize_t) image->columns; x++)
  {
    if (target_info->channels == 1)
      SetPixelGray(image,*p++,q);
    else
      SetPixelRed(image,*p++,q);
    if (target_info->channels > 1)
      {
        SetPixelGreen(image,*p++,q);
        SetPixelBlue(image,*p++,q);
      }
    if (target_info->channels > 3)
      SetPixelBlack(image,*p++,q);
    q+=GetPixelChannels(image);
  }
}

/*
  Set all four per-channel translate offsets to the same value.
*/
static inline void SetLCMSInfoTranslate(LCMSInfo *info,const double translate)
{
  info->translate[0]=translate;
  info->translate[1]=translate;
  info->translate[2]=translate;
  info->translate[3]=translate;
}

/*
  Set all four per-channel scale factors to the same value.
*/
static inline void SetLCMSInfoScale(LCMSInfo *info,const double scale)
{
  info->scale[0]=scale;
  info->scale[1]=scale;
  info->scale[2]=scale;
  info->scale[3]=scale;
}
#endif

/*
  Attach a built-in sRGB ICC profile (ArgyllCMS-derived, public domain) to the
  image under the name "icc".  A no-op (returns MagickFalse) when the image
  already carries an ICC profile.
*/
static MagickBooleanType SetsRGBImageProfile(Image *image,
  ExceptionInfo *exception)
{
  static unsigned char
    sRGBProfile[] =
    {
      0x00, 0x00, 0x0c, 0x8c, 0x61, 0x72, 0x67, 0x6c, 0x02, 0x20, 0x00, 0x00,
      0x6d, 0x6e, 0x74, 0x72, 0x52, 0x47, 0x42, 0x20, 0x58, 0x59, 0x5a, 0x20,
      0x07, 0xde, 0x00, 0x01, 0x00, 0x06, 0x00, 0x16, 0x00, 0x0f, 0x00, 0x3a,
0x61, 0x63, 0x73, 0x70, 0x4d, 0x53, 0x46, 0x54, 0x00, 0x00, 0x00, 0x00, 0x49, 0x45, 0x43, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xd6, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xd3, 0x2d, 0x61, 0x72, 0x67, 0x6c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x01, 0x50, 0x00, 0x00, 0x00, 0x99, 0x63, 0x70, 0x72, 0x74, 0x00, 0x00, 0x01, 0xec, 0x00, 0x00, 0x00, 0x67, 0x64, 0x6d, 0x6e, 0x64, 0x00, 0x00, 0x02, 0x54, 0x00, 0x00, 0x00, 0x70, 0x64, 0x6d, 0x64, 0x64, 0x00, 0x00, 0x02, 0xc4, 0x00, 0x00, 0x00, 0x88, 0x74, 0x65, 0x63, 0x68, 0x00, 0x00, 0x03, 0x4c, 0x00, 0x00, 0x00, 0x0c, 0x76, 0x75, 0x65, 0x64, 0x00, 0x00, 0x03, 0x58, 0x00, 0x00, 0x00, 0x67, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x03, 0xc0, 0x00, 0x00, 0x00, 0x24, 0x6c, 0x75, 0x6d, 0x69, 0x00, 0x00, 0x03, 0xe4, 0x00, 0x00, 0x00, 0x14, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x03, 0xf8, 0x00, 0x00, 0x00, 0x24, 0x77, 0x74, 0x70, 0x74, 0x00, 0x00, 0x04, 0x1c, 0x00, 0x00, 0x00, 0x14, 0x62, 0x6b, 0x70, 0x74, 0x00, 0x00, 0x04, 0x30, 0x00, 0x00, 0x00, 0x14, 0x72, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x44, 0x00, 0x00, 0x00, 0x14, 0x67, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x58, 0x00, 0x00, 0x00, 0x14, 0x62, 0x58, 0x59, 0x5a, 0x00, 0x00, 0x04, 0x6c, 0x00, 0x00, 0x00, 0x14, 0x72, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x67, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x62, 0x54, 0x52, 0x43, 0x00, 0x00, 0x04, 0x80, 0x00, 0x00, 0x08, 0x0c, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 
0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3f, 0x73, 0x52, 0x47, 0x42, 0x20, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x28, 0x45, 0x71, 0x75, 0x69, 0x76, 0x61, 0x6c, 0x65, 0x6e, 0x74, 0x20, 0x74, 0x6f, 0x20, 0x77, 0x77, 0x77, 0x2e, 0x73, 0x72, 0x67, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x20, 0x31, 0x39, 0x39, 0x38, 0x20, 0x48, 0x50, 0x20, 0x70, 0x72, 0x6f, 0x66, 0x69, 0x6c, 0x65, 0x29, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x74, 0x65, 0x78, 0x74, 0x00, 0x00, 0x00, 0x00, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x20, 0x62, 0x79, 0x20, 0x47, 0x72, 0x61, 0x65, 0x6d, 0x65, 0x20, 0x57, 0x2e, 0x20, 0x47, 0x69, 0x6c, 0x6c, 0x2e, 0x20, 0x52, 0x65, 0x6c, 0x65, 0x61, 0x73, 0x65, 0x64, 0x20, 0x69, 0x6e, 0x74, 0x6f, 0x20, 0x74, 0x68, 0x65, 0x20, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x20, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x2e, 0x20, 0x4e, 0x6f, 0x20, 0x57, 0x61, 0x72, 0x72, 0x61, 0x6e, 0x74, 0x79, 0x2c, 0x20, 0x55, 0x73, 0x65, 0x20, 0x61, 0x74, 0x20, 0x79, 0x6f, 0x75, 0x72, 0x20, 0x6f, 0x77, 0x6e, 0x20, 0x72, 0x69, 0x73, 0x6b, 0x2e, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x16, 0x49, 0x45, 0x43, 0x20, 0x68, 0x74, 0x74, 0x70, 0x3a, 0x2f, 0x2f, 0x77, 0x77, 0x77, 0x2e, 0x69, 0x65, 0x63, 0x2e, 0x63, 0x68, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x2e, 0x49, 0x45, 0x43, 0x20, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x20, 0x44, 0x65, 0x66, 0x61, 0x75, 0x6c, 0x74, 0x20, 0x52, 0x47, 0x42, 0x20, 0x63, 0x6f, 0x6c, 0x6f, 0x75, 0x72, 0x20, 0x73, 0x70, 0x61, 0x63, 0x65, 0x20, 0x2d, 0x20, 0x73, 0x52, 0x47, 0x42, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x73, 0x69, 0x67, 0x20, 0x00, 0x00, 0x00, 0x00, 0x43, 0x52, 0x54, 0x20, 0x64, 0x65, 0x73, 0x63, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x49, 0x45, 0x43, 0x36, 0x31, 0x39, 0x36, 0x36, 0x2d, 0x32, 0x2e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x76, 0x69, 0x65, 0x77, 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0xa4, 0x7c, 0x00, 0x14, 0x5f, 0x30, 0x00, 0x10, 0xce, 0x02, 0x00, 0x03, 0xed, 0xb2, 0x00, 0x04, 0x13, 0x0a, 0x00, 0x03, 0x5c, 0x67, 0x00, 0x00, 0x00, 0x01, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4c, 0x0a, 0x3d, 0x00, 0x50, 0x00, 0x00, 0x00, 0x57, 0x1e, 0xb8, 0x6d, 0x65, 0x61, 0x73, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x8f, 0x00, 0x00, 0x00, 0x02, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf3, 0x51, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01, 0x16, 0xcc, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x6f, 0xa0, 0x00, 0x00, 0x38, 0xf5, 0x00, 0x00, 0x03, 0x90, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x62, 0x97, 0x00, 0x00, 0xb7, 0x87, 0x00, 0x00, 0x18, 0xd9, 0x58, 0x59, 0x5a, 0x20, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x24, 0x9f, 0x00, 0x00, 0x0f, 0x84, 0x00, 0x00, 0xb6, 0xc4, 0x63, 0x75, 0x72, 0x76, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x0a, 0x00, 0x0f, 0x00, 0x14, 0x00, 0x19, 0x00, 0x1e, 0x00, 0x23, 0x00, 0x28, 0x00, 0x2d, 0x00, 0x32, 0x00, 0x37, 0x00, 0x3b, 0x00, 0x40, 0x00, 0x45, 0x00, 0x4a, 0x00, 0x4f, 0x00, 0x54, 0x00, 0x59, 0x00, 0x5e, 0x00, 0x63, 0x00, 0x68, 0x00, 0x6d, 0x00, 0x72, 0x00, 0x77, 0x00, 0x7c, 0x00, 0x81, 0x00, 0x86, 0x00, 0x8b, 0x00, 0x90, 0x00, 0x95, 0x00, 0x9a, 0x00, 0x9f, 0x00, 0xa4, 0x00, 0xa9, 0x00, 0xae, 0x00, 0xb2, 0x00, 0xb7, 0x00, 0xbc, 0x00, 0xc1, 0x00, 0xc6, 0x00, 0xcb, 0x00, 0xd0, 0x00, 0xd5, 0x00, 0xdb, 0x00, 0xe0, 0x00, 0xe5, 0x00, 0xeb, 0x00, 0xf0, 0x00, 0xf6, 0x00, 0xfb, 0x01, 0x01, 0x01, 0x07, 0x01, 0x0d, 0x01, 0x13, 0x01, 0x19, 0x01, 0x1f, 0x01, 0x25, 0x01, 0x2b, 0x01, 0x32, 0x01, 0x38, 0x01, 0x3e, 0x01, 0x45, 0x01, 0x4c, 0x01, 0x52, 0x01, 0x59, 0x01, 0x60, 0x01, 0x67, 0x01, 0x6e, 0x01, 0x75, 0x01, 0x7c, 0x01, 0x83, 0x01, 0x8b, 0x01, 0x92, 0x01, 0x9a, 0x01, 0xa1, 0x01, 0xa9, 0x01, 0xb1, 0x01, 0xb9, 0x01, 0xc1, 0x01, 0xc9, 0x01, 0xd1, 0x01, 0xd9, 0x01, 0xe1, 0x01, 0xe9, 0x01, 0xf2, 0x01, 0xfa, 0x02, 0x03, 0x02, 0x0c, 0x02, 0x14, 0x02, 0x1d, 0x02, 0x26, 0x02, 0x2f, 0x02, 0x38, 0x02, 0x41, 0x02, 0x4b, 0x02, 0x54, 0x02, 0x5d, 0x02, 0x67, 0x02, 0x71, 0x02, 0x7a, 0x02, 0x84, 
0x02, 0x8e, 0x02, 0x98, 0x02, 0xa2, 0x02, 0xac, 0x02, 0xb6, 0x02, 0xc1, 0x02, 0xcb, 0x02, 0xd5, 0x02, 0xe0, 0x02, 0xeb, 0x02, 0xf5, 0x03, 0x00, 0x03, 0x0b, 0x03, 0x16, 0x03, 0x21, 0x03, 0x2d, 0x03, 0x38, 0x03, 0x43, 0x03, 0x4f, 0x03, 0x5a, 0x03, 0x66, 0x03, 0x72, 0x03, 0x7e, 0x03, 0x8a, 0x03, 0x96, 0x03, 0xa2, 0x03, 0xae, 0x03, 0xba, 0x03, 0xc7, 0x03, 0xd3, 0x03, 0xe0, 0x03, 0xec, 0x03, 0xf9, 0x04, 0x06, 0x04, 0x13, 0x04, 0x20, 0x04, 0x2d, 0x04, 0x3b, 0x04, 0x48, 0x04, 0x55, 0x04, 0x63, 0x04, 0x71, 0x04, 0x7e, 0x04, 0x8c, 0x04, 0x9a, 0x04, 0xa8, 0x04, 0xb6, 0x04, 0xc4, 0x04, 0xd3, 0x04, 0xe1, 0x04, 0xf0, 0x04, 0xfe, 0x05, 0x0d, 0x05, 0x1c, 0x05, 0x2b, 0x05, 0x3a, 0x05, 0x49, 0x05, 0x58, 0x05, 0x67, 0x05, 0x77, 0x05, 0x86, 0x05, 0x96, 0x05, 0xa6, 0x05, 0xb5, 0x05, 0xc5, 0x05, 0xd5, 0x05, 0xe5, 0x05, 0xf6, 0x06, 0x06, 0x06, 0x16, 0x06, 0x27, 0x06, 0x37, 0x06, 0x48, 0x06, 0x59, 0x06, 0x6a, 0x06, 0x7b, 0x06, 0x8c, 0x06, 0x9d, 0x06, 0xaf, 0x06, 0xc0, 0x06, 0xd1, 0x06, 0xe3, 0x06, 0xf5, 0x07, 0x07, 0x07, 0x19, 0x07, 0x2b, 0x07, 0x3d, 0x07, 0x4f, 0x07, 0x61, 0x07, 0x74, 0x07, 0x86, 0x07, 0x99, 0x07, 0xac, 0x07, 0xbf, 0x07, 0xd2, 0x07, 0xe5, 0x07, 0xf8, 0x08, 0x0b, 0x08, 0x1f, 0x08, 0x32, 0x08, 0x46, 0x08, 0x5a, 0x08, 0x6e, 0x08, 0x82, 0x08, 0x96, 0x08, 0xaa, 0x08, 0xbe, 0x08, 0xd2, 0x08, 0xe7, 0x08, 0xfb, 0x09, 0x10, 0x09, 0x25, 0x09, 0x3a, 0x09, 0x4f, 0x09, 0x64, 0x09, 0x79, 0x09, 0x8f, 0x09, 0xa4, 0x09, 0xba, 0x09, 0xcf, 0x09, 0xe5, 0x09, 0xfb, 0x0a, 0x11, 0x0a, 0x27, 0x0a, 0x3d, 0x0a, 0x54, 0x0a, 0x6a, 0x0a, 0x81, 0x0a, 0x98, 0x0a, 0xae, 0x0a, 0xc5, 0x0a, 0xdc, 0x0a, 0xf3, 0x0b, 0x0b, 0x0b, 0x22, 0x0b, 0x39, 0x0b, 0x51, 0x0b, 0x69, 0x0b, 0x80, 0x0b, 0x98, 0x0b, 0xb0, 0x0b, 0xc8, 0x0b, 0xe1, 0x0b, 0xf9, 0x0c, 0x12, 0x0c, 0x2a, 0x0c, 0x43, 0x0c, 0x5c, 0x0c, 0x75, 0x0c, 0x8e, 0x0c, 0xa7, 0x0c, 0xc0, 0x0c, 0xd9, 0x0c, 0xf3, 0x0d, 0x0d, 0x0d, 0x26, 0x0d, 0x40, 0x0d, 0x5a, 0x0d, 0x74, 0x0d, 0x8e, 0x0d, 0xa9, 0x0d, 0xc3, 0x0d, 0xde, 0x0d, 0xf8, 0x0e, 0x13, 0x0e, 0x2e, 0x0e, 
0x49, 0x0e, 0x64, 0x0e, 0x7f, 0x0e, 0x9b, 0x0e, 0xb6, 0x0e, 0xd2, 0x0e, 0xee, 0x0f, 0x09, 0x0f, 0x25, 0x0f, 0x41, 0x0f, 0x5e, 0x0f, 0x7a, 0x0f, 0x96, 0x0f, 0xb3, 0x0f, 0xcf, 0x0f, 0xec, 0x10, 0x09, 0x10, 0x26, 0x10, 0x43, 0x10, 0x61, 0x10, 0x7e, 0x10, 0x9b, 0x10, 0xb9, 0x10, 0xd7, 0x10, 0xf5, 0x11, 0x13, 0x11, 0x31, 0x11, 0x4f, 0x11, 0x6d, 0x11, 0x8c, 0x11, 0xaa, 0x11, 0xc9, 0x11, 0xe8, 0x12, 0x07, 0x12, 0x26, 0x12, 0x45, 0x12, 0x64, 0x12, 0x84, 0x12, 0xa3, 0x12, 0xc3, 0x12, 0xe3, 0x13, 0x03, 0x13, 0x23, 0x13, 0x43, 0x13, 0x63, 0x13, 0x83, 0x13, 0xa4, 0x13, 0xc5, 0x13, 0xe5, 0x14, 0x06, 0x14, 0x27, 0x14, 0x49, 0x14, 0x6a, 0x14, 0x8b, 0x14, 0xad, 0x14, 0xce, 0x14, 0xf0, 0x15, 0x12, 0x15, 0x34, 0x15, 0x56, 0x15, 0x78, 0x15, 0x9b, 0x15, 0xbd, 0x15, 0xe0, 0x16, 0x03, 0x16, 0x26, 0x16, 0x49, 0x16, 0x6c, 0x16, 0x8f, 0x16, 0xb2, 0x16, 0xd6, 0x16, 0xfa, 0x17, 0x1d, 0x17, 0x41, 0x17, 0x65, 0x17, 0x89, 0x17, 0xae, 0x17, 0xd2, 0x17, 0xf7, 0x18, 0x1b, 0x18, 0x40, 0x18, 0x65, 0x18, 0x8a, 0x18, 0xaf, 0x18, 0xd5, 0x18, 0xfa, 0x19, 0x20, 0x19, 0x45, 0x19, 0x6b, 0x19, 0x91, 0x19, 0xb7, 0x19, 0xdd, 0x1a, 0x04, 0x1a, 0x2a, 0x1a, 0x51, 0x1a, 0x77, 0x1a, 0x9e, 0x1a, 0xc5, 0x1a, 0xec, 0x1b, 0x14, 0x1b, 0x3b, 0x1b, 0x63, 0x1b, 0x8a, 0x1b, 0xb2, 0x1b, 0xda, 0x1c, 0x02, 0x1c, 0x2a, 0x1c, 0x52, 0x1c, 0x7b, 0x1c, 0xa3, 0x1c, 0xcc, 0x1c, 0xf5, 0x1d, 0x1e, 0x1d, 0x47, 0x1d, 0x70, 0x1d, 0x99, 0x1d, 0xc3, 0x1d, 0xec, 0x1e, 0x16, 0x1e, 0x40, 0x1e, 0x6a, 0x1e, 0x94, 0x1e, 0xbe, 0x1e, 0xe9, 0x1f, 0x13, 0x1f, 0x3e, 0x1f, 0x69, 0x1f, 0x94, 0x1f, 0xbf, 0x1f, 0xea, 0x20, 0x15, 0x20, 0x41, 0x20, 0x6c, 0x20, 0x98, 0x20, 0xc4, 0x20, 0xf0, 0x21, 0x1c, 0x21, 0x48, 0x21, 0x75, 0x21, 0xa1, 0x21, 0xce, 0x21, 0xfb, 0x22, 0x27, 0x22, 0x55, 0x22, 0x82, 0x22, 0xaf, 0x22, 0xdd, 0x23, 0x0a, 0x23, 0x38, 0x23, 0x66, 0x23, 0x94, 0x23, 0xc2, 0x23, 0xf0, 0x24, 0x1f, 0x24, 0x4d, 0x24, 0x7c, 0x24, 0xab, 0x24, 0xda, 0x25, 0x09, 0x25, 0x38, 0x25, 0x68, 0x25, 0x97, 0x25, 0xc7, 0x25, 0xf7, 0x26, 0x27, 0x26, 0x57, 0x26, 0x87, 
0x26, 0xb7, 0x26, 0xe8, 0x27, 0x18, 0x27, 0x49, 0x27, 0x7a, 0x27, 0xab, 0x27, 0xdc, 0x28, 0x0d, 0x28, 0x3f, 0x28, 0x71, 0x28, 0xa2, 0x28, 0xd4, 0x29, 0x06, 0x29, 0x38, 0x29, 0x6b, 0x29, 0x9d, 0x29, 0xd0, 0x2a, 0x02, 0x2a, 0x35, 0x2a, 0x68, 0x2a, 0x9b, 0x2a, 0xcf, 0x2b, 0x02, 0x2b, 0x36, 0x2b, 0x69, 0x2b, 0x9d, 0x2b, 0xd1, 0x2c, 0x05, 0x2c, 0x39, 0x2c, 0x6e, 0x2c, 0xa2, 0x2c, 0xd7, 0x2d, 0x0c, 0x2d, 0x41, 0x2d, 0x76, 0x2d, 0xab, 0x2d, 0xe1, 0x2e, 0x16, 0x2e, 0x4c, 0x2e, 0x82, 0x2e, 0xb7, 0x2e, 0xee, 0x2f, 0x24, 0x2f, 0x5a, 0x2f, 0x91, 0x2f, 0xc7, 0x2f, 0xfe, 0x30, 0x35, 0x30, 0x6c, 0x30, 0xa4, 0x30, 0xdb, 0x31, 0x12, 0x31, 0x4a, 0x31, 0x82, 0x31, 0xba, 0x31, 0xf2, 0x32, 0x2a, 0x32, 0x63, 0x32, 0x9b, 0x32, 0xd4, 0x33, 0x0d, 0x33, 0x46, 0x33, 0x7f, 0x33, 0xb8, 0x33, 0xf1, 0x34, 0x2b, 0x34, 0x65, 0x34, 0x9e, 0x34, 0xd8, 0x35, 0x13, 0x35, 0x4d, 0x35, 0x87, 0x35, 0xc2, 0x35, 0xfd, 0x36, 0x37, 0x36, 0x72, 0x36, 0xae, 0x36, 0xe9, 0x37, 0x24, 0x37, 0x60, 0x37, 0x9c, 0x37, 0xd7, 0x38, 0x14, 0x38, 0x50, 0x38, 0x8c, 0x38, 0xc8, 0x39, 0x05, 0x39, 0x42, 0x39, 0x7f, 0x39, 0xbc, 0x39, 0xf9, 0x3a, 0x36, 0x3a, 0x74, 0x3a, 0xb2, 0x3a, 0xef, 0x3b, 0x2d, 0x3b, 0x6b, 0x3b, 0xaa, 0x3b, 0xe8, 0x3c, 0x27, 0x3c, 0x65, 0x3c, 0xa4, 0x3c, 0xe3, 0x3d, 0x22, 0x3d, 0x61, 0x3d, 0xa1, 0x3d, 0xe0, 0x3e, 0x20, 0x3e, 0x60, 0x3e, 0xa0, 0x3e, 0xe0, 0x3f, 0x21, 0x3f, 0x61, 0x3f, 0xa2, 0x3f, 0xe2, 0x40, 0x23, 0x40, 0x64, 0x40, 0xa6, 0x40, 0xe7, 0x41, 0x29, 0x41, 0x6a, 0x41, 0xac, 0x41, 0xee, 0x42, 0x30, 0x42, 0x72, 0x42, 0xb5, 0x42, 0xf7, 0x43, 0x3a, 0x43, 0x7d, 0x43, 0xc0, 0x44, 0x03, 0x44, 0x47, 0x44, 0x8a, 0x44, 0xce, 0x45, 0x12, 0x45, 0x55, 0x45, 0x9a, 0x45, 0xde, 0x46, 0x22, 0x46, 0x67, 0x46, 0xab, 0x46, 0xf0, 0x47, 0x35, 0x47, 0x7b, 0x47, 0xc0, 0x48, 0x05, 0x48, 0x4b, 0x48, 0x91, 0x48, 0xd7, 0x49, 0x1d, 0x49, 0x63, 0x49, 0xa9, 0x49, 0xf0, 0x4a, 0x37, 0x4a, 0x7d, 0x4a, 0xc4, 0x4b, 0x0c, 0x4b, 0x53, 0x4b, 0x9a, 0x4b, 0xe2, 0x4c, 0x2a, 0x4c, 0x72, 0x4c, 0xba, 0x4d, 0x02, 0x4d, 0x4a, 0x4d, 0x93, 0x4d, 
0xdc, 0x4e, 0x25, 0x4e, 0x6e, 0x4e, 0xb7, 0x4f, 0x00, 0x4f, 0x49, 0x4f, 0x93, 0x4f, 0xdd, 0x50, 0x27, 0x50, 0x71, 0x50, 0xbb, 0x51, 0x06, 0x51, 0x50, 0x51, 0x9b, 0x51, 0xe6, 0x52, 0x31, 0x52, 0x7c, 0x52, 0xc7, 0x53, 0x13, 0x53, 0x5f, 0x53, 0xaa, 0x53, 0xf6, 0x54, 0x42, 0x54, 0x8f, 0x54, 0xdb, 0x55, 0x28, 0x55, 0x75, 0x55, 0xc2, 0x56, 0x0f, 0x56, 0x5c, 0x56, 0xa9, 0x56, 0xf7, 0x57, 0x44, 0x57, 0x92, 0x57, 0xe0, 0x58, 0x2f, 0x58, 0x7d, 0x58, 0xcb, 0x59, 0x1a, 0x59, 0x69, 0x59, 0xb8, 0x5a, 0x07, 0x5a, 0x56, 0x5a, 0xa6, 0x5a, 0xf5, 0x5b, 0x45, 0x5b, 0x95, 0x5b, 0xe5, 0x5c, 0x35, 0x5c, 0x86, 0x5c, 0xd6, 0x5d, 0x27, 0x5d, 0x78, 0x5d, 0xc9, 0x5e, 0x1a, 0x5e, 0x6c, 0x5e, 0xbd, 0x5f, 0x0f, 0x5f, 0x61, 0x5f, 0xb3, 0x60, 0x05, 0x60, 0x57, 0x60, 0xaa, 0x60, 0xfc, 0x61, 0x4f, 0x61, 0xa2, 0x61, 0xf5, 0x62, 0x49, 0x62, 0x9c, 0x62, 0xf0, 0x63, 0x43, 0x63, 0x97, 0x63, 0xeb, 0x64, 0x40, 0x64, 0x94, 0x64, 0xe9, 0x65, 0x3d, 0x65, 0x92, 0x65, 0xe7, 0x66, 0x3d, 0x66, 0x92, 0x66, 0xe8, 0x67, 0x3d, 0x67, 0x93, 0x67, 0xe9, 0x68, 0x3f, 0x68, 0x96, 0x68, 0xec, 0x69, 0x43, 0x69, 0x9a, 0x69, 0xf1, 0x6a, 0x48, 0x6a, 0x9f, 0x6a, 0xf7, 0x6b, 0x4f, 0x6b, 0xa7, 0x6b, 0xff, 0x6c, 0x57, 0x6c, 0xaf, 0x6d, 0x08, 0x6d, 0x60, 0x6d, 0xb9, 0x6e, 0x12, 0x6e, 0x6b, 0x6e, 0xc4, 0x6f, 0x1e, 0x6f, 0x78, 0x6f, 0xd1, 0x70, 0x2b, 0x70, 0x86, 0x70, 0xe0, 0x71, 0x3a, 0x71, 0x95, 0x71, 0xf0, 0x72, 0x4b, 0x72, 0xa6, 0x73, 0x01, 0x73, 0x5d, 0x73, 0xb8, 0x74, 0x14, 0x74, 0x70, 0x74, 0xcc, 0x75, 0x28, 0x75, 0x85, 0x75, 0xe1, 0x76, 0x3e, 0x76, 0x9b, 0x76, 0xf8, 0x77, 0x56, 0x77, 0xb3, 0x78, 0x11, 0x78, 0x6e, 0x78, 0xcc, 0x79, 0x2a, 0x79, 0x89, 0x79, 0xe7, 0x7a, 0x46, 0x7a, 0xa5, 0x7b, 0x04, 0x7b, 0x63, 0x7b, 0xc2, 0x7c, 0x21, 0x7c, 0x81, 0x7c, 0xe1, 0x7d, 0x41, 0x7d, 0xa1, 0x7e, 0x01, 0x7e, 0x62, 0x7e, 0xc2, 0x7f, 0x23, 0x7f, 0x84, 0x7f, 0xe5, 0x80, 0x47, 0x80, 0xa8, 0x81, 0x0a, 0x81, 0x6b, 0x81, 0xcd, 0x82, 0x30, 0x82, 0x92, 0x82, 0xf4, 0x83, 0x57, 0x83, 0xba, 0x84, 0x1d, 0x84, 0x80, 0x84, 0xe3, 0x85, 0x47, 0x85, 0xab, 
0x86, 0x0e, 0x86, 0x72, 0x86, 0xd7, 0x87, 0x3b, 0x87, 0x9f, 0x88, 0x04, 0x88, 0x69, 0x88, 0xce, 0x89, 0x33, 0x89, 0x99, 0x89, 0xfe, 0x8a, 0x64, 0x8a, 0xca, 0x8b, 0x30, 0x8b, 0x96, 0x8b, 0xfc, 0x8c, 0x63, 0x8c, 0xca, 0x8d, 0x31, 0x8d, 0x98, 0x8d, 0xff, 0x8e, 0x66, 0x8e, 0xce, 0x8f, 0x36, 0x8f, 0x9e, 0x90, 0x06, 0x90, 0x6e, 0x90, 0xd6, 0x91, 0x3f, 0x91, 0xa8, 0x92, 0x11, 0x92, 0x7a, 0x92, 0xe3, 0x93, 0x4d, 0x93, 0xb6, 0x94, 0x20, 0x94, 0x8a, 0x94, 0xf4, 0x95, 0x5f, 0x95, 0xc9, 0x96, 0x34, 0x96, 0x9f, 0x97, 0x0a, 0x97, 0x75, 0x97, 0xe0, 0x98, 0x4c, 0x98, 0xb8, 0x99, 0x24, 0x99, 0x90, 0x99, 0xfc, 0x9a, 0x68, 0x9a, 0xd5, 0x9b, 0x42, 0x9b, 0xaf, 0x9c, 0x1c, 0x9c, 0x89, 0x9c, 0xf7, 0x9d, 0x64, 0x9d, 0xd2, 0x9e, 0x40, 0x9e, 0xae, 0x9f, 0x1d, 0x9f, 0x8b, 0x9f, 0xfa, 0xa0, 0x69, 0xa0, 0xd8, 0xa1, 0x47, 0xa1, 0xb6, 0xa2, 0x26, 0xa2, 0x96, 0xa3, 0x06, 0xa3, 0x76, 0xa3, 0xe6, 0xa4, 0x56, 0xa4, 0xc7, 0xa5, 0x38, 0xa5, 0xa9, 0xa6, 0x1a, 0xa6, 0x8b, 0xa6, 0xfd, 0xa7, 0x6e, 0xa7, 0xe0, 0xa8, 0x52, 0xa8, 0xc4, 0xa9, 0x37, 0xa9, 0xa9, 0xaa, 0x1c, 0xaa, 0x8f, 0xab, 0x02, 0xab, 0x75, 0xab, 0xe9, 0xac, 0x5c, 0xac, 0xd0, 0xad, 0x44, 0xad, 0xb8, 0xae, 0x2d, 0xae, 0xa1, 0xaf, 0x16, 0xaf, 0x8b, 0xb0, 0x00, 0xb0, 0x75, 0xb0, 0xea, 0xb1, 0x60, 0xb1, 0xd6, 0xb2, 0x4b, 0xb2, 0xc2, 0xb3, 0x38, 0xb3, 0xae, 0xb4, 0x25, 0xb4, 0x9c, 0xb5, 0x13, 0xb5, 0x8a, 0xb6, 0x01, 0xb6, 0x79, 0xb6, 0xf0, 0xb7, 0x68, 0xb7, 0xe0, 0xb8, 0x59, 0xb8, 0xd1, 0xb9, 0x4a, 0xb9, 0xc2, 0xba, 0x3b, 0xba, 0xb5, 0xbb, 0x2e, 0xbb, 0xa7, 0xbc, 0x21, 0xbc, 0x9b, 0xbd, 0x15, 0xbd, 0x8f, 0xbe, 0x0a, 0xbe, 0x84, 0xbe, 0xff, 0xbf, 0x7a, 0xbf, 0xf5, 0xc0, 0x70, 0xc0, 0xec, 0xc1, 0x67, 0xc1, 0xe3, 0xc2, 0x5f, 0xc2, 0xdb, 0xc3, 0x58, 0xc3, 0xd4, 0xc4, 0x51, 0xc4, 0xce, 0xc5, 0x4b, 0xc5, 0xc8, 0xc6, 0x46, 0xc6, 0xc3, 0xc7, 0x41, 0xc7, 0xbf, 0xc8, 0x3d, 0xc8, 0xbc, 0xc9, 0x3a, 0xc9, 0xb9, 0xca, 0x38, 0xca, 0xb7, 0xcb, 0x36, 0xcb, 0xb6, 0xcc, 0x35, 0xcc, 0xb5, 0xcd, 0x35, 0xcd, 0xb5, 0xce, 0x36, 0xce, 0xb6, 0xcf, 0x37, 0xcf, 0xb8, 0xd0, 
0x39, 0xd0, 0xba, 0xd1, 0x3c, 0xd1, 0xbe, 0xd2, 0x3f, 0xd2, 0xc1, 0xd3, 0x44, 0xd3, 0xc6, 0xd4, 0x49, 0xd4, 0xcb, 0xd5, 0x4e, 0xd5, 0xd1, 0xd6, 0x55, 0xd6, 0xd8, 0xd7, 0x5c, 0xd7, 0xe0, 0xd8, 0x64, 0xd8, 0xe8, 0xd9, 0x6c, 0xd9, 0xf1, 0xda, 0x76, 0xda, 0xfb, 0xdb, 0x80, 0xdc, 0x05, 0xdc, 0x8a, 0xdd, 0x10, 0xdd, 0x96, 0xde, 0x1c, 0xde, 0xa2, 0xdf, 0x29, 0xdf, 0xaf, 0xe0, 0x36, 0xe0, 0xbd, 0xe1, 0x44, 0xe1, 0xcc, 0xe2, 0x53, 0xe2, 0xdb, 0xe3, 0x63, 0xe3, 0xeb, 0xe4, 0x73, 0xe4, 0xfc, 0xe5, 0x84, 0xe6, 0x0d, 0xe6, 0x96, 0xe7, 0x1f, 0xe7, 0xa9, 0xe8, 0x32, 0xe8, 0xbc, 0xe9, 0x46, 0xe9, 0xd0, 0xea, 0x5b, 0xea, 0xe5, 0xeb, 0x70, 0xeb, 0xfb, 0xec, 0x86, 0xed, 0x11, 0xed, 0x9c, 0xee, 0x28, 0xee, 0xb4, 0xef, 0x40, 0xef, 0xcc, 0xf0, 0x58, 0xf0, 0xe5, 0xf1, 0x72, 0xf1, 0xff, 0xf2, 0x8c, 0xf3, 0x19, 0xf3, 0xa7, 0xf4, 0x34, 0xf4, 0xc2, 0xf5, 0x50, 0xf5, 0xde, 0xf6, 0x6d, 0xf6, 0xfb, 0xf7, 0x8a, 0xf8, 0x19, 0xf8, 0xa8, 0xf9, 0x38, 0xf9, 0xc7, 0xfa, 0x57, 0xfa, 0xe7, 0xfb, 0x77, 0xfc, 0x07, 0xfc, 0x98, 0xfd, 0x29, 0xfd, 0xba, 0xfe, 0x4b, 0xfe, 0xdc, 0xff, 0x6d, 0xff, 0xff }; StringInfo *profile; MagickBooleanType status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (GetImageProfile(image,"icc") != (const StringInfo *) NULL) return(MagickFalse); profile=AcquireStringInfo(sizeof(sRGBProfile)); SetStringInfoDatum(profile,sRGBProfile); status=SetImageProfile(image,"icc",profile,exception); profile=DestroyStringInfo(profile); return(status); } MagickExport MagickBooleanType ProfileImage(Image *image,const char *name, const void *datum,const size_t length,ExceptionInfo *exception) { #define ProfileImageTag "Profile/Image" #ifndef TYPE_XYZ_8 #define TYPE_XYZ_8 (COLORSPACE_SH(PT_XYZ)|CHANNELS_SH(3)|BYTES_SH(1)) #endif #define ThrowProfileException(severity,tag,context) \ { \ if (profile != (StringInfo *) NULL) \ profile=DestroyStringInfo(profile); \ if (cms_context != (cmsContext) NULL) \ cmsDeleteContext(cms_context); \ if (source_info.profile != 
(cmsHPROFILE) NULL) \ (void) cmsCloseProfile(source_info.profile); \ if (target_info.profile != (cmsHPROFILE) NULL) \ (void) cmsCloseProfile(target_info.profile); \ ThrowBinaryException(severity,tag,context); \ } MagickBooleanType status; StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(name != (const char *) NULL); if ((datum == (const void *) NULL) || (length == 0)) { char *next; /* Delete image profile(s). */ ResetImageProfileIterator(image); for (next=GetNextImageProfile(image); next != (const char *) NULL; ) { if (IsOptionMember(next,name) != MagickFalse) { (void) DeleteImageProfile(image,next); ResetImageProfileIterator(image); } next=GetNextImageProfile(image); } return(MagickTrue); } /* Add a ICC, IPTC, or generic profile to the image. */ status=MagickTrue; profile=AcquireStringInfo((size_t) length); SetStringInfoDatum(profile,(unsigned char *) datum); if ((LocaleCompare(name,"icc") != 0) && (LocaleCompare(name,"icm") != 0)) status=SetImageProfile(image,name,profile,exception); else { const StringInfo *icc_profile; icc_profile=GetImageProfile(image,"icc"); if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { const char *value; value=GetImageProperty(image,"exif:ColorSpace",exception); (void) value; if (LocaleCompare(value,"1") != 0) (void) SetsRGBImageProfile(image,exception); value=GetImageProperty(image,"exif:InteroperabilityIndex",exception); if (LocaleCompare(value,"R98.") != 0) (void) SetsRGBImageProfile(image,exception); icc_profile=GetImageProfile(image,"icc"); } if ((icc_profile != (const StringInfo *) NULL) && (CompareStringInfo(icc_profile,profile) == 0)) { profile=DestroyStringInfo(profile); return(MagickTrue); } #if !defined(MAGICKCORE_LCMS_DELEGATE) (void) ThrowMagickException(exception,GetMagickModule(), 
MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn", "'%s' (LCMS)",image->filename); #else { cmsContext cms_context; CMSExceptionInfo cms_exception; LCMSInfo source_info, target_info; /* Transform pixel colors as defined by the color profiles. */ cms_exception.image=image; cms_exception.exception=exception; cms_context=cmsCreateContext(NULL,&cms_exception); if (cms_context == (cmsContext) NULL) { profile=DestroyStringInfo(profile); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } cmsSetLogErrorHandlerTHR(cms_context,CMSExceptionHandler); source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(profile),(cmsUInt32Number) GetStringInfoLength(profile)); if (source_info.profile == (cmsHPROFILE) NULL) { profile=DestroyStringInfo(profile); cmsDeleteContext(cms_context); ThrowBinaryException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } if ((cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass) && (icc_profile == (StringInfo *) NULL)) status=SetImageProfile(image,name,profile,exception); else { CacheView *image_view; cmsColorSpaceSignature signature; cmsHTRANSFORM *magick_restrict transform; cmsUInt32Number flags; MagickBooleanType highres; MagickOffsetType progress; ssize_t y; target_info.profile=(cmsHPROFILE) NULL; if (icc_profile != (StringInfo *) NULL) { target_info.profile=source_info.profile; source_info.profile=cmsOpenProfileFromMemTHR(cms_context, GetStringInfoDatum(icc_profile),(cmsUInt32Number) GetStringInfoLength(icc_profile)); if (source_info.profile == (cmsHPROFILE) NULL) ThrowProfileException(ResourceLimitError, "ColorspaceColorProfileMismatch",name); } highres=MagickTrue; #if !defined(MAGICKCORE_HDRI_SUPPORT) || (MAGICKCORE_QUANTUM_DEPTH > 16) { const char *artifact; artifact=GetImageArtifact(image,"profile:highres-transform"); if (IsStringFalse(artifact) != MagickFalse) highres=MagickFalse; } #endif SetLCMSInfoScale(&source_info,1.0); SetLCMSInfoTranslate(&source_info,0.0); 
source_info.colorspace=sRGBColorspace; source_info.channels=3; switch (cmsGetColorSpace(source_info.profile)) { case cmsSigCmykData: { source_info.colorspace=CMYKColorspace; source_info.channels=4; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&source_info,100.0); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { source_info.colorspace=GRAYColorspace; source_info.channels=1; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { source_info.colorspace=LabColorspace; if (highres != MagickFalse) { source_info.type=(cmsUInt32Number) TYPE_Lab_DBL; source_info.scale[0]=100.0; source_info.scale[1]=255.0; source_info.scale[2]=255.0; source_info.translate[1]=(-0.5); source_info.translate[2]=(-0.5); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { source_info.colorspace=sRGBColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case cmsSigXYZData: { source_info.colorspace=XYZColorspace; if (highres != MagickFalse) source_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else source_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else 
source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } signature=cmsGetPCS(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) signature=cmsGetColorSpace(target_info.profile); SetLCMSInfoScale(&target_info,1.0); SetLCMSInfoTranslate(&target_info,0.0); target_info.channels=3; switch (signature) { case cmsSigCmykData: { target_info.colorspace=CMYKColorspace; target_info.channels=4; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_CMYK_DBL; SetLCMSInfoScale(&target_info,0.01); } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_CMYK_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_CMYK_16; #endif break; } case cmsSigGrayData: { target_info.colorspace=GRAYColorspace; target_info.channels=1; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_GRAY_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_GRAY_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_GRAY_16; #endif break; } case cmsSigLabData: { target_info.colorspace=LabColorspace; if (highres != MagickFalse) { target_info.type=(cmsUInt32Number) TYPE_Lab_DBL; target_info.scale[0]=0.01; target_info.scale[1]=1/255.0; target_info.scale[2]=1/255.0; target_info.translate[1]=0.5; target_info.translate[2]=0.5; } #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_Lab_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_Lab_16; #endif break; } case cmsSigRgbData: { target_info.colorspace=sRGBColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_RGB_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_RGB_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else target_info.type=(cmsUInt32Number) TYPE_RGB_16; #endif break; } case 
cmsSigXYZData: { target_info.colorspace=XYZColorspace; if (highres != MagickFalse) target_info.type=(cmsUInt32Number) TYPE_XYZ_DBL; #if (MAGICKCORE_QUANTUM_DEPTH == 8) else target_info.type=(cmsUInt32Number) TYPE_XYZ_8; #elif (MAGICKCORE_QUANTUM_DEPTH == 16) else source_info.type=(cmsUInt32Number) TYPE_XYZ_16; #endif break; } default: ThrowProfileException(ImageError, "ColorspaceColorProfileMismatch",name); } switch (image->rendering_intent) { case AbsoluteIntent: { target_info.intent=INTENT_ABSOLUTE_COLORIMETRIC; break; } case PerceptualIntent: { target_info.intent=INTENT_PERCEPTUAL; break; } case RelativeIntent: { target_info.intent=INTENT_RELATIVE_COLORIMETRIC; break; } case SaturationIntent: { target_info.intent=INTENT_SATURATION; break; } default: { target_info.intent=INTENT_PERCEPTUAL; break; } } flags=cmsFLAGS_HIGHRESPRECALC; #if defined(cmsFLAGS_BLACKPOINTCOMPENSATION) if (image->black_point_compensation != MagickFalse) flags|=cmsFLAGS_BLACKPOINTCOMPENSATION; #endif transform=AcquireTransformThreadSet(&source_info,&target_info,flags, cms_context); if (transform == (cmsHTRANSFORM *) NULL) ThrowProfileException(ImageError,"UnableToCreateColorTransform", name); /* Transform image as dictated by the source & target image profiles. 
*/ source_info.pixels=AcquirePixelThreadSet(image->columns, source_info.channels,highres); target_info.pixels=AcquirePixelThreadSet(image->columns, target_info.channels,highres); if ((source_info.pixels == (void **) NULL) || (target_info.pixels == (void **) NULL)) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); ThrowProfileException(ResourceLimitError, "MemoryAllocationFailed",image->filename); } if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) { target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if (source_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(source_info.profile); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); return(MagickFalse); } if (target_info.colorspace == CMYKColorspace) (void) SetImageColorspace(image,target_info.colorspace,exception); progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); MagickBooleanType sync; Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } if (highres != MagickFalse) TransformDoublePixels(id,image,&source_info,&target_info, transform,q); else TransformQuantumPixels(id,image,&source_info,&target_info, transform,q); sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if 
defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp atomic #endif progress++; proceed=SetImageProgress(image,ProfileImageTag,progress, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) SetImageColorspace(image,target_info.colorspace,exception); switch (signature) { case cmsSigRgbData: { image->type=image->alpha_trait == UndefinedPixelTrait ? TrueColorType : TrueColorAlphaType; break; } case cmsSigCmykData: { image->type=image->alpha_trait == UndefinedPixelTrait ? ColorSeparationType : ColorSeparationAlphaType; break; } case cmsSigGrayData: { image->type=image->alpha_trait == UndefinedPixelTrait ? GrayscaleType : GrayscaleAlphaType; break; } default: break; } target_info.pixels=DestroyPixelThreadSet(target_info.pixels); source_info.pixels=DestroyPixelThreadSet(source_info.pixels); transform=DestroyTransformThreadSet(transform); if ((status != MagickFalse) && (cmsGetDeviceClass(source_info.profile) != cmsSigLinkClass)) status=SetImageProfile(image,name,profile,exception); if (target_info.profile != (cmsHPROFILE) NULL) (void) cmsCloseProfile(target_info.profile); } (void) cmsCloseProfile(source_info.profile); cmsDeleteContext(cms_context); } #endif } profile=DestroyStringInfo(profile); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e m o v e I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RemoveImageProfile() removes a named profile from the image and returns its % value. % % The format of the RemoveImageProfile method is: % % void *RemoveImageProfile(Image *image,const char *name) % % A description of each parameter follows: % % o image: the image. % % o name: the profile name. 
% */ MagickExport StringInfo *RemoveImageProfile(Image *image,const char *name) { StringInfo *profile; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return((StringInfo *) NULL); WriteTo8BimProfile(image,name,(StringInfo *) NULL); profile=(StringInfo *) RemoveNodeFromSplayTree((SplayTreeInfo *) image->profiles,name); return(profile); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t P r o f i l e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageProfileIterator() resets the image profile iterator. Use it in % conjunction with GetNextImageProfile() to iterate over all the profiles % associated with an image. % % The format of the ResetImageProfileIterator method is: % % ResetImageProfileIterator(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void ResetImageProfileIterator(const Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->profiles == (SplayTreeInfo *) NULL) return; ResetSplayTreeIterator((SplayTreeInfo *) image->profiles); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e P r o f i l e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageProfile() adds a named profile to the image. If a profile with the % same name already exists, it is replaced. This method differs from the % ProfileImage() method in that it does not apply CMS color profiles. 
%
%  The format of the SetImageProfile method is:
%
%      MagickBooleanType SetImageProfile(Image *image,const char *name,
%        const StringInfo *profile)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o name: the profile name, for example icc, exif, and 8bim (8bim is the
%      Photoshop wrapper for iptc profiles).
%
%    o profile: A StringInfo structure that contains the named profile.
%
*/

/* Splay-tree value destructor: releases a StringInfo profile node. */
static void *DestroyProfile(void *profile)
{
  return((void *) DestroyStringInfo((StringInfo *) profile));
}

/* Read one byte from a resource block; returns the advanced pointer. */
static inline const unsigned char *ReadResourceByte(const unsigned char *p,
  unsigned char *quantum)
{
  *quantum=(*p++);
  return(p);
}

/* Read a 32-bit big-endian value from a resource block. */
static inline const unsigned char *ReadResourceLong(const unsigned char *p,
  unsigned int *quantum)
{
  *quantum=(unsigned int) (*p++) << 24;
  *quantum|=(unsigned int) (*p++) << 16;
  *quantum|=(unsigned int) (*p++) << 8;
  *quantum|=(unsigned int) (*p++);
  return(p);
}

/* Read a 16-bit big-endian value from a resource block. */
static inline const unsigned char *ReadResourceShort(const unsigned char *p,
  unsigned short *quantum)
{
  *quantum=(unsigned short) (*p++) << 8;
  *quantum|=(unsigned short) (*p++);
  return(p);
}

/* Write a 32-bit big-endian value into a resource block. */
static inline void WriteResourceLong(unsigned char *p,
  const unsigned int quantum)
{
  unsigned char
    buffer[4];

  buffer[0]=(unsigned char) (quantum >> 24);
  buffer[1]=(unsigned char) (quantum >> 16);
  buffer[2]=(unsigned char) (quantum >> 8);
  buffer[3]=(unsigned char) quantum;
  (void) memcpy(p,buffer,4);
}

/*
  Propagate a named profile (icc/iptc/xmp) into the image's Photoshop 8BIM
  wrapper: locate the matching resource id in the "8bim" profile and splice
  the new payload in (or splice the resource out when profile is NULL).
*/
static void WriteTo8BimProfile(Image *image,const char *name,
  const StringInfo *profile)
{
  const unsigned char
    *datum,
    *q;

  const unsigned char
    *p;

  size_t
    length;

  StringInfo
    *profile_8bim;

  ssize_t
    count;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id,
    profile_id;

  if (LocaleCompare(name,"icc") == 0)
    profile_id=0x040f;
  else
    if (LocaleCompare(name,"iptc") == 0)
      profile_id=0x0404;
    else
      if (LocaleCompare(name,"xmp") == 0)
        profile_id=0x0424;
      else
        return;  /* only icc/iptc/xmp have an 8BIM resource id */
  profile_8bim=(StringInfo *) GetValueFromSplayTree((SplayTreeInfo *)
    image->profiles,"8bim");
  if (profile_8bim == (StringInfo *) NULL)
    return;
  datum=GetStringInfoDatum(profile_8bim);
  length=GetStringInfoLength(profile_8bim);
  /* Walk the "8BIM" resource records: marker, id, Pascal name, size, data. */
  for (p=datum; p < (datum+length-16); )
  {
    q=p;
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* Pascal name is padded to an even byte count */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((count & 0x01) != 0)
      count++;  /* resource data is padded to an even size */
    if ((count < 0) || (p > (datum+length-count)) ||
        (count > (ssize_t) length))
      break;
    if (id != profile_id)
      p+=count;
    else
      {
        size_t
          extent,
          offset;

        ssize_t
          extract_extent;

        StringInfo
          *extract_profile;

        extract_extent=0;
        extent=(datum+length)-(p+count);
        if (profile == (StringInfo *) NULL)
          {
            /* Remove the resource: copy everything before and after it. */
            offset=(q-datum);
            extract_profile=AcquireStringInfo(offset+extent);
            (void) memcpy(extract_profile->datum,datum,offset);
          }
        else
          {
            /* Replace the payload: rewrite the length word and splice. */
            offset=(p-datum);
            extract_extent=profile->length;
            if ((extract_extent & 0x01) != 0)
              extract_extent++;
            extract_profile=AcquireStringInfo(offset+extract_extent+extent);
            (void) memcpy(extract_profile->datum,datum,offset-4);
            WriteResourceLong(extract_profile->datum+offset-4,(unsigned int)
              profile->length);
            (void) memcpy(extract_profile->datum+offset,
              profile->datum,profile->length);
          }
        (void) memcpy(extract_profile->datum+offset+extract_extent,
          p+count,extent);
        (void) AddValueToSplayTree((SplayTreeInfo *) image->profiles,
          ConstantString("8bim"),CloneStringInfo(extract_profile));
        extract_profile=DestroyStringInfo(extract_profile);
        break;
      }
  }
}

/*
  Parse a Photoshop 8BIM resource block and register the embedded
  iptc/icc/exif/xmp profiles and resolution on the image.
*/
static void GetProfilesFromResourceBlock(Image *image,
  const StringInfo *resource_block,ExceptionInfo *exception)
{
  const unsigned char
    *datum;

  const unsigned char
    *p;

  size_t
    length;

  ssize_t
    count;

  StringInfo
    *profile;

  unsigned char
    length_byte;

  unsigned int
    value;

  unsigned short
    id;

  datum=GetStringInfoDatum(resource_block);
  length=GetStringInfoLength(resource_block);
  for (p=datum; p < (datum+length-16); )
  {
    if (LocaleNCompare((char *) p,"8BIM",4) != 0)
      break;
    p+=4;
    p=ReadResourceShort(p,&id);
    p=ReadResourceByte(p,&length_byte);
    p+=length_byte;
    if (((length_byte+1) & 0x01) != 0)
      p++;  /* skip name padding */
    if (p > (datum+length-4))
      break;
    p=ReadResourceLong(p,&value);
    count=(ssize_t) value;
    if ((p > (datum+length-count)) || (count > (ssize_t) length) ||
        (count < 0))
      break;
    switch (id)
    {
      case 0x03ed:
      {
        unsigned int
          resolution;

        unsigned short
          units;

        /*
          Resolution.
        */
        if (count < 10)
          break;
        p=ReadResourceLong(p,&resolution);
        image->resolution.x=((double) resolution)/65536.0;
        p=ReadResourceShort(p,&units)+2;
        p=ReadResourceLong(p,&resolution)+4;
        image->resolution.y=((double) resolution)/65536.0;
        /*
          Values are always stored as pixels per inch.
        */
        if ((ResolutionType) units != PixelsPerCentimeterResolution)
          image->units=PixelsPerInchResolution;
        else
          {
            image->units=PixelsPerCentimeterResolution;
            image->resolution.x/=2.54;
            image->resolution.y/=2.54;
          }
        break;
      }
      case 0x0404:
      {
        /*
          IPTC Profile
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"iptc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x040c:
      {
        /*
          Thumbnail.
        */
        p+=count;
        break;
      }
      case 0x040f:
      {
        /*
          ICC Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"icc",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0422:
      {
        /*
          EXIF Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"exif",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      case 0x0424:
      {
        /*
          XMP Profile.
        */
        profile=AcquireStringInfo(count);
        SetStringInfoDatum(profile,p);
        (void) SetImageProfileInternal(image,"xmp",profile,MagickTrue,
          exception);
        profile=DestroyStringInfo(profile);
        p+=count;
        break;
      }
      default:
      {
        p+=count;
        break;
      }
    }
    if ((count & 0x01) != 0)
      p++;  /* resource data padded to even length */
  }
}

/* Detect corrupt xmp/exif profiles and, if discovered, repair in place. */
static void PatchCorruptProfile(const char *name,StringInfo *profile)
{
  unsigned char
    *p;

  size_t
    length;

  /*
    Detect corrupt profiles and if discovered, repair.
  */
  if (LocaleCompare(name,"xmp") == 0)
    {
      /*
        Remove garbage after xpacket end.
      */
      p=GetStringInfoDatum(profile);
      p=(unsigned char *) strstr((const char *) p,"<?xpacket end=\"w\"?>");
      if (p != (unsigned char *) NULL)
        {
          p+=19;  /* length of the xpacket-end marker */
          length=p-GetStringInfoDatum(profile);
          if (length != GetStringInfoLength(profile))
            {
              *p='\0';
              SetStringInfoLength(profile,length);
            }
        }
      return;
    }
  if (LocaleCompare(name,"exif") == 0)
    {
      /*
        Check if profile starts with byte order marker instead of Exif.
      */
      p=GetStringInfoDatum(profile);
      if ((LocaleNCompare((const char *) p,"MM",2) == 0) ||
          (LocaleNCompare((const char *) p,"II",2) == 0))
        {
          const unsigned char
            profile_start[] = "Exif\0\0";

          StringInfo
            *exif_profile;

          /* Prepend the 6-byte "Exif\0\0" header the profile is missing. */
          exif_profile=AcquireStringInfo(6);
          if (exif_profile != (StringInfo *) NULL)
            {
              SetStringInfoDatum(exif_profile,profile_start);
              ConcatenateStringInfo(exif_profile,profile);
              SetStringInfoLength(profile,GetStringInfoLength(exif_profile));
              SetStringInfo(profile,exif_profile);
              exif_profile=DestroyStringInfo(exif_profile);
            }
        }
    }
}

#if defined(MAGICKCORE_XML_DELEGATE)
/* Validate an XMP profile by parsing it as XML; warns and returns
   MagickFalse when the payload is not well formed. */
static MagickBooleanType ValidateXMPProfile(Image *image,
  const StringInfo *profile,ExceptionInfo *exception)
{
  xmlDocPtr
    document;

  /*
    Parse XML profile.
  */
  document=xmlReadMemory((const char *) GetStringInfoDatum(profile),(int)
    GetStringInfoLength(profile),"xmp.xml",NULL,XML_PARSE_NOERROR |
    XML_PARSE_NOWARNING);
  if (document == (xmlDocPtr) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),ImageWarning,
        "CorruptImageProfile","`%s' (XMP)",image->filename);
      return(MagickFalse);
    }
  xmlFreeDoc(document);
  return(MagickTrue);
}
#else
/* Without the XML delegate, XMP validation always fails with a warning. */
static MagickBooleanType ValidateXMPProfile(Image *image,
  const StringInfo *profile,ExceptionInfo *exception)
{
  (void) ThrowMagickException(exception,GetMagickModule(),
    MissingDelegateWarning,"DelegateLibrarySupportNotBuiltIn","'%s' (XML)",
    image->filename);
  return(MagickFalse);
}
#endif

/*
  Clone, repair, validate and attach a profile under the lowercased name;
  'recursive' suppresses re-writing the 8BIM wrapper while we are already
  expanding one.
*/
static MagickBooleanType SetImageProfileInternal(Image *image,const char *name,
  const StringInfo *profile,const MagickBooleanType recursive,
  ExceptionInfo *exception)
{
  char
    key[MagickPathExtent];

  MagickBooleanType
    status;

  StringInfo
    *clone_profile;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  clone_profile=CloneStringInfo(profile);
  PatchCorruptProfile(name,clone_profile);
  if ((LocaleCompare(name,"xmp") == 0) &&
      (ValidateXMPProfile(image,clone_profile,exception) == MagickFalse))
    {
      /* Invalid XMP is dropped silently (warning already recorded). */
      clone_profile=DestroyStringInfo(clone_profile);
      return(MagickTrue);
    }
  if (image->profiles == (SplayTreeInfo *) NULL)
    image->profiles=NewSplayTree(CompareSplayTreeString,
      RelinquishMagickMemory,DestroyProfile);
  (void) CopyMagickString(key,name,MagickPathExtent);
  LocaleLower(key);
  status=AddValueToSplayTree((SplayTreeInfo *) image->profiles,
    ConstantString(key),clone_profile);
  if (status != MagickFalse)
    {
      if (LocaleCompare(name,"8bim") == 0)
        GetProfilesFromResourceBlock(image,clone_profile,exception);
      else
        if (recursive == MagickFalse)
          WriteTo8BimProfile(image,name,clone_profile);
    }
  return(status);
}

/* Public entry point: non-recursive profile attach. */
MagickExport MagickBooleanType SetImageProfile(Image *image,const char *name,
  const StringInfo *profile,ExceptionInfo *exception)
{
  return(SetImageProfileInternal(image,name,profile,MagickFalse,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S y n c I m a g e P r o f i l e s                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncImageProfiles() synchronizes image properties with the image profiles.
%  Currently we only support updating the EXIF resolution and orientation.
%
%  The format of the SyncImageProfiles method is:
%
%      MagickBooleanType SyncImageProfiles(Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/

/* Consume one byte from (*p, *length); returns EOF when exhausted. */
static inline int ReadProfileByte(unsigned char **p,size_t *length)
{
  int
    c;

  if (*length < 1)
    return(EOF);
  c=(int) (*(*p)++);
  (*length)--;
  return(c);
}

/* Read a 16-bit value in the given endianness (no pointer advance). */
static inline signed short ReadProfileShort(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned short
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned short) buffer[1] << 8;
      value|=(unsigned short) buffer[0];
      quantum.unsigned_value=value & 0xffff;
      return(quantum.signed_value);
    }
  value=(unsigned short) buffer[0] << 8;
  value|=(unsigned short) buffer[1];
  quantum.unsigned_value=value & 0xffff;
  return(quantum.signed_value);
}

/* Read a 32-bit value in the given endianness (no pointer advance). */
static inline signed int ReadProfileLong(const EndianType endian,
  unsigned char *buffer)
{
  union
  {
    unsigned int
      unsigned_value;

    signed int
      signed_value;
  } quantum;

  unsigned int
    value;

  if (endian == LSBEndian)
    {
      value=(unsigned int) buffer[3] << 24;
      value|=(unsigned int) buffer[2] << 16;
      value|=(unsigned int) buffer[1] << 8;
      value|=(unsigned int) buffer[0];
      quantum.unsigned_value=value & 0xffffffff;
      return(quantum.signed_value);
    }
  value=(unsigned int) buffer[0] << 24;
  value|=(unsigned int) buffer[1] << 16;
  value|=(unsigned int) buffer[2] << 8;
  value|=(unsigned int) buffer[3];
  quantum.unsigned_value=value & 0xffffffff;
  return(quantum.signed_value);
}

/* Read a big-endian 32-bit value, advancing (*p, *length); 0 on underrun. */
static inline signed int ReadProfileMSBLong(unsigned char **p,size_t *length)
{
  signed int
    value;

  if (*length < 4)
    return(0);
  value=ReadProfileLong(MSBEndian,*p);
  (*length)-=4;
  *p+=4;
  return(value);
}

/* Read a big-endian 16-bit value, advancing (*p, *length); 0 on underrun. */
static inline signed short ReadProfileMSBShort(unsigned char **p,
  size_t *length)
{
  signed short
    value;

  if (*length < 2)
    return(0);
  value=ReadProfileShort(MSBEndian,*p);
  (*length)-=2;
  *p+=2;
  return(value);
}

/* Write a 32-bit value in the given endianness at p. */
static inline void WriteProfileLong(const EndianType endian,
  const size_t value,unsigned char *p)
{
  unsigned char
    buffer[4];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      buffer[2]=(unsigned char) (value >> 16);
      buffer[3]=(unsigned char) (value >> 24);
      (void) memcpy(p,buffer,4);
      return;
    }
  buffer[0]=(unsigned char) (value >> 24);
  buffer[1]=(unsigned char) (value >> 16);
  buffer[2]=(unsigned char) (value >> 8);
  buffer[3]=(unsigned char) value;
  (void) memcpy(p,buffer,4);
}

/* Write a 16-bit value in the given endianness at p. */
static void WriteProfileShort(const EndianType endian,
  const unsigned short value,unsigned char *p)
{
  unsigned char
    buffer[2];

  if (endian == LSBEndian)
    {
      buffer[0]=(unsigned char) value;
      buffer[1]=(unsigned char) (value >> 8);
      (void) memcpy(p,buffer,2);
      return;
    }
  buffer[0]=(unsigned char) (value >> 8);
  buffer[1]=(unsigned char) value;
  (void) memcpy(p,buffer,2);
}

/*
  Walk the EXIF/TIFF IFD tree in-place and rewrite the resolution (0x011a,
  0x011b), orientation (0x0112) and resolution-unit (0x0128) tags from the
  image properties.  A splay tree of visited entry addresses guards against
  IFD loops in corrupt profiles.
*/
static MagickBooleanType SyncExifProfile(const Image *image,unsigned char *exif,
  size_t length)
{
#define MaxDirectoryStack  16
#define EXIF_DELIMITER  "\n"
#define EXIF_NUM_FORMATS  12
#define TAG_EXIF_OFFSET  0x8769
#define TAG_INTEROP_OFFSET  0xa005

  typedef struct _DirectoryInfo
  {
    unsigned char
      *directory;

    size_t
      entry;
  } DirectoryInfo;

  DirectoryInfo
    directory_stack[MaxDirectoryStack];

  EndianType
    endian;

  size_t
    entry,
    number_entries;

  SplayTreeInfo
    *exif_resources;

  ssize_t
    id,
    level,
    offset;

  /* bytes per component for each TIFF format code (index 0 unused) */
  static int
    format_bytes[] = {0, 1, 1, 2, 4, 8, 1, 1, 2, 4, 8, 4, 8};

  unsigned char
    *directory;

  if (length < 16)
    return(MagickFalse);
  id=(ssize_t) ReadProfileShort(LSBEndian,exif);
  if ((id != 0x4949) && (id != 0x4D4D))
    {
      /* Scan forward for an "Exif\0\0" signature before the TIFF header. */
      while (length != 0)
      {
        if (ReadProfileByte(&exif,&length) != 0x45)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x78)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x69)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x66)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        if (ReadProfileByte(&exif,&length) != 0x00)
          continue;
        break;
      }
      if (length < 16)
        return(MagickFalse);
      id=(ssize_t) ReadProfileShort(LSBEndian,exif);
    }
  endian=LSBEndian;
  if (id == 0x4949)
    endian=LSBEndian;
  else
    if (id == 0x4D4D)
      endian=MSBEndian;
    else
      return(MagickFalse);
  if (ReadProfileShort(endian,exif+2) != 0x002a)
    return(MagickFalse);
  /*
    This the offset to the first IFD.
  */
  offset=(ssize_t) ReadProfileLong(endian,exif+4);
  if ((offset < 0) || ((size_t) offset >= length))
    return(MagickFalse);
  directory=exif+offset;
  level=0;
  entry=0;
  exif_resources=NewSplayTree((int (*)(const void *,const void *)) NULL,
    (void *(*)(void *)) NULL,(void *(*)(void *)) NULL);
  do
  {
    if (level > 0)
      {
        level--;
        directory=directory_stack[level].directory;
        entry=directory_stack[level].entry;
      }
    if ((directory < exif) || (directory > (exif+length-2)))
      break;
    /*
      Determine how many entries there are in the current IFD.
    */
    number_entries=ReadProfileShort(endian,directory);
    for ( ; entry < number_entries; entry++)
    {
      int
        components;

      unsigned char
        *p,
        *q;

      size_t
        number_bytes;

      ssize_t
        format,
        tag_value;

      q=(unsigned char *) (directory+2+(12*entry));
      if (q > (exif+length-12))
        break;  /* corrupt EXIF */
      if (GetValueFromSplayTree(exif_resources,q) == q)
        break;  /* IFD loop detected */
      (void) AddValueToSplayTree(exif_resources,q,q);
      tag_value=(ssize_t) ReadProfileShort(endian,q);
      format=(ssize_t) ReadProfileShort(endian,q+2);
      if ((format < 0) || ((format-1) >= EXIF_NUM_FORMATS))
        break;
      components=(int) ReadProfileLong(endian,q+4);
      if (components < 0)
        break;  /* corrupt EXIF */
      number_bytes=(size_t) components*format_bytes[format];
      if ((ssize_t) number_bytes < components)
        break;  /* prevent overflow */
      if (number_bytes <= 4)
        p=q+8;  /* value stored inline in the entry */
      else
        {
          /*
            The directory entry contains an offset.
          */
          offset=(ssize_t) ReadProfileLong(endian,q+8);
          if ((offset < 0) || ((size_t) (offset+number_bytes) > length))
            continue;
          if (~length < number_bytes)
            continue;  /* prevent overflow */
          p=(unsigned char *) (exif+offset);
        }
      switch (tag_value)
      {
        case 0x011a:
        {
          /* XResolution: numerator, denominator forced to 1 if rational. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.x+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x011b:
        {
          /* YResolution. */
          (void) WriteProfileLong(endian,(size_t) (image->resolution.y+0.5),p);
          if (number_bytes == 8)
            (void) WriteProfileLong(endian,1UL,p+4);
          break;
        }
        case 0x0112:
        {
          /* Orientation. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,(size_t) image->orientation,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) image->orientation,
            p);
          break;
        }
        case 0x0128:
        {
          /* ResolutionUnit: EXIF units are ImageMagick units + 1. */
          if (number_bytes == 4)
            {
              (void) WriteProfileLong(endian,((size_t) image->units)+1,p);
              break;
            }
          (void) WriteProfileShort(endian,(unsigned short) (image->units+1),p);
          break;
        }
        default:
          break;
      }
      if ((tag_value == TAG_EXIF_OFFSET) || (tag_value == TAG_INTEROP_OFFSET))
        {
          /* Descend into the sub-IFD, remembering where to resume. */
          offset=(ssize_t) ReadProfileLong(endian,p);
          if (((size_t) offset < length) && (level < (MaxDirectoryStack-2)))
            {
              directory_stack[level].directory=directory;
              entry++;
              directory_stack[level].entry=entry;
              level++;
              directory_stack[level].directory=exif+offset;
              directory_stack[level].entry=0;
              level++;
              if ((directory+2+(12*number_entries)) > (exif+length))
                break;
              offset=(ssize_t) ReadProfileLong(endian,directory+2+(12*
                number_entries));
              if ((offset != 0) && ((size_t) offset < length) &&
                  (level < (MaxDirectoryStack-2)))
                {
                  directory_stack[level].directory=exif+offset;
                  directory_stack[level].entry=0;
                  level++;
                }
            }
          break;
        }
    }
  } while (level > 0);
  exif_resources=DestroySplayTree(exif_resources);
  return(MagickTrue);
}

/*
  Scan an 8BIM profile for the resolution resource (0x03ED) and embedded EXIF
  (0x0422) and rewrite them from the image properties.
  NOTE(review): after 'p+=count' the '*p' parity check reads one byte past the
  skipped name without a bounds check when count == length — confirm upstream.
*/
static MagickBooleanType Sync8BimProfile(const Image *image,
  const StringInfo *profile)
{
  size_t
    length;

  ssize_t
    count;

  unsigned char
    *p;

  unsigned short
    id;

  length=GetStringInfoLength(profile);
  p=GetStringInfoDatum(profile);
  while (length != 0)
  {
    if (ReadProfileByte(&p,&length) != 0x38)
      continue;
    if (ReadProfileByte(&p,&length) != 0x42)
      continue;
    if (ReadProfileByte(&p,&length) != 0x49)
      continue;
    if (ReadProfileByte(&p,&length) != 0x4D)
      continue;
    if (length < 7)
      return(MagickFalse);
    id=ReadProfileMSBShort(&p,&length);
    count=(ssize_t) ReadProfileByte(&p,&length);
    if ((count >= (ssize_t) length) || (count < 0))
      return(MagickFalse);
    p+=count;  /* skip the Pascal-style resource name */
    length-=count;
    if ((*p & 0x01) == 0)
      (void) ReadProfileByte(&p,&length);  /* even-pad byte */
    count=(ssize_t) ReadProfileMSBLong(&p,&length);
    if ((count > (ssize_t) length) || (count < 0))
      return(MagickFalse);
    if ((id == 0x3ED) && (count == 16))
      {
        /* Resolution resource: x-res, unit, y-res, unit as fixed point. */
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
            image->resolution.x*2.54*65536.0),p);
        else
          WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
            image->resolution.x*65536.0),p);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+4);
        if (image->units == PixelsPerCentimeterResolution)
          WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
            image->resolution.y*2.54*65536.0),p+8);
        else
          WriteProfileLong(MSBEndian,(unsigned int) CastDoubleToLong(
            image->resolution.y*65536.0),p+8);
        WriteProfileShort(MSBEndian,(unsigned short) image->units,p+12);
      }
    if (id == 0x0422)
      (void) SyncExifProfile(image,p,count);
    p+=count;
    length-=count;
  }
  return(MagickTrue);
}

/* Synchronize image properties into the 8BIM and EXIF profiles, if present. */
MagickPrivate MagickBooleanType SyncImageProfiles(Image *image)
{
  MagickBooleanType
    status;

  StringInfo
    *profile;

  status=MagickTrue;
  profile=(StringInfo *) GetImageProfile(image,"8BIM");
  if (profile != (StringInfo *) NULL)
    if (Sync8BimProfile(image,profile) == MagickFalse)
      status=MagickFalse;
  profile=(StringInfo *) GetImageProfile(image,"EXIF");
  if (profile != (StringInfo *) NULL)
    if (SyncExifProfile(image,GetStringInfoDatum(profile),
        GetStringInfoLength(profile)) == MagickFalse)
      status=MagickFalse;
  return(status);
}

/*
  Rescale the knots of a Photoshop clip path (stored as 8.24 fixed point
  fractions of the canvas) from the old image geometry to new_geometry.
*/
static void UpdateClipPath(unsigned char *blob,size_t length,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  ssize_t
    i;

  ssize_t
    knot_count,
    selector;

  knot_count=0;
  while (length != 0)
  {
    selector=(ssize_t) ReadProfileMSBShort(&blob,&length);
    switch (selector)
    {
      case 0:
      case 3:
      {
        if (knot_count != 0)
          {
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Expected subpath length record.
        */
        knot_count=(ssize_t) ReadProfileMSBShort(&blob,&length);
        blob+=22;
        length-=MagickMin(22,(ssize_t) length);
        break;
      }
      case 1:
      case 2:
      case 4:
      case 5:
      {
        if (knot_count == 0)
          {
            /*
              Unexpected subpath knot.
            */
            blob+=24;
            length-=MagickMin(24,(ssize_t) length);
            break;
          }
        /*
          Add sub-path knot
        */
        for (i=0; i < 3; i++)
        {
          double
            x,
            y;

          signed int
            xx,
            yy;

          /* Each knot is three (y,x) fixed-point coordinate pairs. */
          y=(double) ReadProfileMSBLong(&blob,&length);
          y=y*old_rows/4096.0/4096.0;
          y-=new_geometry->y;
          yy=(signed int) ((y*4096*4096)/new_geometry->height);
          WriteProfileLong(MSBEndian,(size_t) yy,blob-4);
          x=(double) ReadProfileMSBLong(&blob,&length);
          x=x*old_columns/4096.0/4096.0;
          x-=new_geometry->x;
          xx=(signed int) ((x*4096*4096)/new_geometry->width);
          WriteProfileLong(MSBEndian,(size_t) xx,blob-4);
        }
        knot_count--;
        break;
      }
      case 6:
      case 7:
      case 8:
      default:
      {
        blob+=24;
        length-=MagickMin(24,(ssize_t) length);
        break;
      }
    }
  }
}

/* Find clip-path resources (ids 2000-2998) in the 8bim profile and rescale
   each of them to new_geometry. */
MagickPrivate void Update8BIMClipPath(const Image *image,
  const size_t old_columns,const size_t old_rows,
  const RectangleInfo *new_geometry)
{
  const StringInfo
    *profile;

  size_t
    length;

  ssize_t
    count,
    id;

  unsigned char
    *info;

  assert(image != (Image *) NULL);
  assert(new_geometry != (RectangleInfo *) NULL);
  profile=GetImageProfile(image,"8bim");
  if (profile == (StringInfo *) NULL)
    return;
  length=GetStringInfoLength(profile);
  info=GetStringInfoDatum(profile);
  while (length > 0)
  {
    if (ReadProfileByte(&info,&length) != (unsigned char) '8')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'B')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'I')
      continue;
    if (ReadProfileByte(&info,&length) != (unsigned char) 'M')
      continue;
    id=(ssize_t) ReadProfileMSBShort(&info,&length);
    count=(ssize_t) ReadProfileByte(&info,&length);
    if ((count != 0) && ((size_t) count <= length))
      {
        info+=count;  /* skip the resource name */
        length-=count;
      }
    if ((count & 0x01) == 0)
      (void) ReadProfileByte(&info,&length);  /* even-pad byte */
    count=(ssize_t) ReadProfileMSBLong(&info,&length);
    if ((count < 0) || ((size_t) count > length))
      {
        length=0;
        continue;
      }
    if ((id > 1999) && (id < 2999))
      UpdateClipPath(info,(size_t) count,old_columns,old_rows,new_geometry);
    info+=count;
    length-=MagickMin(count,(ssize_t) length);
  }
}
pi1.c
#include <omp.h>

static long num_steps = 100000;
double step;

#define NUM_THREADS 2

/*
 * Estimate pi by midpoint-rule integration of 4/(1+x^2) over [0,1],
 * splitting the iteration space cyclically across NUM_THREADS threads.
 *
 * Fixes over the original:
 *  - missing ';' after omp_set_num_threads(...) and after 'pi += sum'
 *  - 'id' was used without a declaration
 *  - loop index 'i' was shared between threads (data race); it is now
 *    declared inside the parallel region so each thread has its own copy
 *  - 'void main' replaced by standard 'int main(void)'
 */
int main(void)
{
  double pi = 0.0;

  step = 1.0 / (double) num_steps;
  omp_set_num_threads(NUM_THREADS);
#pragma omp parallel
  {
    int i;                            /* per-thread loop index */
    double x;
    double sum = 0.0;                 /* per-thread partial sum */
    int id = omp_get_thread_num();

    for (i = id; i < num_steps; i += NUM_THREADS) {
      x = (i + 0.5) * step;
      sum += 4.0 / (1.0 + x * x);
    }
    /* Serialize the accumulation into the shared result. */
#pragma omp critical
    pi += sum;
  }
  return 0;
}
omp_monotonic_schedule_set_get.c
// RUN: %libomp-compile-and-run
// The test checks OMP 5.0 monotonic/nonmonotonic scheduling API
// 1. initial schedule should be (static,0)
// 2. omp_get_schedule() should return the schedule set by omp_set_schedule()
// 3. schedules set inside parallel should not impact outer tasks' schedules
#include <stdio.h>
#ifndef __INTEL_COMPILER
#define _OMPIMP
#endif

#define NO_MODIFIERS ((omp_sched_t)0)
#include "omp.h"

int global = 0;
int err = 0;

// OR the modifier bits into a schedule kind.
omp_sched_t sched_append_modifiers(omp_sched_t sched, omp_sched_t modifiers) {
  return (omp_sched_t)((int)sched | (int)modifiers);
}

// Strip the monotonic modifier bit from a schedule kind.
omp_sched_t sched_without_modifiers(omp_sched_t sched) {
  return (omp_sched_t)((int)sched & ~((int)omp_sched_monotonic));
}

// NOTE(review): the 'modifiers' parameter is ignored — only the
// omp_sched_monotonic bit is tested.  The function is unused in this test;
// confirm intent before relying on it elsewhere.
int sched_has_modifiers(omp_sched_t sched, omp_sched_t modifiers) {
  return (((int)sched & ((int)omp_sched_monotonic)) > 0);
}

// check that sched = hope | modifiers
// Reports (and counts in 'err') any mismatch between the observed
// (sched, chunk) and the expected (hope_sched, hope_chunk) pair.
void check_schedule(const char *extra, const omp_sched_t sched, int chunk,
                    omp_sched_t hope_sched, int hope_chunk) {
  if (sched != hope_sched || chunk != hope_chunk) {
#pragma omp atomic
    ++err;
    printf("Error: %s: schedule: (%d, %d) is not equal to (%d, %d)\n", extra,
           (int)hope_sched, hope_chunk, (int)sched, chunk);
  }
}

int main() {
  int i;
  int chunk;
  omp_sched_t sched0;
  omp_set_dynamic(0);
  omp_set_nested(1);
  // check serial region
  omp_get_schedule(&sched0, &chunk);
#ifdef DEBUG
  printf("initial: (%d, %d)\n", sched0, chunk);
#endif
  check_schedule("initial", omp_sched_static, 0, sched0, chunk);
  // set schedule before the parallel, check it after the parallel
  omp_set_schedule(
      sched_append_modifiers(omp_sched_dynamic, omp_sched_monotonic), 3);
#pragma omp parallel num_threads(3) private(i)
  {
    omp_sched_t n_outer_set, n_outer_get;
    int c_outer;
    int tid = omp_get_thread_num();
    n_outer_set = sched_append_modifiers((omp_sched_t)(tid + 1),
                                         omp_sched_monotonic); // 1, 2, 3
    // check outer parallel region
    // master sets (static, unchunked), others - (dynamic, 1), (guided, 2)
    // set schedule before inner parallel, check it after the parallel
    omp_set_schedule(n_outer_set, tid);
    // Make sure this schedule doesn't crash the runtime
#pragma omp for
    for (i = 0; i < 100; ++i) {
#pragma omp atomic
      global++;
    }
#pragma omp parallel num_threads(3) private(i) shared(n_outer_set)
    {
      omp_sched_t n_inner_set, n_inner_get;
      int c_inner_set, c_inner_get;
      int tid = omp_get_thread_num();
      n_inner_set = (omp_sched_t)(tid + 1); // 1, 2, 3
      // encode (outer thread, inner thread) into the chunk for uniqueness
      c_inner_set = (int)(n_outer_set)*10 +
                    (int)n_inner_set; // 11, 12, 13, 21, 22, 23, 31, 32, 33
      n_inner_set = sched_append_modifiers(n_inner_set, omp_sched_monotonic);
      // schedules set inside parallel should not impact outer schedules
      omp_set_schedule(n_inner_set, c_inner_set);
      // Make sure this schedule doesn't crash the runtime
#pragma omp for
      for (i = 0; i < 100; ++i) {
#pragma omp atomic
        global++;
      }
#pragma omp barrier
      omp_get_schedule(&n_inner_get, &c_inner_get);
#ifdef DEBUG
      printf("inner parallel: o_th %d, i_th %d, (%d, %d)\n", n_outer_set - 1,
             tid, n_inner_get, c_inner_get);
#endif
      check_schedule("inner", n_inner_set, c_inner_set, n_inner_get,
                     c_inner_get);
    }
    omp_get_schedule(&n_outer_get, &c_outer);
#ifdef DEBUG
    printf("outer parallel: thread %d, (%d, %d)\n", tid, n_outer_get, c_outer);
#endif
    check_schedule("outer", n_outer_set, tid, n_outer_get, c_outer);
  }
  // the inner parallels must not have clobbered the pre-parallel schedule
  omp_get_schedule(&sched0, &chunk);
#ifdef DEBUG
  printf("after parallels: (%d, %d)\n", sched0, chunk);
#endif
  check_schedule("after parallels",
                 sched_append_modifiers(omp_sched_dynamic, omp_sched_monotonic),
                 3, sched0, chunk);
  if (err > 0) {
    printf("Failed\n");
    return 1;
  }
  printf("Passed\n");
  return 0;
}
Sema.h
//===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Sema class, which performs semantic analysis and // builds ASTs. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_SEMA_SEMA_H #define LLVM_CLANG_SEMA_SEMA_H #include "clang/AST/Attr.h" #include "clang/AST/DeclarationName.h" #include "clang/AST/Expr.h" #include "clang/AST/ExprObjC.h" #include "clang/AST/ExternalASTSource.h" #include "clang/AST/MangleNumberingContext.h" #include "clang/AST/NSAPI.h" #include "clang/AST/PrettyPrinter.h" #include "clang/AST/TypeLoc.h" #include "clang/Basic/ExpressionTraits.h" #include "clang/Basic/LangOptions.h" #include "clang/Basic/Module.h" #include "clang/Basic/OpenMPKinds.h" #include "clang/Basic/Specifiers.h" #include "clang/Basic/TemplateKinds.h" #include "clang/Basic/TypeTraits.h" #include "clang/Sema/AnalysisBasedWarnings.h" #include "clang/Sema/DeclSpec.h" #include "clang/Sema/ExternalSemaSource.h" #include "clang/Sema/IdentifierResolver.h" #include "clang/Sema/LocInfoType.h" #include "clang/Sema/ObjCMethodList.h" #include "clang/Sema/Ownership.h" #include "clang/Sema/Scope.h" #include "clang/Sema/ScopeInfo.h" #include "clang/Sema/TypoCorrection.h" #include "clang/Sema/Weak.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/Optional.h" #include "llvm/ADT/SetVector.h" #include "llvm/ADT/SmallPtrSet.h" #include "llvm/ADT/SmallVector.h" #include "llvm/ADT/TinyPtrVector.h" #include <deque> #include <memory> #include <string> #include <vector> namespace llvm { class APSInt; template <typename ValueT> struct DenseMapInfo; template <typename ValueT, typename ValueInfoT> class DenseSet; class SmallBitVector; class 
InlineAsmIdentifierInfo; } namespace clang { class ADLResult; class ASTConsumer; class ASTContext; class ASTMutationListener; class ASTReader; class ASTWriter; class ArrayType; class AttributeList; class BlockDecl; class CapturedDecl; class CXXBasePath; class CXXBasePaths; class CXXBindTemporaryExpr; typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath; class CXXConstructorDecl; class CXXConversionDecl; class CXXDestructorDecl; class CXXFieldCollector; class CXXMemberCallExpr; class CXXMethodDecl; class CXXScopeSpec; class CXXTemporary; class CXXTryStmt; class CallExpr; class ClassTemplateDecl; class ClassTemplatePartialSpecializationDecl; class ClassTemplateSpecializationDecl; class VarTemplatePartialSpecializationDecl; class CodeCompleteConsumer; class CodeCompletionAllocator; class CodeCompletionTUInfo; class CodeCompletionResult; class Decl; class DeclAccessPair; class DeclContext; class DeclRefExpr; class DeclaratorDecl; class DeducedTemplateArgument; class DependentDiagnostic; class DesignatedInitExpr; class Designation; class EnableIfAttr; class EnumConstantDecl; class Expr; class ExtVectorType; class ExternalSemaSource; class FormatAttr; class FriendDecl; class FunctionDecl; class FunctionProtoType; class FunctionTemplateDecl; class ImplicitConversionSequence; class InitListExpr; class InitializationKind; class InitializationSequence; class InitializedEntity; class IntegerLiteral; class LabelStmt; class LambdaExpr; class LangOptions; class LocalInstantiationScope; class LookupResult; class MacroInfo; typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath; class ModuleLoader; class MultiLevelTemplateArgumentList; class NamedDecl; class ObjCCategoryDecl; class ObjCCategoryImplDecl; class ObjCCompatibleAliasDecl; class ObjCContainerDecl; class ObjCImplDecl; class ObjCImplementationDecl; class ObjCInterfaceDecl; class ObjCIvarDecl; template <class T> class ObjCList; class ObjCMessageExpr; class ObjCMethodDecl; class ObjCPropertyDecl; class 
ObjCProtocolDecl; class OMPThreadPrivateDecl; class OMPClause; class OverloadCandidateSet; class OverloadExpr; class ParenListExpr; class ParmVarDecl; class Preprocessor; class PseudoDestructorTypeStorage; class PseudoObjectExpr; class QualType; class StandardConversionSequence; class Stmt; class StringLiteral; class SwitchStmt; class TemplateArgument; class TemplateArgumentList; class TemplateArgumentLoc; class TemplateDecl; class TemplateParameterList; class TemplatePartialOrderingContext; class TemplateTemplateParmDecl; class Token; class TypeAliasDecl; class TypedefDecl; class TypedefNameDecl; class TypeLoc; class TypoCorrectionConsumer; class UnqualifiedId; class UnresolvedLookupExpr; class UnresolvedMemberExpr; class UnresolvedSetImpl; class UnresolvedSetIterator; class UsingDecl; class UsingShadowDecl; class ValueDecl; class VarDecl; class VarTemplateSpecializationDecl; class VisibilityAttr; class VisibleDeclConsumer; class IndirectFieldDecl; struct DeductionFailureInfo; class TemplateSpecCandidateSet; namespace sema { class AccessedEntity; class BlockScopeInfo; class CapturedRegionScopeInfo; class CapturingScopeInfo; class CompoundScopeInfo; class DelayedDiagnostic; class DelayedDiagnosticPool; class FunctionScopeInfo; class LambdaScopeInfo; class PossiblyUnreachableDiag; class TemplateDeductionInfo; } // FIXME: No way to easily map from TemplateTypeParmTypes to // TemplateTypeParmDecls, so we have this horrible PointerUnion. typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>, SourceLocation> UnexpandedParameterPack; /// Sema - This implements semantic analysis and AST building for C. class Sema { Sema(const Sema &) LLVM_DELETED_FUNCTION; void operator=(const Sema &) LLVM_DELETED_FUNCTION; ///\brief Source of additional semantic information. ExternalSemaSource *ExternalSource; ///\brief Whether Sema has generated a multiplexer and has to delete it. 
bool isMultiplexExternalSource; static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD); static bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old, const NamedDecl *New) { // We are about to link these. It is now safe to compute the linkage of // the new decl. If the new decl has external linkage, we will // link it with the hidden decl (which also has external linkage) and // it will keep having external linkage. If it has internal linkage, we // will not link it. Since it has no previous decls, it will remain // with internal linkage. return !Old->isHidden() || New->isExternallyVisible(); } public: typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy; typedef OpaquePtr<TemplateName> TemplateTy; typedef OpaquePtr<QualType> TypeTy; OpenCLOptions OpenCLFeatures; FPOptions FPFeatures; const LangOptions &LangOpts; Preprocessor &PP; ASTContext &Context; ASTConsumer &Consumer; DiagnosticsEngine &Diags; SourceManager &SourceMgr; /// \brief Flag indicating whether or not to collect detailed statistics. bool CollectStats; /// \brief Code-completion consumer. CodeCompleteConsumer *CodeCompleter; /// CurContext - This is the current declaration context of parsing. DeclContext *CurContext; /// \brief Generally null except when we temporarily switch decl contexts, /// like in \see ActOnObjCTemporaryExitContainerContext. DeclContext *OriginalLexicalContext; /// VAListTagName - The declaration name corresponding to __va_list_tag. /// This is used as part of a hack to omit that class from ADL results. DeclarationName VAListTagName; /// PackContext - Manages the stack for \#pragma pack. An alignment /// of 0 indicates default alignment. void *PackContext; // Really a "PragmaPackStack*" bool MSStructPragmaOn; // True when \#pragma ms_struct on /// \brief Controls member pointer representation format under the MS ABI. 
LangOptions::PragmaMSPointersToMembersKind MSPointerToMemberRepresentationMethod; enum PragmaVtorDispKind { PVDK_Push, ///< #pragma vtordisp(push, mode) PVDK_Set, ///< #pragma vtordisp(mode) PVDK_Pop, ///< #pragma vtordisp(pop) PVDK_Reset ///< #pragma vtordisp() }; enum PragmaMsStackAction { PSK_Reset, // #pragma () PSK_Set, // #pragma ("name") PSK_Push, // #pragma (push[, id]) PSK_Push_Set, // #pragma (push[, id], "name") PSK_Pop, // #pragma (pop[, id]) PSK_Pop_Set, // #pragma (pop[, id], "name") }; /// \brief Whether to insert vtordisps prior to virtual bases in the Microsoft /// C++ ABI. Possible values are 0, 1, and 2, which mean: /// /// 0: Suppress all vtordisps /// 1: Insert vtordisps in the presence of vbase overrides and non-trivial /// structors /// 2: Always insert vtordisps to support RTTI on partially constructed /// objects /// /// The stack always has at least one element in it. SmallVector<MSVtorDispAttr::Mode, 2> VtorDispModeStack; /// \brief Source location for newly created implicit MSInheritanceAttrs SourceLocation ImplicitMSInheritanceAttrLoc; template<typename ValueType> struct PragmaStack { struct Slot { llvm::StringRef StackSlotLabel; ValueType Value; SourceLocation PragmaLocation; Slot(llvm::StringRef StackSlotLabel, ValueType Value, SourceLocation PragmaLocation) : StackSlotLabel(StackSlotLabel), Value(Value), PragmaLocation(PragmaLocation) {} }; void Act(SourceLocation PragmaLocation, PragmaMsStackAction Action, llvm::StringRef StackSlotLabel, ValueType Value); explicit PragmaStack(const ValueType &Value) : CurrentValue(Value) {} SmallVector<Slot, 2> Stack; ValueType CurrentValue; SourceLocation CurrentPragmaLocation; }; // FIXME: We should serialize / deserialize these if they occur in a PCH (but // we shouldn't do so if they're in a module). 
PragmaStack<StringLiteral *> DataSegStack; PragmaStack<StringLiteral *> BSSSegStack; PragmaStack<StringLiteral *> ConstSegStack; PragmaStack<StringLiteral *> CodeSegStack; /// Last section used with #pragma init_seg. StringLiteral *CurInitSeg; SourceLocation CurInitSegLoc; /// VisContext - Manages the stack for \#pragma GCC visibility. void *VisContext; // Really a "PragmaVisStack*" /// \brief This represents the last location of a "#pragma clang optimize off" /// directive if such a directive has not been closed by an "on" yet. If /// optimizations are currently "on", this is set to an invalid location. SourceLocation OptimizeOffPragmaLocation; /// \brief Flag indicating if Sema is building a recovery call expression. /// /// This flag is used to avoid building recovery call expressions /// if Sema is already doing so, which would cause infinite recursions. bool IsBuildingRecoveryCallExpr; /// ExprNeedsCleanups - True if the current evaluation context /// requires cleanups to be run at its conclusion. bool ExprNeedsCleanups; /// ExprCleanupObjects - This is the stack of objects requiring /// cleanup that are created by the current full expression. The /// element type here is ExprWithCleanups::Object. SmallVector<BlockDecl*, 8> ExprCleanupObjects; /// \brief Store a list of either DeclRefExprs or MemberExprs /// that contain a reference to a variable (constant) that may or may not /// be odr-used in this Expr, and we won't know until all lvalue-to-rvalue /// and discarded value conversions have been applied to all subexpressions /// of the enclosing full expression. This is cleared at the end of each /// full expression. llvm::SmallPtrSet<Expr*, 2> MaybeODRUseExprs; /// \brief Stack containing information about each of the nested /// function, block, and method scopes that are currently active. /// /// This array is never empty. 
Clients should ignore the first /// element, which is used to cache a single FunctionScopeInfo /// that's used to parse every top-level function. SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes; typedef LazyVector<TypedefNameDecl *, ExternalSemaSource, &ExternalSemaSource::ReadExtVectorDecls, 2, 2> ExtVectorDeclsType; /// ExtVectorDecls - This is a list all the extended vector types. This allows /// us to associate a raw vector type with one of the ext_vector type names. /// This is only necessary for issuing pretty diagnostics. ExtVectorDeclsType ExtVectorDecls; /// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes. std::unique_ptr<CXXFieldCollector> FieldCollector; typedef llvm::SmallSetVector<const NamedDecl*, 16> NamedDeclSetType; /// \brief Set containing all declared private fields that are not used. NamedDeclSetType UnusedPrivateFields; /// \brief Set containing all typedefs that are likely unused. llvm::SmallSetVector<const TypedefNameDecl *, 4> UnusedLocalTypedefNameCandidates; typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy; /// PureVirtualClassDiagSet - a set of class declarations which we have /// emitted a list of pure virtual functions. Used to prevent emitting the /// same list more than once. std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet; /// ParsingInitForAutoVars - a set of declarations with auto types for which /// we are currently parsing the initializer. llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars; /// \brief A mapping from external names to the most recent /// locally-scoped extern "C" declaration with that name. /// /// This map contains external declarations introduced in local /// scopes, e.g., /// /// \code /// extern "C" void f() { /// void foo(int, int); /// } /// \endcode /// /// Here, the name "foo" will be associated with the declaration of /// "foo" within f. This name is not visible outside of /// "f". 
However, we still find it in two cases: /// /// - If we are declaring another global or extern "C" entity with /// the name "foo", we can find "foo" as a previous declaration, /// so that the types of this external declaration can be checked /// for compatibility. /// /// - If we would implicitly declare "foo" (e.g., due to a call to /// "foo" in C when no prototype or definition is visible), then /// we find this declaration of "foo" and complain that it is /// not visible. llvm::DenseMap<DeclarationName, NamedDecl *> LocallyScopedExternCDecls; /// \brief Look for a locally scoped extern "C" declaration by the given name. NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name); typedef LazyVector<VarDecl *, ExternalSemaSource, &ExternalSemaSource::ReadTentativeDefinitions, 2, 2> TentativeDefinitionsType; /// \brief All the tentative definitions encountered in the TU. TentativeDefinitionsType TentativeDefinitions; typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2> UnusedFileScopedDeclsType; /// \brief The set of file scoped decls seen so far that have not been used /// and must warn if not used. Only contains the first declaration. UnusedFileScopedDeclsType UnusedFileScopedDecls; typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDelegatingConstructors, 2, 2> DelegatingCtorDeclsType; /// \brief All the delegating constructors seen so far in the file, used for /// cycle detection at the end of the TU. DelegatingCtorDeclsType DelegatingCtorDecls; /// \brief All the overriding functions seen during a class definition /// that had their exception spec checks delayed, plus the overridden /// function. 
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2> DelayedExceptionSpecChecks; /// \brief All the members seen during a class definition which were both /// explicitly defaulted and had explicitly-specified exception /// specifications, along with the function type containing their /// user-specified exception specification. Those exception specifications /// were overridden with the default specifications, but we still need to /// check whether they are compatible with the default specification, and /// we can't do that until the nesting set of class definitions is complete. SmallVector<std::pair<CXXMethodDecl*, const FunctionProtoType*>, 2> DelayedDefaultedMemberExceptionSpecs; typedef llvm::DenseMap<const FunctionDecl *, LateParsedTemplate *> LateParsedTemplateMapT; LateParsedTemplateMapT LateParsedTemplateMap; /// \brief Callback to the parser to parse templated functions when needed. typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT); typedef void LateTemplateParserCleanupCB(void *P); LateTemplateParserCB *LateTemplateParser; LateTemplateParserCleanupCB *LateTemplateParserCleanup; void *OpaqueParser; void SetLateTemplateParser(LateTemplateParserCB *LTP, LateTemplateParserCleanupCB *LTPCleanup, void *P) { LateTemplateParser = LTP; LateTemplateParserCleanup = LTPCleanup; OpaqueParser = P; } class DelayedDiagnostics; class DelayedDiagnosticsState { sema::DelayedDiagnosticPool *SavedPool; friend class Sema::DelayedDiagnostics; }; typedef DelayedDiagnosticsState ParsingDeclState; typedef DelayedDiagnosticsState ProcessingContextState; /// A class which encapsulates the logic for delaying diagnostics /// during parsing and other processing. class DelayedDiagnostics { /// \brief The current pool of diagnostics into which delayed /// diagnostics should go. sema::DelayedDiagnosticPool *CurPool; public: DelayedDiagnostics() : CurPool(nullptr) {} /// Adds a delayed diagnostic. 
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h /// Determines whether diagnostics should be delayed. bool shouldDelayDiagnostics() { return CurPool != nullptr; } /// Returns the current delayed-diagnostics pool. sema::DelayedDiagnosticPool *getCurrentPool() const { return CurPool; } /// Enter a new scope. Access and deprecation diagnostics will be /// collected in this pool. DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = &pool; return state; } /// Leave a delayed-diagnostic state that was previously pushed. /// Do not emit any of the diagnostics. This is performed as part /// of the bookkeeping of popping a pool "properly". void popWithoutEmitting(DelayedDiagnosticsState state) { CurPool = state.SavedPool; } /// Enter a new scope where access and deprecation diagnostics are /// not delayed. DelayedDiagnosticsState pushUndelayed() { DelayedDiagnosticsState state; state.SavedPool = CurPool; CurPool = nullptr; return state; } /// Undo a previous pushUndelayed(). void popUndelayed(DelayedDiagnosticsState state) { assert(CurPool == nullptr); CurPool = state.SavedPool; } } DelayedDiagnostics; /// A RAII object to temporarily push a declaration context. 
class ContextRAII { private: Sema &S; DeclContext *SavedContext; ProcessingContextState SavedContextState; QualType SavedCXXThisTypeOverride; public: ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true) : S(S), SavedContext(S.CurContext), SavedContextState(S.DelayedDiagnostics.pushUndelayed()), SavedCXXThisTypeOverride(S.CXXThisTypeOverride) { assert(ContextToPush && "pushing null context"); S.CurContext = ContextToPush; if (NewThisContext) S.CXXThisTypeOverride = QualType(); } void pop() { if (!SavedContext) return; S.CurContext = SavedContext; S.DelayedDiagnostics.popUndelayed(SavedContextState); S.CXXThisTypeOverride = SavedCXXThisTypeOverride; SavedContext = nullptr; } ~ContextRAII() { pop(); } }; /// \brief RAII object to handle the state changes required to synthesize /// a function body. class SynthesizedFunctionScope { Sema &S; Sema::ContextRAII SavedContext; public: SynthesizedFunctionScope(Sema &S, DeclContext *DC) : S(S), SavedContext(S, DC) { S.PushFunctionScope(); S.PushExpressionEvaluationContext(Sema::PotentiallyEvaluated); } ~SynthesizedFunctionScope() { S.PopExpressionEvaluationContext(); S.PopFunctionScopeInfo(); } }; /// WeakUndeclaredIdentifiers - Identifiers contained in /// \#pragma weak before declared. rare. may alias another /// identifier, declared or undeclared llvm::DenseMap<IdentifierInfo*,WeakInfo> WeakUndeclaredIdentifiers; /// ExtnameUndeclaredIdentifiers - Identifiers contained in /// \#pragma redefine_extname before declared. Used in Solaris system headers /// to define functions that occur in multiple standards to call the version /// in the currently selected standard. llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers; /// \brief Load weak undeclared identifiers from the external source. void LoadExternalWeakUndeclaredIdentifiers(); /// WeakTopLevelDecl - Translation-unit scoped declarations generated by /// \#pragma weak during processing of other Decls. 
/// I couldn't figure out a clean way to generate these in-line, so /// we store them here and handle separately -- which is a hack. /// It would be best to refactor this. SmallVector<Decl*,2> WeakTopLevelDecl; IdentifierResolver IdResolver; /// Translation Unit Scope - useful to Objective-C actions that need /// to lookup file scope declarations in the "ordinary" C decl namespace. /// For example, user-defined classes, built-in "id" type, etc. Scope *TUScope; /// \brief The C++ "std" namespace, where the standard library resides. LazyDeclPtr StdNamespace; /// \brief The C++ "std::bad_alloc" class, which is defined by the C++ /// standard library. LazyDeclPtr StdBadAlloc; /// \brief The C++ "std::initializer_list" template, which is defined in /// \<initializer_list>. ClassTemplateDecl *StdInitializerList; /// \brief The C++ "type_info" declaration, which is defined in \<typeinfo>. RecordDecl *CXXTypeInfoDecl; /// \brief The MSVC "_GUID" struct, which is defined in MSVC header files. RecordDecl *MSVCGuidDecl; /// \brief Caches identifiers/selectors for NSFoundation APIs. std::unique_ptr<NSAPI> NSAPIObj; /// \brief The declaration of the Objective-C NSNumber class. ObjCInterfaceDecl *NSNumberDecl; /// \brief Pointer to NSNumber type (NSNumber *). QualType NSNumberPointer; /// \brief The Objective-C NSNumber methods used to create NSNumber literals. ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods]; /// \brief The declaration of the Objective-C NSString class. ObjCInterfaceDecl *NSStringDecl; /// \brief Pointer to NSString type (NSString *). QualType NSStringPointer; /// \brief The declaration of the stringWithUTF8String: method. ObjCMethodDecl *StringWithUTF8StringMethod; /// \brief The declaration of the Objective-C NSArray class. ObjCInterfaceDecl *NSArrayDecl; /// \brief The declaration of the arrayWithObjects:count: method. ObjCMethodDecl *ArrayWithObjectsMethod; /// \brief The declaration of the Objective-C NSDictionary class. 
ObjCInterfaceDecl *NSDictionaryDecl; /// \brief The declaration of the dictionaryWithObjects:forKeys:count: method. ObjCMethodDecl *DictionaryWithObjectsMethod; /// \brief id<NSCopying> type. QualType QIDNSCopying; /// \brief will hold 'respondsToSelector:' Selector RespondsToSelectorSel; /// \brief counter for internal MS Asm label names. unsigned MSAsmLabelNameCounter; /// A flag to remember whether the implicit forms of operator new and delete /// have been declared. bool GlobalNewDeleteDeclared; /// A flag to indicate that we're in a context that permits abstract /// references to fields. This is really a bool AllowAbstractFieldReference; /// \brief Describes how the expressions currently being parsed are /// evaluated at run-time, if at all. enum ExpressionEvaluationContext { /// \brief The current expression and its subexpressions occur within an /// unevaluated operand (C++11 [expr]p7), such as the subexpression of /// \c sizeof, where the type of the expression may be significant but /// no code will be generated to evaluate the value of the expression at /// run time. Unevaluated, /// \brief The current expression occurs within an unevaluated /// operand that unconditionally permits abstract references to /// fields, such as a SIZE operator in MS-style inline assembly. UnevaluatedAbstract, /// \brief The current context is "potentially evaluated" in C++11 terms, /// but the expression is evaluated at compile-time (like the values of /// cases in a switch statement). ConstantEvaluated, /// \brief The current expression is potentially evaluated at run time, /// which means that code may be generated to evaluate the value of the /// expression at run time. PotentiallyEvaluated, /// \brief The current expression is potentially evaluated, but any /// declarations referenced inside that expression are only used if /// in fact the current expression is used. 
/// /// This value is used when parsing default function arguments, for which /// we would like to provide diagnostics (e.g., passing non-POD arguments /// through varargs) but do not want to mark declarations as "referenced" /// until the default argument is used. PotentiallyEvaluatedIfUsed }; /// \brief Data structure used to record current or nested /// expression evaluation contexts. struct ExpressionEvaluationContextRecord { /// \brief The expression evaluation context. ExpressionEvaluationContext Context; /// \brief Whether the enclosing context needed a cleanup. bool ParentNeedsCleanups; /// \brief Whether we are in a decltype expression. bool IsDecltype; /// \brief The number of active cleanup objects when we entered /// this expression evaluation context. unsigned NumCleanupObjects; /// \brief The number of typos encountered during this expression evaluation /// context (i.e. the number of TypoExprs created). unsigned NumTypos; llvm::SmallPtrSet<Expr*, 2> SavedMaybeODRUseExprs; /// \brief The lambdas that are present within this context, if it /// is indeed an unevaluated context. SmallVector<LambdaExpr *, 2> Lambdas; /// \brief The declaration that provides context for lambda expressions /// and block literals if the normal declaration context does not /// suffice, e.g., in a default function argument. Decl *ManglingContextDecl; /// \brief The context information used to mangle lambda expressions /// and block literals within this context. /// /// This mangling information is allocated lazily, since most contexts /// do not have lambda expressions or block literals. IntrusiveRefCntPtr<MangleNumberingContext> MangleNumbering; /// \brief If we are processing a decltype type, a set of call expressions /// for which we have deferred checking the completeness of the return type. 
SmallVector<CallExpr *, 8> DelayedDecltypeCalls; /// \brief If we are processing a decltype type, a set of temporary binding /// expressions for which we have deferred checking the destructor. SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds; ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context, unsigned NumCleanupObjects, bool ParentNeedsCleanups, Decl *ManglingContextDecl, bool IsDecltype) : Context(Context), ParentNeedsCleanups(ParentNeedsCleanups), IsDecltype(IsDecltype), NumCleanupObjects(NumCleanupObjects), NumTypos(0), ManglingContextDecl(ManglingContextDecl), MangleNumbering() { } /// \brief Retrieve the mangling numbering context, used to consistently /// number constructs like lambdas for mangling. MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx); bool isUnevaluated() const { return Context == Unevaluated || Context == UnevaluatedAbstract; } }; /// A stack of expression evaluation contexts. SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts; /// \brief Compute the mangling number context for a lambda expression or /// block literal. /// /// \param DC - The DeclContext containing the lambda expression or /// block literal. /// \param[out] ManglingContextDecl - Returns the ManglingContextDecl /// associated with the context, if relevant. MangleNumberingContext *getCurrentMangleNumberContext( const DeclContext *DC, Decl *&ManglingContextDecl); /// SpecialMemberOverloadResult - The overloading result for a special member /// function. /// /// This is basically a wrapper around PointerIntPair. The lowest bits of the /// integer are used to determine whether overload resolution succeeded. 
class SpecialMemberOverloadResult : public llvm::FastFoldingSetNode {
public:
  enum Kind {
    NoMemberOrDeleted,
    Ambiguous,
    Success
  };

private:
  // The selected method (if any), with the result Kind packed into the two
  // spare low bits of the pointer.
  llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;

public:
  SpecialMemberOverloadResult(const llvm::FoldingSetNodeID &ID)
    : FastFoldingSetNode(ID)
  {}

  CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
  void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }

  Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
  void setKind(Kind K) { Pair.setInt(K); }
};

/// \brief A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResult> SpecialMemberCache;

/// \brief The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;

// Arena allocator for objects owned by this Sema instance.
llvm::BumpPtrAllocator BumpAlloc;

/// \brief The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;

typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
  UnparsedDefaultArgInstantiationsMap;

/// \brief A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;

// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;

/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::DenseMap<NamedDecl *, SourceLocation> UndefinedButUsed;

/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
    SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);

// NOTE(review): the pair is presumably (instance methods, factory methods)
// for a given selector -- confirm against the ObjC method-pool code.
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;

/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;

/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::DenseMap<Selector, SourceLocation> ReferencedSelectors;

/// Kinds of C++ special members.
enum CXXSpecialMember {
  CXXDefaultConstructor,
  CXXCopyConstructor,
  CXXMoveConstructor,
  CXXCopyAssignment,
  CXXMoveAssignment,
  CXXDestructor,
  CXXInvalid
};

typedef std::pair<CXXRecordDecl*, CXXSpecialMember> SpecialMemberDecl;

/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;

// NOTE(review): presumably populates MethodPool for \p Sel from external
// (AST-file) storage -- confirm against the definition.
void ReadMethodPool(Selector Sel);

/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);

/// \brief Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted.
/// This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);

/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
  // Capture the current fp_contract setting; the destructor restores it.
  FPContractStateRAII(Sema& S)
    : S(S), OldFPContractState(S.FPFeatures.fp_contract) {}
  ~FPContractStateRAII() {
    S.FPFeatures.fp_contract = OldFPContractState;
  }
private:
  Sema& S;
  bool OldFPContractState : 1;
};

// Adds an implicit typedef named \p Name for the type \p T at
// translation-unit scope (see the definition for the exact context).
void addImplicitTypedef(StringRef Name, QualType T);

public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
     TranslationUnitKind TUKind = TU_Complete,
     CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();

/// \brief Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();

// Accessors for the major pieces of state this Sema references.
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }

DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }

///\brief Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
///\param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);

void PrintStats() const;

/// \brief Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics.
/// SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
  Sema &SemaRef;
  unsigned DiagID;

public:
  SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
    : DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }

  // Flushes the underlying builder state and then hands the diagnostic to
  // Sema for emission (possibly with a template instantiation stack).
  ~SemaDiagnosticBuilder() {
    // If we aren't active, there is nothing to do.
    if (!isActive()) return;

    // Otherwise, we need to emit the diagnostic. First flush the underlying
    // DiagnosticBuilder data, and clear the diagnostic builder itself so it
    // won't emit the diagnostic in its own destructor.
    //
    // This seems wasteful, in that as written the DiagnosticBuilder dtor will
    // do its own needless checks to see if the diagnostic needs to be
    // emitted. However, because we take care to ensure that the builder
    // objects never escape, a sufficiently smart compiler will be able to
    // eliminate that code.
    FlushCounts();
    Clear();

    // Dispatch to Sema to emit the diagnostic.
    SemaRef.EmitCurrentDiagnostic(DiagID);
  }

  /// Teach operator<< to produce an object of the correct type.
  template<typename T>
  friend const SemaDiagnosticBuilder &operator<<(
      const SemaDiagnosticBuilder &Diag, const T &Value) {
    const DiagnosticBuilder &BaseDiag = Diag;
    BaseDiag << Value;
    return Diag;
  }
};

/// \brief Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
  DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
  return SemaDiagnosticBuilder(DB, *this, DiagID);
}

/// \brief Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);

/// \brief Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h

bool findMacroSpelling(SourceLocation &loc, StringRef name);

/// \brief Get a string to suggest for zero-initialization of a type.
std::string getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const; std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const; /// \brief Calls \c Lexer::getLocForEndOfToken() SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0); /// \brief Retrieve the module loader associated with the preprocessor. ModuleLoader &getModuleLoader() const; void emitAndClearUnusedLocalTypedefWarnings(); void ActOnEndOfTranslationUnit(); void CheckDelegatingCtorCycles(); Scope *getScopeForContext(DeclContext *Ctx); void PushFunctionScope(); void PushBlockScope(Scope *BlockScope, BlockDecl *Block); sema::LambdaScopeInfo *PushLambdaScope(); /// \brief This is used to inform Sema what the current TemplateParameterDepth /// is during Parsing. Currently it is used to pass on the depth /// when parsing generic lambda 'auto' parameters. void RecordParsingTemplateParameterDepth(unsigned Depth); void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD, RecordDecl *RD, CapturedRegionKind K); void PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr, const Decl *D = nullptr, const BlockExpr *blkExpr = nullptr); sema::FunctionScopeInfo *getCurFunction() const { return FunctionScopes.back(); } sema::FunctionScopeInfo *getEnclosingFunction() const { if (FunctionScopes.empty()) return nullptr; for (int e = FunctionScopes.size()-1; e >= 0; --e) { if (isa<sema::BlockScopeInfo>(FunctionScopes[e])) continue; return FunctionScopes[e]; } return nullptr; } template <typename ExprT> void recordUseOfEvaluatedWeak(const ExprT *E, bool IsRead=true) { if (!isUnevaluatedContext()) getCurFunction()->recordUseOfWeak(E, IsRead); } void PushCompoundScope(); void PopCompoundScope(); sema::CompoundScopeInfo &getCurCompoundScope() const; bool hasAnyUnrecoverableErrorsInThisFunction() const; /// \brief Retrieve the current block, if any. sema::BlockScopeInfo *getCurBlock(); /// \brief Retrieve the current lambda scope info, if any. 
sema::LambdaScopeInfo *getCurLambda(); /// \brief Retrieve the current generic lambda info, if any. sema::LambdaScopeInfo *getCurGenericLambda(); /// \brief Retrieve the current captured region, if any. sema::CapturedRegionScopeInfo *getCurCapturedRegion(); /// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; } void ActOnComment(SourceRange Comment); //===--------------------------------------------------------------------===// // Type Analysis / Processing: SemaType.cpp. // QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs, const DeclSpec *DS = nullptr); QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA, const DeclSpec *DS = nullptr); QualType BuildPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildReferenceType(QualType T, bool LValueRef, SourceLocation Loc, DeclarationName Entity); QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM, Expr *ArraySize, unsigned Quals, SourceRange Brackets, DeclarationName Entity); QualType BuildExtVectorType(QualType T, Expr *ArraySize, SourceLocation AttrLoc); bool CheckFunctionReturnType(QualType T, SourceLocation Loc); /// \brief Build a function type. /// /// This routine checks the function type according to C++ rules and /// under the assumption that the result type and parameter types have /// just been instantiated from a template. It therefore duplicates /// some of the behavior of GetTypeForDeclarator, but in a much /// simpler form that is only suitable for this narrow use case. /// /// \param T The return type of the function. /// /// \param ParamTypes The parameter types of the function. This array /// will be modified to account for adjustments to the types of the /// function parameters. 
/// /// \param Loc The location of the entity whose type involves this /// function type or, if there is no such entity, the location of the /// type that will have function type. /// /// \param Entity The name of the entity that involves the function /// type, if known. /// /// \param EPI Extra information about the function type. Usually this will /// be taken from an existing function with the same prototype. /// /// \returns A suitable function type, if there are no errors. The /// unqualified type will always be a FunctionProtoType. /// Otherwise, returns a NULL type. QualType BuildFunctionType(QualType T, MutableArrayRef<QualType> ParamTypes, SourceLocation Loc, DeclarationName Entity, const FunctionProtoType::ExtProtoInfo &EPI); QualType BuildMemberPointerType(QualType T, QualType Class, SourceLocation Loc, DeclarationName Entity); QualType BuildBlockPointerType(QualType T, SourceLocation Loc, DeclarationName Entity); QualType BuildParenType(QualType T); QualType BuildAtomicType(QualType T, SourceLocation Loc); TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S); TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy); TypeSourceInfo *GetTypeSourceInfoForDeclarator(Declarator &D, QualType T, TypeSourceInfo *ReturnTypeInfo); /// \brief Package the given type and TSI into a ParsedType. 
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo); DeclarationNameInfo GetNameForDeclarator(Declarator &D); DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name); static QualType GetTypeFromParser(ParsedType Ty, TypeSourceInfo **TInfo = nullptr); CanThrowResult canThrow(const Expr *E); const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc, const FunctionProtoType *FPT); void UpdateExceptionSpec(FunctionDecl *FD, const FunctionProtoType::ExceptionSpecInfo &ESI); bool CheckSpecifiedExceptionType(QualType &T, const SourceRange &Range); bool CheckDistantExceptionSpec(QualType T); bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New); bool CheckEquivalentExceptionSpec( const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc); bool CheckEquivalentExceptionSpec( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Old, SourceLocation OldLoc, const FunctionProtoType *New, SourceLocation NewLoc, bool *MissingExceptionSpecification = nullptr, bool *MissingEmptyExceptionSpecification = nullptr, bool AllowNoexceptAllMatchWithNoSpec = false, bool IsOperatorNew = false); bool CheckExceptionSpecSubset( const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID, const FunctionProtoType *Superset, SourceLocation SuperLoc, const FunctionProtoType *Subset, SourceLocation SubLoc); bool CheckParamExceptionSpec(const PartialDiagnostic & NoteID, const FunctionProtoType *Target, SourceLocation TargetLoc, const FunctionProtoType *Source, SourceLocation SourceLoc); TypeResult ActOnTypeName(Scope *S, Declarator &D); /// \brief The parser has parsed the context-sensitive type 'instancetype' /// in an Objective-C message declaration. Return the appropriate type. ParsedType ActOnObjCInstanceType(SourceLocation Loc); /// \brief Abstract class used to diagnose incomplete types. 
struct TypeDiagnoser { bool Suppressed; TypeDiagnoser(bool Suppressed = false) : Suppressed(Suppressed) { } virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0; virtual ~TypeDiagnoser() {} }; static int getPrintable(int I) { return I; } static unsigned getPrintable(unsigned I) { return I; } static bool getPrintable(bool B) { return B; } static const char * getPrintable(const char *S) { return S; } static StringRef getPrintable(StringRef S) { return S; } static const std::string &getPrintable(const std::string &S) { return S; } static const IdentifierInfo *getPrintable(const IdentifierInfo *II) { return II; } static DeclarationName getPrintable(DeclarationName N) { return N; } static QualType getPrintable(QualType T) { return T; } static SourceRange getPrintable(SourceRange R) { return R; } static SourceRange getPrintable(SourceLocation L) { return L; } static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); } static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();} template<typename T1> class BoundTypeDiagnoser1 : public TypeDiagnoser { unsigned DiagID; const T1 &Arg1; public: BoundTypeDiagnoser1(unsigned DiagID, const T1 &Arg1) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; S.Diag(Loc, DiagID) << getPrintable(Arg1) << T; } virtual ~BoundTypeDiagnoser1() { } }; template<typename T1, typename T2> class BoundTypeDiagnoser2 : public TypeDiagnoser { unsigned DiagID; const T1 &Arg1; const T2 &Arg2; public: BoundTypeDiagnoser2(unsigned DiagID, const T1 &Arg1, const T2 &Arg2) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1), Arg2(Arg2) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << T; } virtual ~BoundTypeDiagnoser2() { } }; template<typename T1, typename T2, typename T3> class BoundTypeDiagnoser3 
: public TypeDiagnoser { unsigned DiagID; const T1 &Arg1; const T2 &Arg2; const T3 &Arg3; public: BoundTypeDiagnoser3(unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) : TypeDiagnoser(DiagID == 0), DiagID(DiagID), Arg1(Arg1), Arg2(Arg2), Arg3(Arg3) { } void diagnose(Sema &S, SourceLocation Loc, QualType T) override { if (Suppressed) return; S.Diag(Loc, DiagID) << getPrintable(Arg1) << getPrintable(Arg2) << getPrintable(Arg3) << T; } virtual ~BoundTypeDiagnoser3() { } }; private: bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); public: bool RequireCompleteType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID); template<typename T1> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireCompleteType(Loc, T, Diagnoser); } template<typename T1, typename T2> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireCompleteType(Loc, T, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireCompleteType(Loc, T, Diagnoser); } bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser); bool RequireCompleteExprType(Expr *E, unsigned DiagID); template<typename T1> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireCompleteExprType(E, Diagnoser); } template<typename T1, typename T2> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, 
Arg2); return RequireCompleteExprType(E, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireCompleteExprType(Expr *E, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireCompleteExprType(E, Diagnoser); } bool RequireLiteralType(SourceLocation Loc, QualType T, TypeDiagnoser &Diagnoser); bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID); template<typename T1> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1) { BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1); return RequireLiteralType(Loc, T, Diagnoser); } template<typename T1, typename T2> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2) { BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2); return RequireLiteralType(Loc, T, Diagnoser); } template<typename T1, typename T2, typename T3> bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID, const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) { BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3); return RequireLiteralType(Loc, T, Diagnoser); } QualType getElaboratedType(ElaboratedTypeKeyword Keyword, const CXXScopeSpec &SS, QualType T); QualType BuildTypeofExprType(Expr *E, SourceLocation Loc); /// If AsUnevaluated is false, E is treated as though it were an evaluated /// context, such as when building a type for decltype(auto). QualType BuildDecltypeType(Expr *E, SourceLocation Loc, bool AsUnevaluated = true); QualType BuildUnaryTransformType(QualType BaseType, UnaryTransformType::UTTKind UKind, SourceLocation Loc); //===--------------------------------------------------------------------===// // Symbol table / Decl tracking callbacks: SemaDecl.cpp. // /// List of decls defined in a function prototype. 
This contains EnumConstants /// that incorrectly end up in translation unit scope because there is no /// function to pin them on. ActOnFunctionDeclarator reads this list and patches /// them into the FunctionDecl. std::vector<NamedDecl*> DeclsInPrototypeScope; DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr); void DiagnoseUseOfUnimplementedSelectors(); bool isSimpleTypeSpecifier(tok::TokenKind Kind) const; ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec *SS = nullptr, bool isClassName = false, bool HasTrailingDot = false, ParsedType ObjectType = ParsedType(), bool IsCtorOrDtorName = false, bool WantNontrivialTypeSourceInfo = false, IdentifierInfo **CorrectedII = nullptr); TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S); bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S); void DiagnoseUnknownTypeName(IdentifierInfo *&II, SourceLocation IILoc, Scope *S, CXXScopeSpec *SS, ParsedType &SuggestedType, bool AllowClassTemplates = false); /// \brief For compatibility with MSVC, we delay parsing of some default /// template type arguments until instantiation time. Emits a warning and /// returns a synthesized DependentNameType that isn't really dependent on any /// other template arguments. ParsedType ActOnDelayedDefaultTemplateArg(const IdentifierInfo &II, SourceLocation NameLoc); /// \brief Describes the result of the name lookup and resolution performed /// by \c ClassifyName(). 
enum NameClassificationKind {
  NC_Unknown,
  NC_Error,
  NC_Keyword,
  NC_Type,
  NC_Expression,
  NC_NestedNameSpecifier,
  NC_TypeTemplate,
  NC_VarTemplate,
  NC_FunctionTemplate
};

// Discriminated result of ClassifyName(): only the field corresponding to
// Kind is meaningful, as enforced by the asserting accessors below.
class NameClassification {
  NameClassificationKind Kind;
  ExprResult Expr;
  TemplateName Template;
  ParsedType Type;
  const IdentifierInfo *Keyword;

  explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}

public:
  NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}

  NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}

  NameClassification(const IdentifierInfo *Keyword)
    : Kind(NC_Keyword), Keyword(Keyword) { }

  static NameClassification Error() {
    return NameClassification(NC_Error);
  }

  static NameClassification Unknown() {
    return NameClassification(NC_Unknown);
  }

  static NameClassification NestedNameSpecifier() {
    return NameClassification(NC_NestedNameSpecifier);
  }

  static NameClassification TypeTemplate(TemplateName Name) {
    NameClassification Result(NC_TypeTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification VarTemplate(TemplateName Name) {
    NameClassification Result(NC_VarTemplate);
    Result.Template = Name;
    return Result;
  }

  static NameClassification FunctionTemplate(TemplateName Name) {
    NameClassification Result(NC_FunctionTemplate);
    Result.Template = Name;
    return Result;
  }

  NameClassificationKind getKind() const { return Kind; }

  ParsedType getType() const {
    assert(Kind == NC_Type);
    return Type;
  }

  ExprResult getExpression() const {
    assert(Kind == NC_Expression);
    return Expr;
  }

  TemplateName getTemplateName() const {
    assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
           Kind == NC_VarTemplate);
    return Template;
  }

  // Map the template classification kinds onto the parser-facing
  // TemplateNameKind values.
  TemplateNameKind getTemplateNameKind() const {
    switch (Kind) {
    case NC_TypeTemplate:
      return TNK_Type_template;
    case NC_FunctionTemplate:
      return TNK_Function_template;
    case NC_VarTemplate:
      return TNK_Var_template;
    default:
      llvm_unreachable("unsupported name classification.");
    }
  }
};

/// \brief Perform name lookup on the
given name, classifying it based on /// the results of name lookup and the following token. /// /// This routine is used by the parser to resolve identifiers and help direct /// parsing. When the identifier cannot be found, this routine will attempt /// to correct the typo and classify based on the resulting name. /// /// \param S The scope in which we're performing name lookup. /// /// \param SS The nested-name-specifier that precedes the name. /// /// \param Name The identifier. If typo correction finds an alternative name, /// this pointer parameter will be updated accordingly. /// /// \param NameLoc The location of the identifier. /// /// \param NextToken The token following the identifier. Used to help /// disambiguate the name. /// /// \param IsAddressOfOperand True if this name is the operand of a unary /// address of ('&') expression, assuming it is classified as an /// expression. /// /// \param CCC The correction callback, if typo correction is desired. NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS, IdentifierInfo *&Name, SourceLocation NameLoc, const Token &NextToken, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr); Decl *ActOnDeclarator(Scope *S, Declarator &D); NamedDecl *HandleDeclarator(Scope *S, Declarator &D, MultiTemplateParamsArg TemplateParameterLists); void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S); bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info); bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC, DeclarationName Name, SourceLocation Loc); void diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals, SourceLocation FallbackLoc, SourceLocation ConstQualLoc = SourceLocation(), SourceLocation VolatileQualLoc = SourceLocation(), SourceLocation RestrictQualLoc = SourceLocation(), SourceLocation AtomicQualLoc = SourceLocation()); static bool adjustContextForLocalExternDecl(DeclContext *&DC); void DiagnoseFunctionSpecifiers(const DeclSpec 
&DS); void CheckShadow(Scope *S, VarDecl *D, const LookupResult& R); void CheckShadow(Scope *S, VarDecl *D); void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange); void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D); NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous); NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D, LookupResult &Previous, bool &Redeclaration); NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); // Returns true if the variable declaration is a redeclaration bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous); void CheckVariableDeclarationType(VarDecl *NewVD); void CheckCompleteVariableDeclaration(VarDecl *var); void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D); NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC, TypeSourceInfo *TInfo, LookupResult &Previous, MultiTemplateParamsArg TemplateParamLists, bool &AddToScope); bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD); bool CheckConstexprFunctionDecl(const FunctionDecl *FD); bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body); void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD); void FindHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); void NoteHiddenVirtualMethods(CXXMethodDecl *MD, SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods); // Returns true if the function declaration is a redeclaration bool CheckFunctionDeclaration(Scope *S, FunctionDecl *NewFD, LookupResult &Previous, bool IsExplicitSpecialization); void CheckMain(FunctionDecl *FD, const DeclSpec &D); void CheckMSVCRTEntryPoint(FunctionDecl *FD); Decl *ActOnParamDeclarator(Scope *S, Declarator &D); ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext 
*DC, SourceLocation Loc, QualType T); ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc, SourceLocation NameLoc, IdentifierInfo *Name, QualType T, TypeSourceInfo *TSInfo, StorageClass SC); void ActOnParamDefaultArgument(Decl *param, SourceLocation EqualLoc, Expr *defarg); void ActOnParamUnparsedDefaultArgument(Decl *param, SourceLocation EqualLoc, SourceLocation ArgLoc); void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc); bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg, SourceLocation EqualLoc); void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit, bool TypeMayContainAuto); void ActOnUninitializedDecl(Decl *dcl, bool TypeMayContainAuto); void ActOnInitializerError(Decl *Dcl); void ActOnCXXForRangeDecl(Decl *D); StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc, IdentifierInfo *Ident, ParsedAttributes &Attrs, SourceLocation AttrEnd); void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc); void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc); void FinalizeDeclaration(Decl *D); DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS, ArrayRef<Decl *> Group); DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group, bool TypeMayContainAuto = true); /// Should be called on all declarations that might have attached /// documentation comments. 
void ActOnDocumentableDecl(Decl *D); void ActOnDocumentableDecls(ArrayRef<Decl *> Group); void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D, SourceLocation LocAfterDecls); void CheckForFunctionRedefinition(FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr); Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D); Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D); void ActOnStartOfObjCMethodDef(Scope *S, Decl *D); bool isObjCMethodDecl(Decl *D) { return D && isa<ObjCMethodDecl>(D); } /// \brief Determine whether we can delay parsing the body of a function or /// function template until it is used, assuming we don't care about emitting /// code for that function. /// /// This will be \c false if we may need the body of the function in the /// middle of parsing an expression (where it's impractical to switch to /// parsing a different function), for instance, if it's constexpr in C++11 /// or has an 'auto' return type in C++14. These cases are essentially bugs. bool canDelayFunctionBody(const Declarator &D); /// \brief Determine whether we can skip parsing the body of a function /// definition, assuming we don't care about analyzing its body or emitting /// code for that function. /// /// This will be \c false only if we may need the body of the function in /// order to parse the rest of the program (for instance, if it is /// \c constexpr in C++11 or has an 'auto' return type in C++14). bool canSkipFunctionBody(Decl *D); void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body); Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation); Decl *ActOnSkippedFunctionBody(Decl *Decl); void ActOnFinishInlineMethodDef(CXXMethodDecl *D); /// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an /// attribute for which parsing is delayed. 
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs); /// \brief Diagnose any unused parameters in the given sequence of /// ParmVarDecl pointers. void DiagnoseUnusedParameters(ParmVarDecl * const *Begin, ParmVarDecl * const *End); /// \brief Diagnose whether the size of parameters or return value of a /// function or obj-c method definition is pass-by-value and larger than a /// specified threshold. void DiagnoseSizeOfParametersAndReturnValue(ParmVarDecl * const *Begin, ParmVarDecl * const *End, QualType ReturnTy, NamedDecl *D); void DiagnoseInvalidJumps(Stmt *Body); Decl *ActOnFileScopeAsmDecl(Expr *expr, SourceLocation AsmLoc, SourceLocation RParenLoc); /// \brief Handle a C++11 empty-declaration and attribute-declaration. Decl *ActOnEmptyDeclaration(Scope *S, AttributeList *AttrList, SourceLocation SemiLoc); /// \brief The parser has processed a module import declaration. /// /// \param AtLoc The location of the '@' symbol, if any. /// /// \param ImportLoc The location of the 'import' keyword. /// /// \param Path The module access path. DeclResult ActOnModuleImport(SourceLocation AtLoc, SourceLocation ImportLoc, ModuleIdPath Path); /// \brief The parser has processed a module import translated from a /// #include or similar preprocessing directive. void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod); /// \brief Create an implicit import of the given module at the given /// source location, for error recovery, if possible. /// /// This routine is typically used when an entity found by name lookup /// is actually hidden within a module that we know about but the user /// has forgotten to import. void createImplicitModuleImportForErrorRecovery(SourceLocation Loc, Module *Mod); /// \brief Retrieve a suitable printing policy. PrintingPolicy getPrintingPolicy() const { return getPrintingPolicy(Context, PP); } /// \brief Retrieve a suitable printing policy. 
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx, const Preprocessor &PP); /// Scope actions. void ActOnPopScope(SourceLocation Loc, Scope *S); void ActOnTranslationUnitScope(Scope *S); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS); Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS, MultiTemplateParamsArg TemplateParams, bool IsExplicitInstantiation = false); Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS, AccessSpecifier AS, RecordDecl *Record, const PrintingPolicy &Policy); Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS, RecordDecl *Record); bool isAcceptableTagRedeclaration(const TagDecl *Previous, TagTypeKind NewTag, bool isDefinition, SourceLocation NewTagLoc, const IdentifierInfo &Name); enum TagUseKind { TUK_Reference, // Reference to a tag: 'struct foo *X;' TUK_Declaration, // Fwd decl of a tag: 'struct foo;' TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;' TUK_Friend // Friend declaration: 'friend struct foo;' }; Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, AccessSpecifier AS, SourceLocation ModulePrivateLoc, MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl, bool &IsDependent, SourceLocation ScopedEnumKWLoc, bool ScopedEnumUsesClassTag, TypeResult UnderlyingType, bool IsTypeSpecifier); Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc, unsigned TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, MultiTemplateParamsArg TempParamLists); TypeResult ActOnDependentTag(Scope *S, unsigned TagSpec, TagUseKind TUK, const CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation TagLoc, SourceLocation NameLoc); void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart, IdentifierInfo *ClassName, SmallVectorImpl<Decl *> &Decls); Decl 
*ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth); FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS); MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, InClassInitStyle InitStyle, AccessSpecifier AS, AttributeList *MSPropertyAttr); FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T, TypeSourceInfo *TInfo, RecordDecl *Record, SourceLocation Loc, bool Mutable, Expr *BitfieldWidth, InClassInitStyle InitStyle, SourceLocation TSSL, AccessSpecifier AS, NamedDecl *PrevDecl, Declarator *D = nullptr); bool CheckNontrivialField(FieldDecl *FD); void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM); bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD); void ActOnLastBitfield(SourceLocation DeclStart, SmallVectorImpl<Decl *> &AllIvarDecls); Decl *ActOnIvar(Scope *S, SourceLocation DeclStart, Declarator &D, Expr *BitfieldWidth, tok::ObjCKeywordKind visibility); // This is used for both record definitions and ObjC interface declarations. void ActOnFields(Scope* S, SourceLocation RecLoc, Decl *TagDecl, ArrayRef<Decl *> Fields, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); /// ActOnTagStartDefinition - Invoked when we have entered the /// scope of a tag's definition (e.g., for an enumeration, class, /// struct, or union). void ActOnTagStartDefinition(Scope *S, Decl *TagDecl); Decl *ActOnObjCContainerStartDefinition(Decl *IDecl); /// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a /// C++ record definition's base-specifiers clause and are starting its /// member declarations. 
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl, SourceLocation FinalLoc, bool IsFinalSpelledSealed, SourceLocation LBraceLoc); /// ActOnTagFinishDefinition - Invoked once we have finished parsing /// the definition of a tag (enumeration, class, struct, or union). void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl, SourceLocation RBraceLoc); void ActOnObjCContainerFinishDefinition(); /// \brief Invoked when we must temporarily exit the objective-c container /// scope for parsing/looking-up C constructs. /// /// Must be followed by a call to \see ActOnObjCReenterContainerContext void ActOnObjCTemporaryExitContainerContext(DeclContext *DC); void ActOnObjCReenterContainerContext(DeclContext *DC); /// ActOnTagDefinitionError - Invoked when there was an unrecoverable /// error parsing the definition of a tag. void ActOnTagDefinitionError(Scope *S, Decl *TagDecl); EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum, EnumConstantDecl *LastEnumConst, SourceLocation IdLoc, IdentifierInfo *Id, Expr *val); bool CheckEnumUnderlyingType(TypeSourceInfo *TI); bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped, QualType EnumUnderlyingTy, const EnumDecl *Prev); Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant, SourceLocation IdLoc, IdentifierInfo *Id, AttributeList *Attrs, SourceLocation EqualLoc, Expr *Val); void ActOnEnumBody(SourceLocation EnumLoc, SourceLocation LBraceLoc, SourceLocation RBraceLoc, Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S, AttributeList *Attr); DeclContext *getContainingDC(DeclContext *DC); /// Set the current declaration context until it gets popped. void PushDeclContext(Scope *S, DeclContext *DC); void PopDeclContext(); /// EnterDeclaratorContext - Used when we must lookup names in the context /// of a declarator's nested name specifier. 
void EnterDeclaratorContext(Scope *S, DeclContext *DC); void ExitDeclaratorContext(Scope *S); /// Push the parameters of D, which must be a function, into scope. void ActOnReenterFunctionContext(Scope* S, Decl* D); void ActOnExitFunctionContext(); DeclContext *getFunctionLevelDeclContext(); /// getCurFunctionDecl - If inside of a function body, this returns a pointer /// to the function decl for the function being parsed. If we're currently /// in a 'block', this returns the containing context. FunctionDecl *getCurFunctionDecl(); /// getCurMethodDecl - If inside of a method body, this returns a pointer to /// the method decl for the method being parsed. If we're currently /// in a 'block', this returns the containing context. ObjCMethodDecl *getCurMethodDecl(); /// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method /// or C function we're in, otherwise return null. If we're currently /// in a 'block', this returns the containing context. NamedDecl *getCurFunctionOrMethodDecl(); /// Add this decl to the scope shadowed decl chains. void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true); /// \brief Make the given externally-produced declaration visible at the /// top level scope. /// /// \param D The externally-produced declaration to push. /// /// \param Name The name of the externally-produced declaration. void pushExternalDeclIntoScope(NamedDecl *D, DeclarationName Name); /// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true /// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns /// true if 'D' belongs to the given declaration context. /// /// \param AllowInlineNamespace If \c true, allow the declaration to be in the /// enclosing namespace set of the context, rather than contained /// directly within it. 
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr, bool AllowInlineNamespace = false); /// Finds the scope corresponding to the given decl context, if it /// happens to be an enclosing scope. Otherwise return NULL. static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC); /// Subroutines of ActOnDeclarator(). TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T, TypeSourceInfo *TInfo); bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New); /// Attribute merging methods. Return true if a new attribute was added. AvailabilityAttr *mergeAvailabilityAttr(NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted, bool IsUnavailable, StringRef Message, bool Override, unsigned AttrSpellingListIndex); TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range, TypeVisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range, VisibilityAttr::VisibilityType Vis, unsigned AttrSpellingListIndex); DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MSInheritanceAttr * mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase, unsigned AttrSpellingListIndex, MSInheritanceAttr::Spelling SemanticSpelling); FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range, IdentifierInfo *Format, int FormatIdx, int FirstArg, unsigned AttrSpellingListIndex); SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name, unsigned AttrSpellingListIndex); AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range, unsigned AttrSpellingListIndex); OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range, unsigned 
AttrSpellingListIndex); /// \brief Describes the kind of merge to perform for availability /// attributes (including "deprecated", "unavailable", and "availability"). enum AvailabilityMergeKind { /// \brief Don't merge availability attributes at all. AMK_None, /// \brief Merge availability attributes for a redeclaration, which requires /// an exact match. AMK_Redeclaration, /// \brief Merge availability attributes for an override, which requires /// an exact match or a weakening of constraints. AMK_Override }; void mergeDeclAttributes(NamedDecl *New, Decl *Old, AvailabilityMergeKind AMK = AMK_Redeclaration); void MergeTypedefNameDecl(TypedefNameDecl *New, LookupResult &OldDecls); bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S, bool MergeTypeWithOld); bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old, Scope *S, bool MergeTypeWithOld); void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old); void MergeVarDecl(VarDecl *New, LookupResult &Previous); void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld); void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old); bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S); // AssignmentAction - This is used by all the assignment diagnostic functions // to represent what is actually causing the operation enum AssignmentAction { AA_Assigning, AA_Passing, AA_Returning, AA_Converting, AA_Initializing, AA_Sending, AA_Casting, AA_Passing_CFAudited }; /// C++ Overloading. enum OverloadKind { /// This is a legitimate overload: the existing declarations are /// functions or function templates with different signatures. Ovl_Overload, /// This is not an overload because the signature exactly matches /// an existing declaration. Ovl_Match, /// This is not an overload because the lookup results contain a /// non-function. 
Ovl_NonFunction
};

OverloadKind CheckOverload(Scope *S, FunctionDecl *New,
                           const LookupResult &OldDecls, NamedDecl *&OldDecl,
                           bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl);

/// \brief Checks availability of the function depending on the current
/// function context. Inside an unavailable function, unavailability is
/// ignored.
///
/// \returns true if \p FD is unavailable and current context is inside
/// an available function, false otherwise.
bool isFunctionConsideredUnavailable(FunctionDecl *FD);

ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
                      bool SuppressUserConversions,
                      bool AllowExplicit,
                      bool InOverloadResolution,
                      bool CStyle,
                      bool AllowObjCWritebackConversion);

// Predicates classifying individual implicit-conversion categories.
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
                         bool InOverloadResolution,
                         QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
                             QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
                               QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
                              QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
                                const FunctionProtoType *NewType,
                                unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
                                QualType FromType, QualType ToType);

CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
                            CastKind &Kind,
                            CXXCastPath& BasePath,
                            bool IgnoreBaseAccess);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
                               bool InOverloadResolution,
                               QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType
ToType, CastKind &Kind, CXXCastPath &BasePath, bool IgnoreBaseAccess); bool IsQualificationConversion(QualType FromType, QualType ToType, bool CStyle, bool &ObjCLifetimeConversion); bool IsNoReturnConversion(QualType FromType, QualType ToType, QualType &ResultTy); bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType); bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg); ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity, const VarDecl *NRVOCandidate, QualType ResultType, Expr *Value, bool AllowNRVO = true); bool CanPerformCopyInitialization(const InitializedEntity &Entity, ExprResult Init); ExprResult PerformCopyInitialization(const InitializedEntity &Entity, SourceLocation EqualLoc, ExprResult Init, bool TopLevelOfInitList = false, bool AllowExplicit = false); ExprResult PerformObjectArgumentInitialization(Expr *From, NestedNameSpecifier *Qualifier, NamedDecl *FoundDecl, CXXMethodDecl *Method); ExprResult PerformContextuallyConvertToBool(Expr *From); ExprResult PerformContextuallyConvertToObjCPointer(Expr *From); /// Contexts in which a converted constant expression is required. enum CCEKind { CCEK_CaseValue, ///< Expression in a case label. CCEK_Enumerator, ///< Enumerator value with fixed underlying type. CCEK_TemplateArg, ///< Value of a non-type template parameter. CCEK_NewExpr ///< Constant expression in a noptr-new-declarator. }; ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, llvm::APSInt &Value, CCEKind CCE); ExprResult CheckConvertedConstantExpression(Expr *From, QualType T, APValue &Value, CCEKind CCE); /// \brief Abstract base class used to perform a contextual implicit /// conversion from an expression to any type passing a filter. 
class ContextualImplicitConverter {
public:
  // NOTE(review): presumably Suppress silences the converter's diagnostics
  // entirely and SuppressConversion only the conversion-function notes —
  // confirm at call sites; neither field is documented here.
  bool Suppress;
  bool SuppressConversion;

  ContextualImplicitConverter(bool Suppress = false,
                              bool SuppressConversion = false)
      : Suppress(Suppress), SuppressConversion(SuppressConversion) {}

  /// \brief Determine whether the specified type is a valid destination type
  /// for this conversion.
  virtual bool match(QualType T) = 0;

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a diagnostic when the expression has incomplete class type.
  virtual SemaDiagnosticBuilder
  diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a diagnostic when the only matching conversion function
  /// is explicit.
  virtual SemaDiagnosticBuilder diagnoseExplicitConv(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  /// \brief Emits a note for the explicit conversion function.
  virtual SemaDiagnosticBuilder
  noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when there are multiple possible conversion
  /// functions.
  virtual SemaDiagnosticBuilder
  diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;

  /// \brief Emits a note for one of the candidate conversions.
  virtual SemaDiagnosticBuilder
  noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;

  /// \brief Emits a diagnostic when we picked a conversion function
  /// (for cases when we are not allowed to pick a conversion function).
  virtual SemaDiagnosticBuilder diagnoseConversion(
      Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;

  virtual ~ContextualImplicitConverter() {}
};

class ICEConvertDiagnoser : public ContextualImplicitConverter {
  bool AllowScopedEnumerations;

public:
  ICEConvertDiagnoser(bool AllowScopedEnumerations, bool Suppress,
                      bool SuppressConversion)
      : ContextualImplicitConverter(Suppress, SuppressConversion),
        AllowScopedEnumerations(AllowScopedEnumerations) {}

  /// Match an integral or (possibly scoped) enumeration type.
  bool match(QualType T) override;

  // Forwards the generic "no match" diagnostic to the integer-specific hook.
  SemaDiagnosticBuilder
  diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
    return diagnoseNotInt(S, Loc, T);
  }

  /// \brief Emits a diagnostic complaining that the expression does not have
  /// integral or enumeration type.
  virtual SemaDiagnosticBuilder
  diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};

/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
    SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);

enum ObjCSubscriptKind {
  OS_Array,
  OS_Dictionary,
  OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);

// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
  LK_Array,
  LK_Dictionary,
  LK_Numeric,
  LK_Boxed,
  LK_String,
  LK_Block,
  LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);

ExprResult PerformObjectMemberConversion(Expr *From,
                                         NestedNameSpecifier *Qualifier,
                                         NamedDecl *FoundDecl,
                                         NamedDecl *Member);

// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallPtrSet<DeclContext *, 16> AssociatedNamespaceSet; typedef llvm::SmallPtrSet<CXXRecordDecl *, 16> AssociatedClassSet; void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false, bool PartialOverloading = false, bool AllowExplicit = false); void AddFunctionCandidates(const UnresolvedSetImpl &Functions, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); void AddMethodCandidate(DeclAccessPair FoundDecl, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversion = false); void AddMethodCandidate(CXXMethodDecl *Method, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ObjectType, Expr::Classification ObjectClassification, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddTemplateOverloadCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool SuppressUserConversions = false); void AddConversionCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr *From, QualType ToType, OverloadCandidateSet& CandidateSet, bool AllowObjCConversionOnExplicit); void AddTemplateConversionCandidate(FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, Expr 
*From, QualType ToType, OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit); void AddSurrogateCandidate(CXXConversionDecl *Conversion, DeclAccessPair FoundDecl, CXXRecordDecl *ActingContext, const FunctionProtoType *Proto, Expr *Object, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddMemberOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, SourceRange OpRange = SourceRange()); void AddBuiltinCandidate(QualType ResultTy, QualType *ParamTys, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet, bool IsAssignmentOperator = false, unsigned NumContextualBoolArguments = 0); void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op, SourceLocation OpLoc, ArrayRef<Expr *> Args, OverloadCandidateSet& CandidateSet); void AddArgumentDependentLookupCandidates(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, TemplateArgumentListInfo *ExplicitTemplateArgs, OverloadCandidateSet& CandidateSet, bool PartialOverloading = false); // Emit as a 'note' the specific overload candidate void NoteOverloadCandidate(FunctionDecl *Fn, QualType DestType = QualType()); // Emit as a series of 'note's all template and non-templates // identified by the expression Expr void NoteAllOverloadCandidates(Expr* E, QualType DestType = QualType()); /// Check the enable_if expressions on the given function. Returns the first /// failing attribute, or NULL if they were all successful. 
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args, bool MissingImplicitThis = false); // [PossiblyAFunctionType] --> [Return] // NonFunctionType --> NonFunctionType // R (A) --> R(A) // R (*)(A) --> R (A) // R (&)(A) --> R (A) // R (S::*)(A) --> R (A) QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType); FunctionDecl * ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr, QualType TargetType, bool Complain, DeclAccessPair &Found, bool *pHadMultipleCandidates = nullptr); FunctionDecl * ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl, bool Complain = false, DeclAccessPair *Found = nullptr); bool ResolveAndFixSingleFunctionTemplateSpecialization( ExprResult &SrcExpr, bool DoFunctionPointerConverion = false, bool Complain = false, const SourceRange& OpRangeForComplaining = SourceRange(), QualType DestTypeForComplaining = QualType(), unsigned DiagIDForComplaining = 0); Expr *FixOverloadedFunctionReference(Expr *E, DeclAccessPair FoundDecl, FunctionDecl *Fn); ExprResult FixOverloadedFunctionReference(ExprResult, DeclAccessPair FoundDecl, FunctionDecl *Fn); void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE, ArrayRef<Expr *> Args, OverloadCandidateSet &CandidateSet, bool PartialOverloading = false); // An enum used to represent the different possible results of building a // range-based for loop. enum ForRangeStatus { FRS_Success, FRS_NoViableFunction, FRS_DiagnosticIssued }; // An enum to represent whether something is dealing with a call to begin() // or a call to end() in a range-based for loop. 
enum BeginEndFunction { BEF_begin, BEF_end }; ForRangeStatus BuildForRangeBeginEndCall(Scope *S, SourceLocation Loc, SourceLocation RangeLoc, VarDecl *Decl, BeginEndFunction BEF, const DeclarationNameInfo &NameInfo, LookupResult &MemberLookup, OverloadCandidateSet *CandidateSet, Expr *Range, ExprResult *CallExpr); ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc, Expr *ExecConfig, bool AllowTypoCorrection=true); bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE, MultiExprArg Args, SourceLocation RParenLoc, OverloadCandidateSet *CandidateSet, ExprResult *Result); ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *input); ExprResult CreateOverloadedBinOp(SourceLocation OpLoc, unsigned Opc, const UnresolvedSetImpl &Fns, Expr *LHS, Expr *RHS); ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc, SourceLocation RLoc, Expr *Base,Expr *Idx); ExprResult BuildCallToMemberFunction(Scope *S, Expr *MemExpr, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc, MultiExprArg Args, SourceLocation RParenLoc); ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool *NoArrowOperatorFound = nullptr); /// CheckCallReturnType - Checks that a call expression's return type is /// complete. Returns true on failure. The location passed in is the location /// that best represents the call. bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc, CallExpr *CE, FunctionDecl *FD); /// Helpers for dealing with blocks and functions. 
bool CheckParmsForFunctionDef(ParmVarDecl *const *Param, ParmVarDecl *const *ParamEnd, bool CheckParameterNames); void CheckCXXDefaultArguments(FunctionDecl *FD); void CheckExtraCXXDefaultArguments(Declarator &D); Scope *getNonFieldDeclScope(Scope *S); /// \name Name lookup /// /// These routines provide name lookup that is used during semantic /// analysis to resolve the various kinds of names (identifiers, /// overloaded operator names, constructor names, etc.) into zero or /// more declarations within a particular scope. The major entry /// points are LookupName, which performs unqualified name lookup, /// and LookupQualifiedName, which performs qualified name lookup. /// /// All name lookup is performed based on some specific criteria, /// which specify what names will be visible to name lookup and how /// far name lookup should work. These criteria are important both /// for capturing language semantics (certain lookups will ignore /// certain names, for example) and for performance, since name /// lookup is often a bottleneck in the compilation of C++. Name /// lookup criteria is specified via the LookupCriteria enumeration. /// /// The results of name lookup can vary based on the kind of name /// lookup performed, the current language, and the translation /// unit. In C, for example, name lookup will either return nothing /// (no entity found) or a single declaration. In C++, name lookup /// can additionally refer to a set of overloaded functions or /// result in an ambiguity. All of the possible results of name /// lookup are captured by the LookupResult class, which provides /// the ability to distinguish among them. //@{ /// @brief Describes the kind of name lookup to perform. enum LookupNameKind { /// Ordinary name lookup, which finds ordinary names (functions, /// variables, typedefs, etc.) in C and most kinds of names /// (functions, variables, members, types, etc.) in C++. 
LookupOrdinaryName = 0, /// Tag name lookup, which finds the names of enums, classes, /// structs, and unions. LookupTagName, /// Label name lookup. LookupLabel, /// Member name lookup, which finds the names of /// class/struct/union members. LookupMemberName, /// Look up of an operator name (e.g., operator+) for use with /// operator overloading. This lookup is similar to ordinary name /// lookup, but will ignore any declarations that are class members. LookupOperatorName, /// Look up of a name that precedes the '::' scope resolution /// operator in C++. This lookup completely ignores operator, object, /// function, and enumerator names (C++ [basic.lookup.qual]p1). LookupNestedNameSpecifierName, /// Look up a namespace name within a C++ using directive or /// namespace alias definition, ignoring non-namespace names (C++ /// [basic.lookup.udir]p1). LookupNamespaceName, /// Look up all declarations in a scope with the given name, /// including resolved using declarations. This is appropriate /// for checking redeclarations for a using declaration. LookupUsingDeclName, /// Look up an ordinary name that is going to be redeclared as a /// name with linkage. This lookup ignores any declarations that /// are outside of the current scope unless they have linkage. See /// C99 6.2.2p4-5 and C++ [basic.link]p6. LookupRedeclarationWithLinkage, /// Look up a friend of a local class. This lookup does not look /// outside the innermost non-class scope. See C++11 [class.friend]p11. LookupLocalFriendName, /// Look up the name of an Objective-C protocol. LookupObjCProtocolName, /// Look up implicit 'self' parameter of an objective-c method. LookupObjCImplicitSelfParam, /// \brief Look up any declaration with any name. LookupAnyName }; /// \brief Specifies whether (or how) name lookup is being performed for a /// redeclaration (vs. a reference). enum RedeclarationKind { /// \brief The lookup is a reference to this name that is not for the /// purpose of redeclaring the name. 
NotForRedeclaration = 0, /// \brief The lookup results will be used for redeclaration of a name, /// if an entity by that name already exists. ForRedeclaration }; /// \brief The possible outcomes of name lookup for a literal operator. enum LiteralOperatorLookupResult { /// \brief The lookup resulted in an error. LOLR_Error, /// \brief The lookup found a single 'cooked' literal operator, which /// expects a normal literal to be built and passed to it. LOLR_Cooked, /// \brief The lookup found a single 'raw' literal operator, which expects /// a string literal containing the spelling of the literal token. LOLR_Raw, /// \brief The lookup found an overload set of literal operator templates, /// which expect the characters of the spelling of the literal token to be /// passed as a non-type template argument pack. LOLR_Template, /// \brief The lookup found an overload set of literal operator templates, /// which expect the character type and characters of the spelling of the /// string literal token to be passed as template arguments. LOLR_StringTemplate }; SpecialMemberOverloadResult *LookupSpecialMember(CXXRecordDecl *D, CXXSpecialMember SM, bool ConstArg, bool VolatileArg, bool RValueThis, bool ConstThis, bool VolatileThis); typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator; typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)> TypoRecoveryCallback; private: bool CppLookupName(LookupResult &R, Scope *S); struct TypoExprState { std::unique_ptr<TypoCorrectionConsumer> Consumer; TypoDiagnosticGenerator DiagHandler; TypoRecoveryCallback RecoveryHandler; TypoExprState(); TypoExprState(TypoExprState&& other) LLVM_NOEXCEPT; TypoExprState& operator=(TypoExprState&& other) LLVM_NOEXCEPT; }; /// \brief The set of unhandled TypoExprs and their associated state. llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos; /// \brief Creates a new TypoExpr AST node. 
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
                            TypoDiagnosticGenerator TDG,
                            TypoRecoveryCallback TRC);

// \brief The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;

/// \brief Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;

/// \brief Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
                           Sema::LookupNameKind LookupKind,
                           Scope *S, CXXScopeSpec *SS,
                           std::unique_ptr<CorrectionCandidateCallback> CCC,
                           DeclContext *MemberContext,
                           bool EnteringContext,
                           const ObjCObjectPointerType *OPT,
                           bool ErrorRecovery);

public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;

/// \brief Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);

/// \brief Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name, SourceLocation Loc, LookupNameKind NameKind, RedeclarationKind Redecl = NotForRedeclaration); bool LookupName(LookupResult &R, Scope *S, bool AllowBuiltinCreation = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, bool InUnqualifiedLookup = false); bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx, CXXScopeSpec &SS); bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS, bool AllowBuiltinCreation = false, bool EnteringContext = false); ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc, RedeclarationKind Redecl = NotForRedeclaration); bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class); void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S, QualType T1, QualType T2, UnresolvedSetImpl &Functions); void addOverloadedOperatorToUnresolvedSet(UnresolvedSetImpl &Functions, DeclAccessPair Operator, QualType T1, QualType T2); LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc, SourceLocation GnuLabelLoc = SourceLocation()); DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class); CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class); CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class, unsigned Quals); CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals, bool RValueThis, unsigned ThisQuals); CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class); bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id); LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R, ArrayRef<QualType> ArgTys, bool AllowRaw, bool AllowTemplate, bool AllowStringTemplate); bool isKnownName(StringRef name); void 
ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc, ArrayRef<Expr *> Args, ADLResult &Functions); void LookupVisibleDecls(Scope *S, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind, VisibleDeclConsumer &Consumer, bool IncludeGlobalScope = true); enum CorrectTypoKind { CTK_NonError, // CorrectTypo used in a non error recovery situation. CTK_ErrorRecovery // CorrectTypo used in normal error recovery. }; TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr, bool RecordFailure = true); TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo, Sema::LookupNameKind LookupKind, Scope *S, CXXScopeSpec *SS, std::unique_ptr<CorrectionCandidateCallback> CCC, TypoDiagnosticGenerator TDG, TypoRecoveryCallback TRC, CorrectTypoKind Mode, DeclContext *MemberContext = nullptr, bool EnteringContext = false, const ObjCObjectPointerType *OPT = nullptr); ExprResult CorrectDelayedTyposInExpr(Expr *E, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }); ExprResult CorrectDelayedTyposInExpr(ExprResult ER, llvm::function_ref<ExprResult(Expr *)> Filter = [](Expr *E) -> ExprResult { return E; }) { return ER.isInvalid() ? 
ER : CorrectDelayedTyposInExpr(ER.get(), Filter); } void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, bool ErrorRecovery = true); void diagnoseTypo(const TypoCorrection &Correction, const PartialDiagnostic &TypoDiag, const PartialDiagnostic &PrevNote, bool ErrorRecovery = true); void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc, ArrayRef<Expr *> Args, AssociatedNamespaceSet &AssociatedNamespaces, AssociatedClassSet &AssociatedClasses); void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S, bool ConsiderLinkage, bool AllowInlineNamespace); void DiagnoseAmbiguousLookup(LookupResult &Result); //@} ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id, SourceLocation IdLoc, bool TypoCorrection = false); NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID, Scope *S, bool ForRedeclaration, SourceLocation Loc); NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II, Scope *S); void AddKnownFunctionAttributes(FunctionDecl *FD); // More parsing and symbol table subroutines. void ProcessPragmaWeak(Scope *S, Decl *D); // Decl attributes - this routine is the top level dispatcher. void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD); void ProcessDeclAttributeList(Scope *S, Decl *D, const AttributeList *AL, bool IncludeCXX11Attributes = true); bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl, const AttributeList *AttrList); void checkUnusedDeclAttributes(Declarator &D); /// Determine if type T is a valid subject for a nonnull and similar /// attributes. By default, we look through references (the behavior used by /// nonnull), but if the second parameter is true, then we treat a reference /// type as valid. 
bool isValidPointerAttrType(QualType T, bool RefOkay = false); bool CheckRegparmAttr(const AttributeList &attr, unsigned &value); bool CheckCallingConvAttr(const AttributeList &attr, CallingConv &CC, const FunctionDecl *FD = nullptr); bool CheckNoReturnAttr(const AttributeList &attr); bool checkStringLiteralArgumentAttr(const AttributeList &Attr, unsigned ArgNum, StringRef &Str, SourceLocation *ArgLocation = nullptr); bool checkMSInheritanceAttrOnDefinition( CXXRecordDecl *RD, SourceRange Range, bool BestCase, MSInheritanceAttr::Spelling SemanticSpelling); void CheckAlignasUnderalignment(Decl *D); /// Adjust the calling convention of a method to be the ABI default if it /// wasn't specified explicitly. This handles method types formed from /// function type typedefs and typename template arguments. void adjustMemberFunctionCC(QualType &T, bool IsStatic); // Check if there is an explicit attribute, but only look through parens. // The intent is to look for an attribute on the current declarator, but not // one that came from a typedef. bool hasExplicitCallingConv(QualType &T); /// Get the outermost AttributedType node that sets a calling convention. /// Valid types should not have multiple attributes with different CCs. const AttributedType *getCallingConvAttributedType(QualType T) const; /// \brief Stmt attributes - this routine is the top level dispatcher. StmtResult ProcessStmtAttributes(Stmt *Stmt, AttributeList *Attrs, SourceRange Range); void WarnConflictingTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); void CheckConflictingOverridingMethod(ObjCMethodDecl *Method, ObjCMethodDecl *Overridden, bool IsProtocolMethodDecl); /// WarnExactTypedMethods - This routine issues a warning if method /// implementation declaration matches exactly that of its declaration. 
void WarnExactTypedMethods(ObjCMethodDecl *Method, ObjCMethodDecl *MethodDecl, bool IsProtocolMethodDecl); typedef llvm::SmallPtrSet<Selector, 8> SelectorSet; typedef llvm::DenseMap<Selector, ObjCMethodDecl*> ProtocolsMethodsMap; /// CheckImplementationIvars - This routine checks if the instance variables /// listed in the implelementation match those listed in the interface. void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl, ObjCIvarDecl **Fields, unsigned nIvars, SourceLocation Loc); /// ImplMethodsVsClassMethods - This is main routine to warn if any method /// remains unimplemented in the class or category \@implementation. void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool IncompleteImpl = false); /// DiagnoseUnimplementedProperties - This routine warns on those properties /// which must be implemented by this implementation. void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl, ObjCContainerDecl *CDecl, bool SynthesizeProperties); /// DefaultSynthesizeProperties - This routine default synthesizes all /// properties which must be synthesized in the class's \@implementation. void DefaultSynthesizeProperties (Scope *S, ObjCImplDecl* IMPDecl, ObjCInterfaceDecl *IDecl); void DefaultSynthesizeProperties(Scope *S, Decl *D); /// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is /// an ivar synthesized for 'Method' and 'Method' is a property accessor /// declared in class 'IFace'. bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace, ObjCMethodDecl *Method, ObjCIvarDecl *IV); /// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if ivar which /// backs the property is not used in the property's accessor. void DiagnoseUnusedBackingIvarInAccessor(Scope *S, const ObjCImplementationDecl *ImplD); /// GetIvarBackingPropertyAccessor - If method is a property setter/getter and /// it property has a backing ivar, returns this ivar; otherwise, returns NULL. 
/// It also returns ivar's property on success. ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method, const ObjCPropertyDecl *&PDecl) const; /// Called by ActOnProperty to handle \@property declarations in /// class extensions. ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, bool *isOverridingProperty, TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind); /// Called by ActOnProperty and HandlePropertyInClassExtension to /// handle creating the ObjcPropertyDecl for a category or \@interface. ObjCPropertyDecl *CreatePropertyDecl(Scope *S, ObjCContainerDecl *CDecl, SourceLocation AtLoc, SourceLocation LParenLoc, FieldDeclarator &FD, Selector GetterSel, Selector SetterSel, const bool isAssign, const bool isReadWrite, const unsigned Attributes, const unsigned AttributesAsWritten, TypeSourceInfo *T, tok::ObjCKeywordKind MethodImplKind, DeclContext *lexicalDC = nullptr); /// AtomicPropertySetterGetterRules - This routine enforces the rule (via /// warning) when atomic property has one but not the other user-declared /// setter or getter. void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl); void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D); void DiagnoseMissingDesignatedInitOverrides( const ObjCImplementationDecl *ImplD, const ObjCInterfaceDecl *IFD); void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID); enum MethodMatchStrategy { MMS_loose, MMS_strict }; /// MatchTwoMethodDeclarations - Checks if two methods' type match and returns /// true, or false, accordingly. 
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method, const ObjCMethodDecl *PrevMethod, MethodMatchStrategy strategy = MMS_strict); /// MatchAllMethodDeclarations - Check methods declaraed in interface or /// or protocol against those declared in their implementations. void MatchAllMethodDeclarations(const SelectorSet &InsMap, const SelectorSet &ClsMap, SelectorSet &InsMapSeen, SelectorSet &ClsMapSeen, ObjCImplDecl* IMPDecl, ObjCContainerDecl* IDecl, bool &IncompleteImpl, bool ImmediateClass, bool WarnCategoryMethodImpl=false); /// CheckCategoryVsClassMethodMatches - Checks that methods implemented in /// category matches with those implemented in its primary class and /// warns each time an exact match is found. void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP); /// \brief Add the given method to the list of globally-known methods. void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method); private: /// AddMethodToGlobalPool - Add an instance or factory method to the global /// pool. See descriptoin of AddInstanceMethodToGlobalPool. void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance); /// LookupMethodInGlobalPool - Returns the instance or factory method and /// optionally warns if there are multiple signatures. ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass, bool warn, bool instance); public: /// \brief - Returns instance or factory methods in global method pool for /// given selector. 
If no such method or only one method found, function returns /// false; otherwise, it returns true bool CollectMultipleMethodsInGlobalPool(Selector Sel, SmallVectorImpl<ObjCMethodDecl*>& Methods, bool instance); bool AreMultipleMethodsInGlobalPool(Selector Sel, bool instance); private: /// \brief - Returns a selector which best matches given argument list or /// nullptr if none could be found ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args, bool IsInstance); /// \brief Record the typo correction failure and return an empty correction. TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc, bool RecordFailure = true) { if (RecordFailure) TypoCorrectionFailures[Typo].insert(TypoLoc); return TypoCorrection(); } public: /// AddInstanceMethodToGlobalPool - All instance methods in a translation /// unit are added to a global pool. This allows us to efficiently associate /// a selector with a method declaraation for purposes of typechecking /// messages sent to "id" (where the class of the object is unknown). void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/true); } /// AddFactoryMethodToGlobalPool - Same as above, but for factory methods. void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) { AddMethodToGlobalPool(Method, impl, /*instance*/false); } /// AddAnyMethodToGlobalPool - Add any method, instance or factory to global /// pool. void AddAnyMethodToGlobalPool(Decl *D); /// LookupInstanceMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false, bool warn=true) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn, /*instance*/true); } /// LookupFactoryMethodInGlobalPool - Returns the method and warns if /// there are multiple signatures. 
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R, bool receiverIdOrClass=false, bool warn=true) { return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass, warn, /*instance*/false); } const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel, QualType ObjectType=QualType()); /// LookupImplementedMethodInGlobalPool - Returns the method which has an /// implementation. ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel); /// CollectIvarsToConstructOrDestruct - Collect those ivars which require /// initialization. void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI, SmallVectorImpl<ObjCIvarDecl*> &Ivars); //===--------------------------------------------------------------------===// // Statement Parsing Callbacks: SemaStmt.cpp. public: class FullExprArg { public: FullExprArg(Sema &actions) : E(nullptr) { } ExprResult release() { return E; } Expr *get() const { return E; } Expr *operator->() { return E; } private: // FIXME: No need to make the entire Sema class a friend when it's just // Sema::MakeFullExpr that needs access to the constructor below. friend class Sema; explicit FullExprArg(Expr *expr) : E(expr) {} Expr *E; }; FullExprArg MakeFullExpr(Expr *Arg) { return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation()); } FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) { return FullExprArg(ActOnFinishFullExpr(Arg, CC).get()); } FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) { ExprResult FE = ActOnFinishFullExpr(Arg, Arg ? 
Arg->getExprLoc() : SourceLocation(), /*DiscardedValue*/ true); return FullExprArg(FE.get()); } StmtResult ActOnExprStmt(ExprResult Arg); StmtResult ActOnExprStmtError(); StmtResult ActOnNullStmt(SourceLocation SemiLoc, bool HasLeadingEmptyMacro = false); void ActOnStartOfCompoundStmt(); void ActOnFinishOfCompoundStmt(); StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R, ArrayRef<Stmt *> Elts, bool isStmtExpr); /// \brief A RAII object to enter scope of a compound statement. class CompoundScopeRAII { public: CompoundScopeRAII(Sema &S): S(S) { S.ActOnStartOfCompoundStmt(); } ~CompoundScopeRAII() { S.ActOnFinishOfCompoundStmt(); } private: Sema &S; }; /// An RAII helper that pops function a function scope on exit. struct FunctionScopeRAII { Sema &S; bool Active; FunctionScopeRAII(Sema &S) : S(S), Active(true) {} ~FunctionScopeRAII() { if (Active) S.PopFunctionScopeInfo(); } void disable() { Active = false; } }; StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl, SourceLocation StartLoc, SourceLocation EndLoc); void ActOnForEachDeclStmt(DeclGroupPtrTy Decl); StmtResult ActOnForEachLValueExpr(Expr *E); StmtResult ActOnCaseStmt(SourceLocation CaseLoc, Expr *LHSVal, SourceLocation DotDotDotLoc, Expr *RHSVal, SourceLocation ColonLoc); void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt); StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc, SourceLocation ColonLoc, Stmt *SubStmt, Scope *CurScope); StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl, SourceLocation ColonLoc, Stmt *SubStmt); StmtResult ActOnAttributedStmt(SourceLocation AttrLoc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt); StmtResult ActOnIfStmt(SourceLocation IfLoc, FullExprArg CondVal, Decl *CondVar, Stmt *ThenVal, SourceLocation ElseLoc, Stmt *ElseVal); StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc, Expr *Cond, Decl *CondVar); StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc, Stmt *Switch, Stmt *Body); StmtResult ActOnWhileStmt(SourceLocation WhileLoc, 
FullExprArg Cond, Decl *CondVar, Stmt *Body); StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body, SourceLocation WhileLoc, SourceLocation CondLParen, Expr *Cond, SourceLocation CondRParen); StmtResult ActOnForStmt(SourceLocation ForLoc, SourceLocation LParenLoc, Stmt *First, FullExprArg Second, Decl *SecondVar, FullExprArg Third, SourceLocation RParenLoc, Stmt *Body); ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc, Expr *collection); StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc, Stmt *First, Expr *collection, SourceLocation RParenLoc); StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body); enum BuildForRangeKind { /// Initial building of a for-range statement. BFRK_Build, /// Instantiation or recovery rebuild of a for-range statement. Don't /// attempt any typo-correction. BFRK_Rebuild, /// Determining whether a for-range statement could be built. Avoid any /// unnecessary or irreversible actions. BFRK_Check }; StmtResult ActOnCXXForRangeStmt(SourceLocation ForLoc, Stmt *LoopVar, SourceLocation ColonLoc, Expr *Collection, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc, SourceLocation ColonLoc, Stmt *RangeDecl, Stmt *BeginEndDecl, Expr *Cond, Expr *Inc, Stmt *LoopVarDecl, SourceLocation RParenLoc, BuildForRangeKind Kind); StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body); StmtResult ActOnGotoStmt(SourceLocation GotoLoc, SourceLocation LabelLoc, LabelDecl *TheDecl); StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc, SourceLocation StarLoc, Expr *DestExp); StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope); StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope); void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, CapturedRegionKind Kind, unsigned NumParams); typedef std::pair<StringRef, QualType> CapturedParamNameType; void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope, 
CapturedRegionKind Kind, ArrayRef<CapturedParamNameType> Params); StmtResult ActOnCapturedRegionEnd(Stmt *S); void ActOnCapturedRegionError(); RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD, SourceLocation Loc, unsigned NumParams); VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E, bool AllowFunctionParameters); bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD, bool AllowFunctionParameters); StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp, Scope *CurScope); StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp); StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple, bool IsVolatile, unsigned NumOutputs, unsigned NumInputs, IdentifierInfo **Names, MultiExprArg Constraints, MultiExprArg Exprs, Expr *AsmString, MultiExprArg Clobbers, SourceLocation RParenLoc); ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, llvm::InlineAsmIdentifierInfo &Info, bool IsUnevaluatedContext); bool LookupInlineAsmField(StringRef Base, StringRef Member, unsigned &Offset, SourceLocation AsmLoc); StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc, ArrayRef<Token> AsmToks, StringRef AsmString, unsigned NumOutputs, unsigned NumInputs, ArrayRef<StringRef> Constraints, ArrayRef<StringRef> Clobbers, ArrayRef<Expr*> Exprs, SourceLocation EndLoc); LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName, SourceLocation Location, bool AlwaysCreate); VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id, bool Invalid = false); Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D); StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen, Decl *Parm, Stmt *Body); StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body); StmtResult 
ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try, MultiStmtArg Catch, Stmt *Finally); StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw); StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw, Scope *CurScope); ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc, Expr *operand); StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc, Expr *SynchExpr, Stmt *SynchBody); StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body); VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo, SourceLocation StartLoc, SourceLocation IdLoc, IdentifierInfo *Id); Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D); StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc, Decl *ExDecl, Stmt *HandlerBlock); StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock, ArrayRef<Stmt *> Handlers); StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ? SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); StmtResult ActOnSEHExceptBlock(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); StmtResult ActOnSEHFinallyBlock(SourceLocation Loc, Stmt *Block); StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope); void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock); bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const; /// \brief If it's a file scoped decl that must warn if not used, keep track /// of it. void MarkUnusedFileScopedDecl(const DeclaratorDecl *D); /// DiagnoseUnusedExprResult - If the statement passed in is an expression /// whose result is unused, warn. void DiagnoseUnusedExprResult(const Stmt *S); void DiagnoseUnusedNestedTypedefs(const RecordDecl *D); void DiagnoseUnusedDecl(const NamedDecl *ND); /// Emit \p DiagID if statement located on \p StmtLoc has a suspicious null /// statement as a \p Body, and it is located on the same line. 
/// /// This helps prevent bugs due to typos, such as: /// if (condition); /// do_stuff(); void DiagnoseEmptyStmtBody(SourceLocation StmtLoc, const Stmt *Body, unsigned DiagID); /// Warn if a for/while loop statement \p S, which is followed by /// \p PossibleBody, has a suspicious null statement as a body. void DiagnoseEmptyLoopBody(const Stmt *S, const Stmt *PossibleBody); ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) { return DelayedDiagnostics.push(pool); } void PopParsingDeclaration(ParsingDeclState state, Decl *decl); typedef ProcessingContextState ParsingClassState; ParsingClassState PushParsingClass() { return DelayedDiagnostics.pushUndelayed(); } void PopParsingClass(ParsingClassState state) { DelayedDiagnostics.popUndelayed(state); } void redelayDiagnostics(sema::DelayedDiagnosticPool &pool); enum AvailabilityDiagnostic { AD_Deprecation, AD_Unavailable }; void EmitAvailabilityWarning(AvailabilityDiagnostic AD, NamedDecl *D, StringRef Message, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass, const ObjCPropertyDecl *ObjCProperty, bool ObjCPropertyAccess); bool makeUnavailableInSystemHeader(SourceLocation loc, StringRef message); //===--------------------------------------------------------------------===// // Expression Parsing Callbacks: SemaExpr.cpp. 
bool CanUseDecl(NamedDecl *D); bool DiagnoseUseOfDecl(NamedDecl *D, SourceLocation Loc, const ObjCInterfaceDecl *UnknownObjCClass=nullptr, bool ObjCPropertyAccess=false); void NoteDeletedFunction(FunctionDecl *FD); std::string getDeletedOrUnavailableSuffix(const FunctionDecl *FD); bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD, ObjCMethodDecl *Getter, SourceLocation Loc); void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc, ArrayRef<Expr *> Args); void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false); enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl }; void PushExpressionEvaluationContext(ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t, bool IsDecltype = false); void PopExpressionEvaluationContext(); void DiscardCleanupsInEvaluationContext(); ExprResult TransformToPotentiallyEvaluated(Expr *E); ExprResult HandleExprEvaluationContextForTypeof(Expr *E); ExprResult ActOnConstantExpression(ExprResult Res); // Functions for marking a declaration referenced. These functions also // contain the relevant logic for marking if a reference to a function or // variable is an odr-use (in the C++11 sense). There are separate variants // for expressions referring to a decl; these exist because odr-use marking // needs to be delayed for some constant variables when we build one of the // named expressions. void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool OdrUse); void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func, bool OdrUse = true); void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var); void MarkDeclRefReferenced(DeclRefExpr *E); void MarkMemberReferenced(MemberExpr *E); void UpdateMarkingForLValueToRValue(Expr *E); void CleanupVarDeclMarking(); enum TryCaptureKind { TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef }; /// \brief Try to capture the given variable. 
/// /// \param Var The variable to capture. /// /// \param Loc The location at which the capture occurs. /// /// \param Kind The kind of capture, which may be implicit (for either a /// block or a lambda), or explicit by-value or by-reference (for a lambda). /// /// \param EllipsisLoc The location of the ellipsis, if one is provided in /// an explicit lambda capture. /// /// \param BuildAndDiagnose Whether we are actually supposed to add the /// captures or diagnose errors. If false, this routine merely check whether /// the capture can occur without performing the capture itself or complaining /// if the variable cannot be captured. /// /// \param CaptureType Will be set to the type of the field used to capture /// this variable in the innermost block or lambda. Only valid when the /// variable can be captured. /// /// \param DeclRefType Will be set to the type of a reference to the capture /// from within the current scope. Only valid when the variable can be /// captured. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// variables that may or may not be used in certain specializations of /// a nested generic lambda. /// /// \returns true if an error occurred (i.e., the variable cannot be /// captured) and false if the capture succeeded. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind, SourceLocation EllipsisLoc, bool BuildAndDiagnose, QualType &CaptureType, QualType &DeclRefType, const unsigned *const FunctionScopeIndexToStopAt); /// \brief Try to capture the given variable. bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind = TryCapture_Implicit, SourceLocation EllipsisLoc = SourceLocation()); /// \brief Checks if the variable must be captured. 
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc); /// \brief Given a variable, determine the type that a reference to that /// variable will have in the given scope. QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc); void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T); void MarkDeclarationsReferencedInExpr(Expr *E, bool SkipLocalVariables = false); /// \brief Try to recover by turning the given expression into a /// call. Returns true if recovery was attempted or an error was /// emitted; this may also leave the ExprResult invalid. bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD, bool ForceComplain = false, bool (*IsPlausibleResult)(QualType) = nullptr); /// \brief Figure out if an expression could be turned into a call. bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy, UnresolvedSetImpl &NonTemplateOverloads); /// \brief Conditionally issue a diagnostic based on the current /// evaluation context. /// /// \param Statement If Statement is non-null, delay reporting the /// diagnostic until the function body is parsed, and then do a basic /// reachability analysis to determine if the statement is reachable. /// If it is unreachable, the diagnostic will not be emitted. bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement, const PartialDiagnostic &PD); // Primary Expressions. 
SourceRange getExprRange(Expr *E) const; ExprResult ActOnIdExpression( Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand, std::unique_ptr<CorrectionCandidateCallback> CCC = nullptr, bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr); void DecomposeUnqualifiedId(const UnqualifiedId &Id, TemplateArgumentListInfo &Buffer, DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *&TemplateArgs); bool DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R, std::unique_ptr<CorrectionCandidateCallback> CCC, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr, ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr); ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S, IdentifierInfo *II, bool AllowBuiltinCreation=false); ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, bool isAddressOfOperand, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, SourceLocation Loc, const CXXScopeSpec *SS = nullptr); ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK, const DeclarationNameInfo &NameInfo, const CXXScopeSpec *SS = nullptr, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr); ExprResult BuildAnonymousStructUnionMemberReference( const CXXScopeSpec &SS, SourceLocation nameLoc, IndirectFieldDecl *indirectField, DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none), Expr *baseObjectExpr = nullptr, SourceLocation opLoc = SourceLocation()); ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, 
bool IsDefiniteInstance); bool UseArgumentDependentLookup(const CXXScopeSpec &SS, const LookupResult &R, bool HasTrailingLParen); ExprResult BuildQualifiedDeclarationNameExpr( CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, bool IsAddressOfOperand, TypeSourceInfo **RecoveryTSI = nullptr); ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS, LookupResult &R, bool NeedsADL, bool AcceptInvalidDecl = false); ExprResult BuildDeclarationNameExpr( const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D, NamedDecl *FoundD = nullptr, const TemplateArgumentListInfo *TemplateArgs = nullptr, bool AcceptInvalidDecl = false); ExprResult BuildLiteralOperatorCall(LookupResult &R, DeclarationNameInfo &SuffixInfo, ArrayRef<Expr *> Args, SourceLocation LitEndLoc, TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr); ExprResult BuildPredefinedExpr(SourceLocation Loc, PredefinedExpr::IdentType IT); ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind); ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val); bool CheckLoopHintExpr(Expr *E, SourceLocation Loc); ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnCharacterConstant(const Token &Tok, Scope *UDLScope = nullptr); ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E); ExprResult ActOnParenListExpr(SourceLocation L, SourceLocation R, MultiExprArg Val); /// ActOnStringLiteral - The specified tokens were lexed as pasted string /// fragments (e.g. "foo" "bar" L"baz"). 
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks, Scope *UDLScope = nullptr); ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<ParsedType> ArgTypes, ArrayRef<Expr *> ArgExprs); ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc, SourceLocation DefaultLoc, SourceLocation RParenLoc, Expr *ControllingExpr, ArrayRef<TypeSourceInfo *> Types, ArrayRef<Expr *> Exprs); // Binary/Unary Operators. 'Tok' is the token for the operator. ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *InputExpr); ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opc, Expr *Input); ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Op, Expr *Input); QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc); ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, SourceRange R); ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc, UnaryExprOrTypeTrait ExprKind, bool IsType, void *TyOrEx, const SourceRange &ArgRange); ExprResult CheckPlaceholderExpr(Expr *E); bool CheckVecStepExpr(Expr *E); bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind); bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc, SourceRange ExprRange, UnaryExprOrTypeTrait ExprKind); ExprResult ActOnSizeofParameterPackExpr(Scope *S, SourceLocation OpLoc, IdentifierInfo &Name, SourceLocation NameLoc, SourceLocation RParenLoc); ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind, Expr *Input); ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc, Expr *Idx, SourceLocation RLoc); ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc, 
Expr *Idx, SourceLocation RLoc); // This struct is for use by ActOnMemberAccess to allow // BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after // changing the access operator from a '.' to a '->' (to see if that is the // change needed to fix an error about an unknown member, e.g. when the class // defines a custom operator->). struct ActOnMemberAccessExtraArgs { Scope *S; UnqualifiedId &Id; Decl *ObjCImpDecl; bool HasTrailingLParen; }; ExprResult BuildMemberReferenceExpr( Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, LookupResult &R, const TemplateArgumentListInfo *TemplateArgs, bool SuppressQualifierCheck = false, ActOnMemberAccessExtraArgs *ExtraArgs = nullptr); ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow); bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType, const CXXScopeSpec &SS, const LookupResult &R); ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType, bool IsArrow, SourceLocation OpLoc, const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base, SourceLocation OpLoc, tok::TokenKind OpKind, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Member, Decl *ObjCImpDecl, bool HasTrailingLParen); void ActOnDefaultCtorInitializers(Decl *CDtorDecl); bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn, FunctionDecl *FDecl, const FunctionProtoType *Proto, ArrayRef<Expr *> Args, SourceLocation 
RParenLoc, bool ExecConfig = false); void CheckStaticArrayArgument(SourceLocation CallLoc, ParmVarDecl *Param, const Expr *ArgExpr); /// ActOnCallExpr - Handle a call to Fn with the specified array of arguments. /// This provides the location of the left/right parens and a list of comma /// locations. ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc, MultiExprArg ArgExprs, SourceLocation RParenLoc, Expr *ExecConfig = nullptr, bool IsExecConfig = false); ExprResult BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc, ArrayRef<Expr *> Arg, SourceLocation RParenLoc, Expr *Config = nullptr, bool IsExecConfig = false); ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc, MultiExprArg ExecConfig, SourceLocation GGGLoc); ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc, Declarator &D, ParsedType &Ty, SourceLocation RParenLoc, Expr *CastExpr); ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc, TypeSourceInfo *Ty, SourceLocation RParenLoc, Expr *Op); CastKind PrepareScalarCast(ExprResult &src, QualType destType); /// \brief Build an altivec or OpenCL literal. 
ExprResult BuildVectorLiteral(SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E, TypeSourceInfo *TInfo); ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME); ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc, Expr *InitExpr); ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc, TypeSourceInfo *TInfo, SourceLocation RParenLoc, Expr *LiteralExpr); ExprResult ActOnInitList(SourceLocation LBraceLoc, MultiExprArg InitArgList, SourceLocation RBraceLoc); ExprResult ActOnDesignatedInitializer(Designation &Desig, SourceLocation Loc, bool GNUSyntax, ExprResult Init); private: static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind); public: ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc, tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr); ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr); /// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null /// in the case of a the GNU conditional expr extension. ExprResult ActOnConditionalOp(SourceLocation QuestionLoc, SourceLocation ColonLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr); /// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo". 
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc, LabelDecl *TheDecl); void ActOnStartStmtExpr(); ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt, SourceLocation RPLoc); // "({..})" void ActOnStmtExprError(); // __builtin_offsetof(type, identifier(.identifier|[expr])*) struct OffsetOfComponent { SourceLocation LocStart, LocEnd; bool isBrackets; // true if [expr], false if .ident union { IdentifierInfo *IdentInfo; Expr *E; } U; }; /// __builtin_offsetof(type, a.b[123][456].c) ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc, TypeSourceInfo *TInfo, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); ExprResult ActOnBuiltinOffsetOf(Scope *S, SourceLocation BuiltinLoc, SourceLocation TypeLoc, ParsedType ParsedArgTy, OffsetOfComponent *CompPtr, unsigned NumComponents, SourceLocation RParenLoc); // __builtin_choose_expr(constExpr, expr1, expr2) ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc, Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr, SourceLocation RPLoc); // __builtin_va_arg(expr, type) ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty, SourceLocation RPLoc); ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E, TypeSourceInfo *TInfo, SourceLocation RPLoc); // __null ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc); bool CheckCaseExpression(Expr *E); /// \brief Describes the result of an "if-exists" condition check. enum IfExistsResult { /// \brief The symbol exists. IER_Exists, /// \brief The symbol does not exist. IER_DoesNotExist, /// \brief The name is a dependent name, so the results will differ /// from one instantiation to the next. IER_Dependent, /// \brief An error occurred. 
IER_Error }; IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS, const DeclarationNameInfo &TargetNameInfo); IfExistsResult CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name); StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, NestedNameSpecifierLoc QualifierLoc, DeclarationNameInfo NameInfo, Stmt *Nested); StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc, bool IsIfExists, CXXScopeSpec &SS, UnqualifiedId &Name, Stmt *Nested); //===------------------------- "Block" Extension ------------------------===// /// ActOnBlockStart - This callback is invoked when a block literal is /// started. void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockArguments - This callback allows processing of block arguments. /// If there are no arguments, this is still invoked. void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo, Scope *CurScope); /// ActOnBlockError - If there is an error parsing a block, this callback /// is invoked to pop the information about the block from the action impl. void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope); /// ActOnBlockStmtExpr - This is called when the body of a block statement /// literal was successfully completed. ^(int x){...} ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body, Scope *CurScope); //===---------------------------- Clang Extensions ----------------------===// /// __builtin_convertvector(...) ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- OpenCL Features -----------------------===// /// __builtin_astype(...) 
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy, SourceLocation BuiltinLoc, SourceLocation RParenLoc); //===---------------------------- C++ Features --------------------------===// // Act on C++ namespaces Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc, SourceLocation NamespaceLoc, SourceLocation IdentLoc, IdentifierInfo *Ident, SourceLocation LBrace, AttributeList *AttrList); void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace); NamespaceDecl *getStdNamespace() const; NamespaceDecl *getOrCreateStdNamespace(); CXXRecordDecl *getStdBadAlloc() const; /// \brief Tests whether Ty is an instance of std::initializer_list and, if /// it is and Element is not NULL, assigns the element type to Element. bool isStdInitializerList(QualType Ty, QualType *Element); /// \brief Looks for the std::initializer_list template and instantiates it /// with Element, or emits an error if it's not found. /// /// \returns The instantiated template, or null on error. QualType BuildStdInitializerList(QualType Element, SourceLocation Loc); /// \brief Determine whether Ctor is an initializer-list constructor, as /// defined in [dcl.init.list]p2. 
bool isInitListConstructor(const CXXConstructorDecl *Ctor); Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc, SourceLocation NamespcLoc, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *NamespcName, AttributeList *AttrList); void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir); Decl *ActOnNamespaceAliasDef(Scope *CurScope, SourceLocation NamespaceLoc, SourceLocation AliasLoc, IdentifierInfo *Alias, CXXScopeSpec &SS, SourceLocation IdentLoc, IdentifierInfo *Ident); void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow); bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target, const LookupResult &PreviousDecls, UsingShadowDecl *&PrevShadow); UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD, NamedDecl *Target, UsingShadowDecl *PrevDecl); bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc, bool HasTypenameKeyword, const CXXScopeSpec &SS, SourceLocation NameLoc, const LookupResult &Previous); bool CheckUsingDeclQualifier(SourceLocation UsingLoc, const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, SourceLocation NameLoc); NamedDecl *BuildUsingDeclaration(Scope *S, AccessSpecifier AS, SourceLocation UsingLoc, CXXScopeSpec &SS, DeclarationNameInfo NameInfo, AttributeList *AttrList, bool IsInstantiation, bool HasTypenameKeyword, SourceLocation TypenameLoc); bool CheckInheritingConstructorUsingDecl(UsingDecl *UD); Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS, bool HasUsingKeyword, SourceLocation UsingLoc, CXXScopeSpec &SS, UnqualifiedId &Name, AttributeList *AttrList, bool HasTypenameKeyword, SourceLocation TypenameLoc); Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS, MultiTemplateParamsArg TemplateParams, SourceLocation UsingLoc, UnqualifiedId &Name, AttributeList *AttrList, TypeResult Type); /// BuildCXXConstructExpr - Creates a complete call to a constructor, /// including handling of its default argument expressions. 
/// /// \param ConstructKind - a CXXConstructExpr::ConstructionKind ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); // FIXME: Can we remove this and have the above BuildCXXConstructExpr check if // the constructor can be elidable? ExprResult BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType, CXXConstructorDecl *Constructor, bool Elidable, MultiExprArg Exprs, bool HadMultipleCandidates, bool IsListInitialization, bool IsStdInitListInitialization, bool RequiresZeroInit, unsigned ConstructKind, SourceRange ParenRange); ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field); /// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating /// the default expr if needed. ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD, ParmVarDecl *Param); /// FinalizeVarWithDestructor - Prepare for calling destructor on the /// constructed variable. void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType); /// \brief Helper class that collects exception specifications for /// implicitly-declared special member functions. class ImplicitExceptionSpecification { // Pointer to allow copying Sema *Self; // We order exception specifications thus: // noexcept is the most restrictive, but is only used in C++11. // throw() comes next. // Then a throw(collected exceptions) // Finally no specification, which is expressed as noexcept(false). // throw(...) is used instead if any called function uses it. 
ExceptionSpecificationType ComputedEST; llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen; SmallVector<QualType, 4> Exceptions; void ClearExceptions() { ExceptionsSeen.clear(); Exceptions.clear(); } public: explicit ImplicitExceptionSpecification(Sema &Self) : Self(&Self), ComputedEST(EST_BasicNoexcept) { if (!Self.getLangOpts().CPlusPlus11) ComputedEST = EST_DynamicNone; } /// \brief Get the computed exception specification type. ExceptionSpecificationType getExceptionSpecType() const { assert(ComputedEST != EST_ComputedNoexcept && "noexcept(expr) should not be a possible result"); return ComputedEST; } /// \brief The number of exceptions in the exception specification. unsigned size() const { return Exceptions.size(); } /// \brief The set of exceptions in the exception specification. const QualType *data() const { return Exceptions.data(); } /// \brief Integrate another called method into the collected data. void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method); /// \brief Integrate an invoked expression into the collected data. void CalledExpr(Expr *E); /// \brief Overwrite an EPI's exception specification with this /// computed exception specification. FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const { FunctionProtoType::ExceptionSpecInfo ESI; ESI.Type = getExceptionSpecType(); if (ESI.Type == EST_Dynamic) { ESI.Exceptions = Exceptions; } else if (ESI.Type == EST_None) { /// C++11 [except.spec]p14: /// The exception-specification is noexcept(false) if the set of /// potential exceptions of the special member function contains "any" ESI.Type = EST_ComputedNoexcept; ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(), tok::kw_false).get(); } return ESI; } }; /// \brief Determine what sort of exception specification a defaulted /// copy constructor of a class will have. 
ImplicitExceptionSpecification ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// default constructor of a class will have, and whether the parameter /// will be const. ImplicitExceptionSpecification ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defautled /// copy assignment operator of a class will have, and whether the /// parameter will be const. ImplicitExceptionSpecification ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// constructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted move /// assignment operator of a class will have. ImplicitExceptionSpecification ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification a defaulted /// destructor of a class will have. ImplicitExceptionSpecification ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD); /// \brief Determine what sort of exception specification an inheriting /// constructor of a class will have. ImplicitExceptionSpecification ComputeInheritingCtorExceptionSpec(CXXConstructorDecl *CD); /// \brief Evaluate the implicit exception specification for a defaulted /// special member function. void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD); /// \brief Check the given exception-specification and update the /// exception specification information with the results. 
void checkExceptionSpecification(bool IsTopLevel, ExceptionSpecificationType EST, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr, SmallVectorImpl<QualType> &Exceptions, FunctionProtoType::ExceptionSpecInfo &ESI); /// \brief Determine if we're in a case where we need to (incorrectly) eagerly /// parse an exception specification to work around a libstdc++ bug. bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D); /// \brief Add an exception-specification to the given member function /// (or member function template). The exception-specification was parsed /// after the method itself was declared. void actOnDelayedExceptionSpecification(Decl *Method, ExceptionSpecificationType EST, SourceRange SpecificationRange, ArrayRef<ParsedType> DynamicExceptions, ArrayRef<SourceRange> DynamicExceptionRanges, Expr *NoexceptExpr); /// \brief Determine if a special member function should have a deleted /// definition when it is defaulted. bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM, bool Diagnose = false); /// \brief Declare the implicit default constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// default constructor will be added. /// /// \returns The implicitly-declared default constructor. CXXConstructorDecl *DeclareImplicitDefaultConstructor( CXXRecordDecl *ClassDecl); /// DefineImplicitDefaultConstructor - Checks for feasibility of /// defining this constructor as the default constructor. void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit destructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// destructor will be added. /// /// \returns The implicitly-declared destructor. 
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl); /// DefineImplicitDestructor - Checks for feasibility of /// defining this destructor as the default destructor. void DefineImplicitDestructor(SourceLocation CurrentLocation, CXXDestructorDecl *Destructor); /// \brief Build an exception spec for destructors that don't have one. /// /// C++11 says that user-defined destructors with no exception spec get one /// that looks as if the destructor was implicitly declared. void AdjustDestructorExceptionSpec(CXXRecordDecl *ClassDecl, CXXDestructorDecl *Destructor); /// \brief Declare all inheriting constructors for the given class. /// /// \param ClassDecl The class declaration into which the inheriting /// constructors will be added. void DeclareInheritingConstructors(CXXRecordDecl *ClassDecl); /// \brief Define the specified inheriting constructor. void DefineInheritingConstructor(SourceLocation UseLoc, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy constructor for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy constructor will be added. /// /// \returns The implicitly-declared copy constructor. CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitCopyConstructor - Checks for feasibility of /// defining this constructor as the copy constructor. void DefineImplicitCopyConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit move constructor for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move constructor will be added. /// /// \returns The implicitly-declared move constructor, or NULL if it wasn't /// declared. CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl); /// DefineImplicitMoveConstructor - Checks for feasibility of /// defining this constructor as the move constructor. 
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation, CXXConstructorDecl *Constructor); /// \brief Declare the implicit copy assignment operator for the given class. /// /// \param ClassDecl The class declaration into which the implicit /// copy assignment operator will be added. /// /// \returns The implicitly-declared copy assignment operator. CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared copy assignment operator. void DefineImplicitCopyAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Declare the implicit move assignment operator for the given class. /// /// \param ClassDecl The Class declaration into which the implicit /// move assignment operator will be added. /// /// \returns The implicitly-declared move assignment operator, or NULL if it /// wasn't declared. CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl); /// \brief Defines an implicitly-declared move assignment operator. void DefineImplicitMoveAssignment(SourceLocation CurrentLocation, CXXMethodDecl *MethodDecl); /// \brief Force the declaration of any implicitly-declared members of this /// class. void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class); /// \brief Determine whether the given function is an implicitly-deleted /// special member function. bool isImplicitlyDeleted(FunctionDecl *FD); /// \brief Check whether 'this' shows up in the type of a static member /// function after the (naturally empty) cv-qualifier-seq would be. /// /// \returns true if an error occurred. bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method); /// \brief Whether this' shows up in the exception specification of a static /// member function. bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method); /// \brief Check whether 'this' shows up in the attributes of the given /// static member function. /// /// \returns true if an error occurred. 
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method); /// MaybeBindToTemporary - If the passed in expression has a record type with /// a non-trivial destructor, this will return CXXBindTemporaryExpr. Otherwise /// it simply returns the passed in expression. ExprResult MaybeBindToTemporary(Expr *E); bool CompleteConstructorCall(CXXConstructorDecl *Constructor, MultiExprArg ArgsPtr, SourceLocation Loc, SmallVectorImpl<Expr*> &ConvertedArgs, bool AllowExplicit = false, bool IsListInitialization = false); ParsedType getInheritingConstructorName(CXXScopeSpec &SS, SourceLocation NameLoc, IdentifierInfo &Name); ParsedType getDestructorName(SourceLocation TildeLoc, IdentifierInfo &II, SourceLocation NameLoc, Scope *S, CXXScopeSpec &SS, ParsedType ObjectType, bool EnteringContext); ParsedType getDestructorType(const DeclSpec& DS, ParsedType ObjectType); // Checks that reinterpret casts don't have undefined behavior. void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType, bool IsDereference, SourceRange Range); /// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's. ExprResult ActOnCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, SourceLocation LAngleBracketLoc, Declarator &D, SourceLocation RAngleBracketLoc, SourceLocation LParenLoc, Expr *E, SourceLocation RParenLoc); ExprResult BuildCXXNamedCast(SourceLocation OpLoc, tok::TokenKind Kind, TypeSourceInfo *Ty, Expr *E, SourceRange AngleBrackets, SourceRange Parens); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXTypeId(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXTypeid - Parse typeid( something ). 
ExprResult ActOnCXXTypeid(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, TypeSourceInfo *Operand, SourceLocation RParenLoc); ExprResult BuildCXXUuidof(QualType TypeInfoType, SourceLocation TypeidLoc, Expr *Operand, SourceLocation RParenLoc); /// ActOnCXXUuidof - Parse __uuidof( something ). ExprResult ActOnCXXUuidof(SourceLocation OpLoc, SourceLocation LParenLoc, bool isType, void *TyOrExpr, SourceLocation RParenLoc); /// \brief Handle a C++1z fold-expression: ( expr op ... op expr ). ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, tok::TokenKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS, BinaryOperatorKind Operator, SourceLocation EllipsisLoc, Expr *RHS, SourceLocation RParenLoc); ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc, BinaryOperatorKind Operator); //// ActOnCXXThis - Parse 'this' pointer. ExprResult ActOnCXXThis(SourceLocation loc); /// \brief Try to retrieve the type of the 'this' pointer. /// /// \returns The type of 'this', if possible. Otherwise, returns a NULL type. QualType getCurrentThisType(); /// \brief When non-NULL, the C++ 'this' expression is allowed despite the /// current context not being a non-static member function. In such cases, /// this provides the type used for 'this'. QualType CXXThisTypeOverride; /// \brief RAII object used to temporarily allow the C++ 'this' expression /// to be used, with the given qualifiers on the current class type. class CXXThisScopeRAII { Sema &S; QualType OldCXXThisTypeOverride; bool Enabled; public: /// \brief Introduce a new scope where 'this' may be allowed (when enabled), /// using the given declaration (which is either a class template or a /// class) along with the given qualifiers. /// along with the qualifiers placed on '*this'. 
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, unsigned CXXThisTypeQuals, bool Enabled = true); ~CXXThisScopeRAII(); }; /// \brief Make sure the value of 'this' is actually available in the current /// context, if it is a potentially evaluated context. /// /// \param Loc The location at which the capture of 'this' occurs. /// /// \param Explicit Whether 'this' is explicitly captured in a lambda /// capture list. /// /// \param FunctionScopeIndexToStopAt If non-null, it points to the index /// of the FunctionScopeInfo stack beyond which we do not attempt to capture. /// This is useful when enclosing lambdas must speculatively capture /// 'this' that may or may not be used in certain specializations of /// a nested generic lambda (depending on whether the name resolves to /// a non-static member function or a static function). /// \return returns 'true' if failed, 'false' if success. bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false, bool BuildAndDiagnose = true, const unsigned *const FunctionScopeIndexToStopAt = nullptr); /// \brief Determine whether the given type is the type of *this that is used /// outside of the body of a member function for a type that is currently /// being defined. bool isThisOutsideMemberFunctionBody(QualType BaseType); /// ActOnCXXBoolLiteral - Parse {true,false} literals. ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals. ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind); /// ActOnCXXNullPtrLiteral - Parse 'nullptr'. ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc); //// ActOnCXXThrow - Parse throw expressions. 
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr); ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex, bool IsThrownVarInScope); ExprResult CheckCXXThrowOperand(SourceLocation ThrowLoc, Expr *E, bool IsThrownVarInScope); /// ActOnCXXTypeConstructExpr - Parse construction of a specified type. /// Can be interpreted either as function-style casting ("int(x)") /// or class type construction ("ClassType(x,y,z)") /// or creation of a value-initialized type ("int()"). ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type, SourceLocation LParenLoc, MultiExprArg Exprs, SourceLocation RParenLoc); /// ActOnCXXNew - Parsed a C++ 'new' expression. ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, Declarator &D, Expr *Initializer); ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal, SourceLocation PlacementLParen, MultiExprArg PlacementArgs, SourceLocation PlacementRParen, SourceRange TypeIdParens, QualType AllocType, TypeSourceInfo *AllocTypeInfo, Expr *ArraySize, SourceRange DirectInitRange, Expr *Initializer, bool TypeMayContainAuto = true); bool CheckAllocatedType(QualType AllocType, SourceLocation Loc, SourceRange R); bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range, bool UseGlobal, QualType AllocType, bool IsArray, MultiExprArg PlaceArgs, FunctionDecl *&OperatorNew, FunctionDecl *&OperatorDelete); bool FindAllocationOverload(SourceLocation StartLoc, SourceRange Range, DeclarationName Name, MultiExprArg Args, DeclContext *Ctx, bool AllowMissing, FunctionDecl *&Operator, bool Diagnose = true); void DeclareGlobalNewDelete(); void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return, QualType Param1, QualType Param2 = QualType(), bool 
addMallocAttr = false); bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD, DeclarationName Name, FunctionDecl* &Operator, bool Diagnose = true); FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc, bool CanProvideSize, DeclarationName Name); /// ActOnCXXDelete - Parsed a C++ 'delete' expression ExprResult ActOnCXXDelete(SourceLocation StartLoc, bool UseGlobal, bool ArrayForm, Expr *Operand); DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D); ExprResult CheckConditionVariable(VarDecl *ConditionVar, SourceLocation StmtLoc, bool ConvertToBoolean); ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen, Expr *Operand, SourceLocation RParen); ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand, SourceLocation RParen); /// \brief Parsed one of the type trait support pseudo-functions. ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<ParsedType> Args, SourceLocation RParenLoc); ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc, ArrayRef<TypeSourceInfo *> Args, SourceLocation RParenLoc); /// ActOnArrayTypeTrait - Parsed one of the bianry type trait support /// pseudo-functions. ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, ParsedType LhsTy, Expr *DimExpr, SourceLocation RParen); ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT, SourceLocation KWLoc, TypeSourceInfo *TSInfo, Expr *DimExpr, SourceLocation RParen); /// ActOnExpressionTrait - Parsed one of the unary type trait support /// pseudo-functions. 
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
                                SourceLocation KWLoc,
                                Expr *Queried,
                                SourceLocation RParen);

ExprResult ActOnStartCXXMemberReference(Scope *S,
                                        Expr *Base,
                                        SourceLocation OpLoc,
                                        tok::TokenKind OpKind,
                                        ParsedType &ObjectType,
                                        bool &MayBePseudoDestructor);

ExprResult DiagnoseDtorReference(SourceLocation NameLoc, Expr *MemExpr);

ExprResult BuildPseudoDestructorExpr(Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     const CXXScopeSpec &SS,
                                     TypeSourceInfo *ScopeType,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     PseudoDestructorTypeStorage DestroyedType,
                                     bool HasTrailingLParen);

// Overload taking the destroyed type as a nested-name-specifier plus two
// unqualified-ids (FirstTypeName '::' '~' SecondTypeName).
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     CXXScopeSpec &SS,
                                     UnqualifiedId &FirstTypeName,
                                     SourceLocation CCLoc,
                                     SourceLocation TildeLoc,
                                     UnqualifiedId &SecondTypeName,
                                     bool HasTrailingLParen);

// Overload taking the destroyed type as a decl-specifier sequence (DS)
// rather than an unqualified-id.
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
                                     SourceLocation OpLoc,
                                     tok::TokenKind OpKind,
                                     SourceLocation TildeLoc,
                                     const DeclSpec& DS,
                                     bool HasTrailingLParen);

/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with a ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);

/// Convenience overload: forwards to the two-argument overload below,
/// using the expression's own location (or an invalid SourceLocation
/// when \p Expr is null).
ExprResult ActOnFinishFullExpr(Expr *Expr) {
  return ActOnFinishFullExpr(Expr, Expr ? Expr->getExprLoc()
                                        : SourceLocation());
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
                               bool DiscardedValue = false,
                               bool IsConstexpr = false,
                               bool IsLambdaInitCaptureInitializer = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);

// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC); DeclContext *computeDeclContext(QualType T); DeclContext *computeDeclContext(const CXXScopeSpec &SS, bool EnteringContext = false); bool isDependentScopeSpecifier(const CXXScopeSpec &SS); CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS); /// \brief The parser has parsed a global nested-name-specifier '::'. /// /// \param CCLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS); /// \brief The parser has parsed a '__super' nested-name-specifier. /// /// \param SuperLoc The location of the '__super' keyword. /// /// \param ColonColonLoc The location of the '::'. /// /// \param SS The nested-name-specifier, which will be updated in-place /// to reflect the parsed nested-name-specifier. /// /// \returns true if an error occurred, false otherwise. bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc, SourceLocation ColonColonLoc, CXXScopeSpec &SS); bool isAcceptableNestedNameSpecifier(const NamedDecl *SD); NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS); bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation IdLoc, IdentifierInfo &II, ParsedType ObjectType); bool BuildCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, QualType ObjectType, bool EnteringContext, CXXScopeSpec &SS, NamedDecl *ScopeLookupResult, bool ErrorRecoveryLookup, bool *IsCorrectedToColon = nullptr); /// \brief The parser has parsed a nested-name-specifier 'identifier::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param Identifier The identifier preceding the '::'. /// /// \param IdentifierLoc The location of the identifier. 
/// /// \param CCLoc The location of the '::'. /// /// \param ObjectType The type of the object, if we're parsing /// nested-name-specifier in a member access expression. /// /// \param EnteringContext Whether we're entering the context nominated by /// this nested-name-specifier. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). /// /// \param ErrorRecoveryLookup If true, then this method is called to improve /// error recovery. In this case do not emit error message. /// /// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':' /// are allowed. The bool value pointed by this parameter is set to 'true' /// if the identifier is treated as if it was followed by ':', not '::'. /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation CCLoc, ParsedType ObjectType, bool EnteringContext, CXXScopeSpec &SS, bool ErrorRecoveryLookup = false, bool *IsCorrectedToColon = nullptr); ExprResult ActOnDecltypeExpression(Expr *E); bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS, const DeclSpec &DS, SourceLocation ColonColonLoc); bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS, IdentifierInfo &Identifier, SourceLocation IdentifierLoc, SourceLocation ColonLoc, ParsedType ObjectType, bool EnteringContext); /// \brief The parser has parsed a nested-name-specifier /// 'template[opt] template-name < template-args >::'. /// /// \param S The scope in which this nested-name-specifier occurs. /// /// \param SS The nested-name-specifier, which is both an input /// parameter (the nested-name-specifier before this type) and an /// output parameter (containing the full nested-name-specifier, /// including this new type). 
/// /// \param TemplateKWLoc the location of the 'template' keyword, if any. /// \param TemplateName the template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). /// \param CCLoc The location of the '::'. /// /// \param EnteringContext Whether we're entering the context of the /// nested-name-specifier. /// /// /// \returns true if an error occurred, false otherwise. bool ActOnCXXNestedNameSpecifier(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, SourceLocation CCLoc, bool EnteringContext); /// \brief Given a C++ nested-name-specifier, produce an annotation value /// that the parser can use later to reconstruct the given /// nested-name-specifier. /// /// \param SS A nested-name-specifier. /// /// \returns A pointer containing all of the information in the /// nested-name-specifier \p SS. void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS); /// \brief Given an annotation pointer for a nested-name-specifier, restore /// the nested-name-specifier structure. /// /// \param Annotation The annotation pointer, produced by /// \c SaveNestedNameSpecifierAnnotation(). /// /// \param AnnotationRange The source range corresponding to the annotation. /// /// \param SS The nested-name-specifier that will be updated with the contents /// of the annotation pointer. void RestoreNestedNameSpecifierAnnotation(void *Annotation, SourceRange AnnotationRange, CXXScopeSpec &SS); bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global /// scope or nested-name-specifier) is parsed, part of a declarator-id. 
/// After this method is called, according to [C++ 3.4.3p3], names should be /// looked up in the declarator-id's scope, until the declarator is parsed and /// ActOnCXXExitDeclaratorScope is called. /// The 'SS' should be a non-empty valid CXXScopeSpec. bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS); /// ActOnCXXExitDeclaratorScope - Called when a declarator that previously /// invoked ActOnCXXEnterDeclaratorScope(), is finished. 'SS' is the same /// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well. /// Used to indicate that names should revert to being looked up in the /// defining scope. void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS); /// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an /// initializer for the declaration 'Dcl'. /// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a /// static data member of class X, names should be looked up in the scope of /// class X. void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl); /// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an /// initializer for the declaration 'Dcl'. void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl); /// \brief Create a new lambda closure type. CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange, TypeSourceInfo *Info, bool KnownDependent, LambdaCaptureDefault CaptureDefault); /// \brief Start the definition of a lambda expression. CXXMethodDecl *startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange, TypeSourceInfo *MethodType, SourceLocation EndLoc, ArrayRef<ParmVarDecl *> Params); /// \brief Endow the lambda scope info with the relevant properties. 
void buildLambdaScope(sema::LambdaScopeInfo *LSI, CXXMethodDecl *CallOperator, SourceRange IntroducerRange, LambdaCaptureDefault CaptureDefault, SourceLocation CaptureDefaultLoc, bool ExplicitParams, bool ExplicitResultType, bool Mutable); /// \brief Perform initialization analysis of the init-capture and perform /// any implicit conversions such as an lvalue-to-rvalue conversion if /// not being used to initialize a reference. QualType performLambdaInitCaptureInitialization(SourceLocation Loc, bool ByRef, IdentifierInfo *Id, Expr *&Init); /// \brief Create a dummy variable within the declcontext of the lambda's /// call operator, for name lookup purposes for a lambda init capture. /// /// CodeGen handles emission of lambda captures, ignoring these dummy /// variables appropriately. VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc, QualType InitCaptureType, IdentifierInfo *Id, Expr *Init); /// \brief Build the implicit field for an init-capture. FieldDecl *buildInitCaptureField(sema::LambdaScopeInfo *LSI, VarDecl *Var); /// \brief Note that we have finished the explicit captures for the /// given lambda. void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI); /// \brief Introduce the lambda parameters into scope. void addLambdaParameters(CXXMethodDecl *CallOperator, Scope *CurScope); /// \brief Deduce a block or lambda's return type based on the return /// statements present in the body. void deduceClosureReturnType(sema::CapturingScopeInfo &CSI); /// ActOnStartOfLambdaDefinition - This is called just before we start /// parsing the body of a lambda; it analyzes the explicit captures and /// arguments, and sets up various data-structures for the body of the /// lambda. void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro, Declarator &ParamInfo, Scope *CurScope); /// ActOnLambdaError - If there is an error parsing a lambda, this callback /// is invoked to pop the information about the lambda. 
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope, bool IsInstantiation = false); /// ActOnLambdaExpr - This is called when the body of a lambda expression /// was successfully completed. ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body, Scope *CurScope, bool IsInstantiation = false); /// \brief Define the "body" of the conversion from a lambda object to a /// function pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToFunctionPointerConversion( SourceLocation CurrentLoc, CXXConversionDecl *Conv); /// \brief Define the "body" of the conversion from a lambda object to a /// block pointer. /// /// This routine doesn't actually define a sensible body; rather, it fills /// in the initialization expression needed to copy the lambda object into /// the block, and IR generation actually generates the real body of the /// block pointer conversion. void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc, CXXConversionDecl *Conv); ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation, SourceLocation ConvLocation, CXXConversionDecl *Conv, Expr *Src); // ParseObjCStringLiteral - Parse Objective-C string literals. ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs, Expr **Strings, unsigned NumStrings); ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S); /// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the /// numeric literal expression. Type of the expression will be "NSNumber *" /// or "id" if NSNumber is unavailable. 
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number); ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc, bool Value); ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements); /// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the /// '@' prefixed parenthesized expression. The type of the expression will /// either be "NSNumber *" or "NSString *" depending on the type of /// ValueType, which is allowed to be a built-in numeric type or /// "char *" or "const char *". ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr); ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr, Expr *IndexExpr, ObjCMethodDecl *getterMethod, ObjCMethodDecl *setterMethod); ExprResult BuildObjCDictionaryLiteral(SourceRange SR, ObjCDictionaryElement *Elements, unsigned NumElements); ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc, TypeSourceInfo *EncodedTypeInfo, SourceLocation RParenLoc); ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl, CXXConversionDecl *Method, bool HadMultipleCandidates); ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc, SourceLocation EncodeLoc, SourceLocation LParenLoc, ParsedType Ty, SourceLocation RParenLoc); /// ParseObjCSelectorExpression - Build selector expression for \@selector ExprResult ParseObjCSelectorExpression(Selector Sel, SourceLocation AtLoc, SourceLocation SelLoc, SourceLocation LParenLoc, SourceLocation RParenLoc, bool WarnMultipleSelectors); /// ParseObjCProtocolExpression - Build protocol expression for \@protocol ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName, SourceLocation AtLoc, SourceLocation ProtoLoc, SourceLocation LParenLoc, SourceLocation ProtoIdLoc, SourceLocation RParenLoc); //===--------------------------------------------------------------------===// // C++ Declarations // Decl *ActOnStartLinkageSpecification(Scope *S, SourceLocation ExternLoc, Expr *LangStr, SourceLocation 
LBraceLoc); Decl *ActOnFinishLinkageSpecification(Scope *S, Decl *LinkageSpec, SourceLocation RBraceLoc); //===--------------------------------------------------------------------===// // C++ Classes // bool isCurrentClassName(const IdentifierInfo &II, Scope *S, const CXXScopeSpec *SS = nullptr); bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS); bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc, SourceLocation ColonLoc, AttributeList *Attrs = nullptr); NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS, Declarator &D, MultiTemplateParamsArg TemplateParameterLists, Expr *BitfieldWidth, const VirtSpecifiers &VS, InClassInitStyle InitStyle); void ActOnStartCXXInClassMemberInitializer(); void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl, SourceLocation EqualLoc, Expr *Init); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, SourceLocation LParenLoc, ArrayRef<Expr *> Args, SourceLocation RParenLoc, SourceLocation EllipsisLoc); MemInitResult ActOnMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *InitList, SourceLocation EllipsisLoc); MemInitResult BuildMemInitializer(Decl *ConstructorD, Scope *S, CXXScopeSpec &SS, IdentifierInfo *MemberOrBase, ParsedType TemplateTypeTy, const DeclSpec &DS, SourceLocation IdLoc, Expr *Init, SourceLocation EllipsisLoc); MemInitResult BuildMemberInitializer(ValueDecl *Member, Expr *Init, SourceLocation IdLoc); MemInitResult BuildBaseInitializer(QualType BaseType, TypeSourceInfo *BaseTInfo, Expr *Init, CXXRecordDecl *ClassDecl, SourceLocation EllipsisLoc); MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo, Expr *Init, CXXRecordDecl *ClassDecl); bool SetDelegatingInitializer(CXXConstructorDecl *Constructor, 
CXXCtorInitializer *Initializer); bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors, ArrayRef<CXXCtorInitializer *> Initializers = None); void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation); /// MarkBaseAndMemberDestructorsReferenced - Given a record decl, /// mark all the non-trivial destructors of its members and bases as /// referenced. void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc, CXXRecordDecl *Record); /// \brief The list of classes whose vtables have been used within /// this translation unit, and the source locations at which the /// first use occurred. typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse; /// \brief The list of vtables that are required but have not yet been /// materialized. SmallVector<VTableUse, 16> VTableUses; /// \brief The set of classes whose vtables have been used within /// this translation unit, and a bit that will be true if the vtable is /// required to be emitted (otherwise, it should be emitted only if needed /// by code generation). llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed; /// \brief Load any externally-stored vtable uses. void LoadExternalVTableUses(); typedef LazyVector<CXXRecordDecl *, ExternalSemaSource, &ExternalSemaSource::ReadDynamicClasses, 2, 2> DynamicClassesType; /// \brief A list of all of the dynamic classes in this translation /// unit. DynamicClassesType DynamicClasses; /// \brief Note that the vtable for the given class was used at the /// given location. void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class, bool DefinitionRequired = false); /// \brief Mark the exception specifications of all virtual member functions /// in the given class as needed. void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc, const CXXRecordDecl *RD); /// MarkVirtualMembersReferenced - Will mark all members of the given /// CXXRecordDecl referenced. 
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD); /// \brief Define all of the vtables that have been used in this /// translation unit and reference any virtual members used by those /// vtables. /// /// \returns true if any work was done, false otherwise. bool DefineUsedVTables(); void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl); void ActOnMemInitializers(Decl *ConstructorDecl, SourceLocation ColonLoc, ArrayRef<CXXCtorInitializer*> MemInits, bool AnyErrors); void CheckCompletedCXXClass(CXXRecordDecl *Record); void ActOnFinishCXXMemberSpecification(Scope* S, SourceLocation RLoc, Decl *TagDecl, SourceLocation LBrac, SourceLocation RBrac, AttributeList *AttrList); void ActOnFinishCXXMemberDecls(); void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param); unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template); void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param); void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record); void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method); void ActOnFinishDelayedMemberInitializers(Decl *Record); void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD, CachedTokens &Toks); void UnmarkAsLateParsedTemplate(FunctionDecl *FD); bool IsInsideALocalClassWithinATemplateFunction(); Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, Expr *AssertMessageExpr, SourceLocation RParenLoc); Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc, Expr *AssertExpr, StringLiteral *AssertMessageExpr, SourceLocation RParenLoc, bool Failed); FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart, SourceLocation FriendLoc, TypeSourceInfo *TSInfo); Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS, MultiTemplateParamsArg TemplateParams); NamedDecl *ActOnFriendFunctionDecl(Scope *S, 
Declarator &D, MultiTemplateParamsArg TemplateParams); QualType CheckConstructorDeclarator(Declarator &D, QualType R, StorageClass& SC); void CheckConstructor(CXXConstructorDecl *Constructor); QualType CheckDestructorDeclarator(Declarator &D, QualType R, StorageClass& SC); bool CheckDestructor(CXXDestructorDecl *Destructor); void CheckConversionDeclarator(Declarator &D, QualType &R, StorageClass& SC); Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion); void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD); void CheckExplicitlyDefaultedMemberExceptionSpec(CXXMethodDecl *MD, const FunctionProtoType *T); void CheckDelayedMemberExceptionSpecs(); //===--------------------------------------------------------------------===// // C++ Derived Classes // /// ActOnBaseSpecifier - Parsed a base specifier CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class, SourceRange SpecifierRange, bool Virtual, AccessSpecifier Access, TypeSourceInfo *TInfo, SourceLocation EllipsisLoc); BaseResult ActOnBaseSpecifier(Decl *classdecl, SourceRange SpecifierRange, ParsedAttributes &Attrs, bool Virtual, AccessSpecifier Access, ParsedType basetype, SourceLocation BaseLoc, SourceLocation EllipsisLoc); bool AttachBaseSpecifiers(CXXRecordDecl *Class, CXXBaseSpecifier **Bases, unsigned NumBases); void ActOnBaseSpecifiers(Decl *ClassDecl, CXXBaseSpecifier **Bases, unsigned NumBases); bool IsDerivedFrom(QualType Derived, QualType Base); bool IsDerivedFrom(QualType Derived, QualType Base, CXXBasePaths &Paths); // FIXME: I don't like this name. 
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath); bool BasePathInvolvesVirtualBase(const CXXCastPath &BasePath); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, SourceLocation Loc, SourceRange Range, CXXCastPath *BasePath = nullptr, bool IgnoreAccess = false); bool CheckDerivedToBaseConversion(QualType Derived, QualType Base, unsigned InaccessibleBaseID, unsigned AmbigiousBaseConvID, SourceLocation Loc, SourceRange Range, DeclarationName Name, CXXCastPath *BasePath); std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths); bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionReturnType - Checks whether the return types are /// covariant, according to C++ [class.virtual]p5. bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New, const CXXMethodDecl *Old); /// CheckOverridingFunctionExceptionSpec - Checks whether the exception /// spec is a subset of base spec. bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New, const CXXMethodDecl *Old); bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange); /// CheckOverrideControl - Check C++11 override control semantics. void CheckOverrideControl(NamedDecl *D); /// DiagnoseAbsenceOfOverrideControl - Diagnose if 'override' keyword was /// not used in the declaration of an overriding method. void DiagnoseAbsenceOfOverrideControl(NamedDecl *D); /// CheckForFunctionMarkedFinal - Checks whether a virtual member function /// overrides a virtual member function marked 'final', according to /// C++11 [class.virtual]p4. 
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
                                            const CXXMethodDecl *Old);

//===--------------------------------------------------------------------===//
// C++ Access Control
//

/// Possible results of a member-access check.
enum AccessResult {
  AR_accessible,
  AR_inaccessible,
  AR_dependent,
  AR_delayed
};

bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
                              NamedDecl *PrevMemberDecl,
                              AccessSpecifier LexicalAS);

AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
                                         DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
                                   SourceRange PlacementRange,
                                   CXXRecordDecl *NamingClass,
                                   DeclAccessPair FoundDecl,
                                   bool Diagnose = true);
// Two overloads: the first uses a default diagnostic, the second takes an
// explicit PartialDiagnostic to emit on failure.
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
                                    CXXConstructorDecl *D,
                                    const InitializedEntity &Entity,
                                    AccessSpecifier Access,
                                    const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
                                   CXXDestructorDecl *Dtor,
                                   const PartialDiagnostic &PDiag,
                                   QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
                               CXXRecordDecl *NamingClass,
                               DeclAccessPair Found);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
                                       Expr *ObjectExpr,
                                       Expr *ArgExpr,
                                       DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
                                        DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
                                  QualType Base, QualType Derived,
                                  const CXXBasePath &Path,
                                  unsigned DiagID,
                                  bool ForceCheck = false,
                                  bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *decl, DeclContext *Ctx);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
                                          AccessSpecifier access,
                                          QualType objectType);

void HandleDependentAccessCheck(const DependentDiagnostic &DD,
                         const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
                       const MultiLevelTemplateArgumentList &TemplateArgs);

void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);

/// \brief When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;

/// Selector values identifying where an abstract type was used, for
/// diagnostics emitted by RequireNonAbstractType.
enum AbstractDiagSelID {
  AbstractNone = -1,
  AbstractReturnType,
  AbstractParamType,
  AbstractVariableType,
  AbstractFieldType,
  AbstractIvarType,
  AbstractSynthesizedIvarType,
  AbstractArrayType
};

bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            TypeDiagnoser &Diagnoser);
// One-argument convenience: binds DiagID and Arg1 into a diagnoser and
// forwards to the TypeDiagnoser overload above.
template<typename T1>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            unsigned DiagID,
                            const T1 &Arg1) {
  BoundTypeDiagnoser1<T1> Diagnoser(DiagID, Arg1);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

// Two-argument variant of the convenience helper above.
template<typename T1, typename T2>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            unsigned DiagID,
                            const T1 &Arg1, const T2 &Arg2) {
  BoundTypeDiagnoser2<T1, T2> Diagnoser(DiagID, Arg1, Arg2);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

// Three-argument variant of the convenience helper above.
template<typename T1, typename T2, typename T3>
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
                            unsigned DiagID,
                            const T1 &Arg1, const T2 &Arg2, const T3 &Arg3) {
  BoundTypeDiagnoser3<T1, T2, T3> Diagnoser(DiagID, Arg1, Arg2, Arg3);
  return RequireNonAbstractType(Loc, T, Diagnoser);
}

void DiagnoseAbstractType(const CXXRecordDecl *RD);

bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
                            AbstractDiagSelID SelID = AbstractNone);

//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//

bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);

bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);

//===--------------------------------------------------------------------===//
// C++ Templates [C++
14] // void FilterAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); bool hasAnyAcceptableTemplateNames(LookupResult &R, bool AllowFunctionTemplates = true); void LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS, QualType ObjectType, bool EnteringContext, bool &MemberOfUnknownSpecialization); TemplateNameKind isTemplateName(Scope *S, CXXScopeSpec &SS, bool hasTemplateKeyword, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template, bool &MemberOfUnknownSpecialization); bool DiagnoseUnknownTemplateName(const IdentifierInfo &II, SourceLocation IILoc, Scope *S, const CXXScopeSpec *SS, TemplateTy &SuggestedTemplate, TemplateNameKind &SuggestedKind); void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl); TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl); Decl *ActOnTypeParameter(Scope *S, bool Typename, SourceLocation EllipsisLoc, SourceLocation KeyLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedType DefaultArg); QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc); Decl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D, unsigned Depth, unsigned Position, SourceLocation EqualLoc, Expr *DefaultArg); Decl *ActOnTemplateTemplateParameter(Scope *S, SourceLocation TmpLoc, TemplateParameterList *Params, SourceLocation EllipsisLoc, IdentifierInfo *ParamName, SourceLocation ParamNameLoc, unsigned Depth, unsigned Position, SourceLocation EqualLoc, ParsedTemplateArgument DefaultArg); TemplateParameterList * ActOnTemplateParameterList(unsigned Depth, SourceLocation ExportLoc, SourceLocation TemplateLoc, SourceLocation LAngleLoc, Decl **Params, unsigned NumParams, SourceLocation RAngleLoc); /// \brief The context in which we are checking a template parameter list. 
enum TemplateParamListContext { TPC_ClassTemplate, TPC_VarTemplate, TPC_FunctionTemplate, TPC_ClassTemplateMember, TPC_FriendClassTemplate, TPC_FriendFunctionTemplate, TPC_FriendFunctionTemplateDefinition, TPC_TypeAliasTemplate }; bool CheckTemplateParameterList(TemplateParameterList *NewParams, TemplateParameterList *OldParams, TemplateParamListContext TPC); TemplateParameterList *MatchTemplateParametersToScopeSpecifier( SourceLocation DeclStartLoc, SourceLocation DeclLoc, const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId, ArrayRef<TemplateParameterList *> ParamLists, bool IsFriend, bool &IsExplicitSpecialization, bool &Invalid); DeclResult CheckClassTemplate(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr, TemplateParameterList *TemplateParams, AccessSpecifier AS, SourceLocation ModulePrivateLoc, SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists, TemplateParameterList **OuterTemplateParamLists); void translateTemplateArguments(const ASTTemplateArgsPtr &In, TemplateArgumentListInfo &Out); void NoteAllFoundTemplates(TemplateName Name); QualType CheckTemplateIdType(TemplateName Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs); TypeResult ActOnTemplateIdType(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy Template, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, bool IsCtorOrDtorName = false); /// \brief Parsed an elaborated-type-specifier that refers to a template-id, /// such as \c class T::template apply<U>. 
TypeResult ActOnTagTemplateIdType(TagUseKind TUK, TypeSpecifierType TagSpec, SourceLocation TagLoc, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, TemplateTy TemplateD, SourceLocation TemplateLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgsIn, SourceLocation RAngleLoc); DeclResult ActOnVarTemplateSpecialization( Scope *S, Declarator &D, TypeSourceInfo *DI, SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams, StorageClass SC, bool IsPartialSpecialization); DeclResult CheckVarTemplateId(VarTemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation TemplateNameLoc, const TemplateArgumentListInfo &TemplateArgs); ExprResult CheckVarTemplateId(const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, VarTemplateDecl *Template, SourceLocation TemplateLoc, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS, SourceLocation TemplateKWLoc, LookupResult &R, bool RequiresADL, const TemplateArgumentListInfo *TemplateArgs); ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS, SourceLocation TemplateKWLoc, const DeclarationNameInfo &NameInfo, const TemplateArgumentListInfo *TemplateArgs); TemplateNameKind ActOnDependentTemplateName(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc, UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext, TemplateTy &Template); DeclResult ActOnClassTemplateSpecialization(Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc, SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId, AttributeList *Attr, MultiTemplateParamsArg TemplateParameterLists); Decl *ActOnTemplateDeclarator(Scope *S, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); Decl *ActOnStartOfFunctionTemplateDef(Scope *FnBodyScope, MultiTemplateParamsArg TemplateParameterLists, Declarator &D); bool CheckSpecializationInstantiationRedecl(SourceLocation NewLoc, TemplateSpecializationKind NewTSK, NamedDecl *PrevDecl, TemplateSpecializationKind 
PrevTSK, SourceLocation PrevPtOfInstantiation, bool &SuppressNew); bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD, const TemplateArgumentListInfo &ExplicitTemplateArgs, LookupResult &Previous); bool CheckFunctionTemplateSpecialization(FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs, LookupResult &Previous); bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS, TemplateTy Template, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, unsigned TagSpec, SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc, AttributeList *Attr); DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc, Declarator &D); TemplateArgumentLoc SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, Decl *Param, SmallVectorImpl<TemplateArgument> &Converted, bool &HasDefaultArg); /// \brief Specifies the context in which a particular template /// argument is being checked. enum CheckTemplateArgumentKind { /// \brief The template argument was specified in the code or was /// instantiated with some deduced template arguments. CTAK_Specified, /// \brief The template argument was deduced via template argument /// deduction. CTAK_Deduced, /// \brief The template argument was deduced from an array bound /// via template argument deduction. 
CTAK_DeducedFromArrayBound }; bool CheckTemplateArgument(NamedDecl *Param, TemplateArgumentLoc &Arg, NamedDecl *Template, SourceLocation TemplateLoc, SourceLocation RAngleLoc, unsigned ArgumentPackIndex, SmallVectorImpl<TemplateArgument> &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); /// \brief Check that the given template arguments can be provided to /// the given template, converting the arguments along the way. /// /// \param Template The template to which the template arguments are being /// provided. /// /// \param TemplateLoc The location of the template name in the source. /// /// \param TemplateArgs The list of template arguments. If the template is /// a template template parameter, this function may extend the set of /// template arguments to also include substituted, defaulted template /// arguments. /// /// \param PartialTemplateArgs True if the list of template arguments is /// intentionally partial, e.g., because we're checking just the initial /// set of template arguments. /// /// \param Converted Will receive the converted, canonicalized template /// arguments. /// /// \returns true if an error occurred, false otherwise.
bool CheckTemplateArgumentList(TemplateDecl *Template, SourceLocation TemplateLoc, TemplateArgumentListInfo &TemplateArgs, bool PartialTemplateArgs, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param, TemplateArgumentLoc &Arg, SmallVectorImpl<TemplateArgument> &Converted); bool CheckTemplateArgument(TemplateTypeParmDecl *Param, TypeSourceInfo *Arg); ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param, QualType InstantiatedParamType, Expr *Arg, TemplateArgument &Converted, CheckTemplateArgumentKind CTAK = CTAK_Specified); bool CheckTemplateArgument(TemplateTemplateParmDecl *Param, TemplateArgumentLoc &Arg, unsigned ArgumentPackIndex); ExprResult BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg, QualType ParamType, SourceLocation Loc); ExprResult BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg, SourceLocation Loc); /// \brief Enumeration describing how template parameter lists are compared /// for equality. enum TemplateParameterListEqualKind { /// \brief We are matching the template parameter lists of two templates /// that might be redeclarations. /// /// \code /// template<typename T> struct X; /// template<typename T> struct X; /// \endcode TPL_TemplateMatch, /// \brief We are matching the template parameter lists of two template /// template parameters as part of matching the template parameter lists /// of two templates that might be redeclarations. /// /// \code /// template<template<int I> class TT> struct X; /// template<template<int Value> class Other> struct X; /// \endcode TPL_TemplateTemplateParmMatch, /// \brief We are matching the template parameter lists of a template /// template argument against the template parameter lists of a template /// template parameter. 
/// /// \code /// template<template<int Value> class Metafun> struct X; /// template<int Value> struct integer_c; /// X<integer_c> xic; /// \endcode TPL_TemplateTemplateArgumentMatch }; bool TemplateParameterListsAreEqual(TemplateParameterList *New, TemplateParameterList *Old, bool Complain, TemplateParameterListEqualKind Kind, SourceLocation TemplateArgLoc = SourceLocation()); bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams); /// \brief Called when the parser has parsed a C++ typename /// specifier, e.g., "typename T::type". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param II the identifier we're retrieving (e.g., 'type' in the example). /// \param IdLoc the location of the identifier. TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, const IdentifierInfo &II, SourceLocation IdLoc); /// \brief Called when the parser has parsed a C++ typename /// specifier that ends in a template-id, e.g., /// "typename MetaFun::template apply<T1, T2>". /// /// \param S The scope in which this typename type occurs. /// \param TypenameLoc the location of the 'typename' keyword /// \param SS the nested-name-specifier following the typename (e.g., 'T::'). /// \param TemplateLoc the location of the 'template' keyword, if any. /// \param TemplateName The template name. /// \param TemplateNameLoc The location of the template name. /// \param LAngleLoc The location of the opening angle bracket ('<'). /// \param TemplateArgs The template arguments. /// \param RAngleLoc The location of the closing angle bracket ('>'). 
TypeResult ActOnTypenameType(Scope *S, SourceLocation TypenameLoc, const CXXScopeSpec &SS, SourceLocation TemplateLoc, TemplateTy TemplateName, SourceLocation TemplateNameLoc, SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc); QualType CheckTypenameType(ElaboratedTypeKeyword Keyword, SourceLocation KeywordLoc, NestedNameSpecifierLoc QualifierLoc, const IdentifierInfo &II, SourceLocation IILoc); TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T, SourceLocation Loc, DeclarationName Name); bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS); ExprResult RebuildExprInCurrentInstantiation(Expr *E); bool RebuildTemplateParamsInCurrentInstantiation( TemplateParameterList *Params); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgumentList &Args); std::string getTemplateArgumentBindingsText(const TemplateParameterList *Params, const TemplateArgument *Args, unsigned NumArgs); //===--------------------------------------------------------------------===// // C++ Variadic Templates (C++0x [temp.variadic]) //===--------------------------------------------------------------------===// /// Determine whether an unexpanded parameter pack might be permitted in this /// location. Useful for error recovery. bool isUnexpandedParameterPackPermitted(); /// \brief The context in which an unexpanded parameter pack is /// being diagnosed. /// /// Note that the values of this enumeration line up with the first /// argument to the \c err_unexpanded_parameter_pack diagnostic. enum UnexpandedParameterPackContext { /// \brief An arbitrary expression. UPPC_Expression = 0, /// \brief The base type of a class type. UPPC_BaseType, /// \brief The type of an arbitrary declaration. UPPC_DeclarationType, /// \brief The type of a data member. UPPC_DataMemberType, /// \brief The size of a bit-field. UPPC_BitFieldWidth, /// \brief The expression in a static assertion. 
UPPC_StaticAssertExpression, /// \brief The fixed underlying type of an enumeration. UPPC_FixedUnderlyingType, /// \brief The enumerator value. UPPC_EnumeratorValue, /// \brief A using declaration. UPPC_UsingDeclaration, /// \brief A friend declaration. UPPC_FriendDeclaration, /// \brief A declaration qualifier. UPPC_DeclarationQualifier, /// \brief An initializer. UPPC_Initializer, /// \brief A default argument. UPPC_DefaultArgument, /// \brief The type of a non-type template parameter. UPPC_NonTypeTemplateParameterType, /// \brief The type of an exception. UPPC_ExceptionType, /// \brief Partial specialization. UPPC_PartialSpecialization, /// \brief Microsoft __if_exists. UPPC_IfExists, /// \brief Microsoft __if_not_exists. UPPC_IfNotExists, /// \brief Lambda expression. UPPC_Lambda, /// \brief Block expression. UPPC_Block }; /// \brief Diagnose unexpanded parameter packs. /// /// \param Loc The location at which we should emit the diagnostic. /// /// \param UPPC The context in which we are diagnosing unexpanded /// parameter packs. /// /// \param Unexpanded the set of unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc, UnexpandedParameterPackContext UPPC, ArrayRef<UnexpandedParameterPack> Unexpanded); /// \brief If the given type contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The source location where a diagnostic should be emitted. /// /// \param T The type that is being checked for unexpanded parameter /// packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T, UnexpandedParameterPackContext UPPC); /// \brief If the given expression contains an unexpanded parameter /// pack, diagnose the error. /// /// \param E The expression that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E, UnexpandedParameterPackContext UPPC = UPPC_Expression); /// \brief If the given nested-name-specifier contains an unexpanded /// parameter pack, diagnose the error. /// /// \param SS The nested-name-specifier that is being checked for /// unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS, UnexpandedParameterPackContext UPPC); /// \brief If the given name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param NameInfo The name (with source location information) that /// is being checked for unexpanded parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo, UnexpandedParameterPackContext UPPC); /// \brief If the given template name contains an unexpanded parameter pack, /// diagnose the error. /// /// \param Loc The location of the template name. /// /// \param Template The template name that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TemplateName Template, UnexpandedParameterPackContext UPPC); /// \brief If the given template argument contains an unexpanded parameter /// pack, diagnose the error. /// /// \param Arg The template argument that is being checked for unexpanded /// parameter packs. /// /// \returns true if an error occurred, false otherwise. bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg, UnexpandedParameterPackContext UPPC); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. 
void collectUnexpandedParameterPacks(TemplateArgument Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// template argument. /// /// \param Arg The template argument that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param T The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(QualType T, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// type. /// /// \param TL The type that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(TypeLoc TL, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// nested-name-specifier. /// /// \param SS The nested-name-specifier that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(CXXScopeSpec &SS, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Collect the set of unexpanded parameter packs within the given /// name. /// /// \param NameInfo The name that will be traversed to find /// unexpanded parameter packs. void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo, SmallVectorImpl<UnexpandedParameterPack> &Unexpanded); /// \brief Invoked when parsing a template argument followed by an /// ellipsis, which creates a pack expansion. /// /// \param Arg The template argument preceding the ellipsis, which /// may already be invalid. /// /// \param EllipsisLoc The location of the ellipsis. 
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg, SourceLocation EllipsisLoc); /// \brief Invoked when parsing a type followed by an ellipsis, which /// creates a pack expansion. /// /// \param Type The type preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Construct a pack expansion type from the pattern of the pack /// expansion. QualType CheckPackExpansion(QualType Pattern, SourceRange PatternRange, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc); /// \brief Invoked when parsing an expression followed by an ellipsis, which /// creates a pack expansion. /// /// \param Pattern The expression preceding the ellipsis, which will become /// the pattern of the pack expansion. /// /// \param EllipsisLoc The location of the ellipsis. ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc, Optional<unsigned> NumExpansions); /// \brief Determine whether we could expand a pack expansion with the /// given set of parameter packs into separate arguments by repeatedly /// transforming the pattern. /// /// \param EllipsisLoc The location of the ellipsis that identifies the /// pack expansion. /// /// \param PatternRange The source range that covers the entire pattern of /// the pack expansion. 
/// /// \param Unexpanded The set of unexpanded parameter packs within the /// pattern. /// /// \param ShouldExpand Will be set to \c true if the transformer should /// expand the corresponding pack expansions into separate arguments. When /// set, \c NumExpansions must also be set. /// /// \param RetainExpansion Whether the caller should add an unexpanded /// pack expansion after all of the expanded arguments. This is used /// when extending explicitly-specified template argument packs per /// C++0x [temp.arg.explicit]p9. /// /// \param NumExpansions The number of separate arguments that will be in /// the expanded form of the corresponding pack expansion. This is both an /// input and an output parameter, which can be set by the caller if the /// number of expansions is known a priori (e.g., due to a prior substitution) /// and will be set by the callee when the number of expansions is known. /// The callee must set this value when \c ShouldExpand is \c true; it may /// set this value in other cases. /// /// \returns true if an error occurred (e.g., because the parameter packs /// are to be instantiated with arguments of different lengths), false /// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions) /// must be set. bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc, SourceRange PatternRange, ArrayRef<UnexpandedParameterPack> Unexpanded, const MultiLevelTemplateArgumentList &TemplateArgs, bool &ShouldExpand, bool &RetainExpansion, Optional<unsigned> &NumExpansions); /// \brief Determine the number of arguments in the given pack expansion /// type. /// /// This routine assumes that the number of arguments in the expansion is /// consistent across all of the unexpanded parameter packs in its pattern. /// /// Returns an empty Optional if the type can't be expanded. 
Optional<unsigned> getNumArgumentsInExpansion(QualType T, const MultiLevelTemplateArgumentList &TemplateArgs); /// \brief Determine whether the given declarator contains any unexpanded /// parameter packs. /// /// This routine is used by the parser to disambiguate function declarators /// with an ellipsis prior to the ')', e.g., /// /// \code /// void f(T...); /// \endcode /// /// To determine whether we have an (unnamed) function parameter pack or /// a variadic function. /// /// \returns true if the declarator contains any unexpanded parameter packs, /// false otherwise. bool containsUnexpandedParameterPacks(Declarator &D); /// \brief Returns the pattern of the pack expansion for a template argument. /// /// \param OrigLoc The template argument to expand. /// /// \param Ellipsis Will be set to the location of the ellipsis. /// /// \param NumExpansions Will be set to the number of expansions that will /// be generated from this pack expansion, if known a priori. TemplateArgumentLoc getTemplateArgumentPackExpansionPattern( TemplateArgumentLoc OrigLoc, SourceLocation &Ellipsis, Optional<unsigned> &NumExpansions) const; //===--------------------------------------------------------------------===// // C++ Template Argument Deduction (C++ [temp.deduct]) //===--------------------------------------------------------------------===// QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType); /// \brief Describes the result of template argument deduction. /// /// The TemplateDeductionResult enumeration describes the result of /// template argument deduction, as returned from /// DeduceTemplateArguments(). The separate TemplateDeductionInfo /// structure provides additional information about the results of /// template argument deduction, e.g., the deduced template argument /// list (if successful) or the specific template parameters or /// deduced arguments that were involved in the failure. 
enum TemplateDeductionResult { /// \brief Template argument deduction was successful. TDK_Success = 0, /// \brief The declaration was invalid; do nothing. TDK_Invalid, /// \brief Template argument deduction exceeded the maximum template /// instantiation depth (which has already been diagnosed). TDK_InstantiationDepth, /// \brief Template argument deduction did not deduce a value /// for every template parameter. TDK_Incomplete, /// \brief Template argument deduction produced inconsistent /// deduced values for the given template parameter. TDK_Inconsistent, /// \brief Template argument deduction failed due to inconsistent /// cv-qualifiers on a template parameter type that would /// otherwise be deduced, e.g., we tried to deduce T in "const T" /// but were given a non-const "X". TDK_Underqualified, /// \brief Substitution of the deduced template argument values /// resulted in an error. TDK_SubstitutionFailure, /// \brief A non-dependent component of the parameter did not match the /// corresponding component of the argument. TDK_NonDeducedMismatch, /// \brief When performing template argument deduction for a function /// template, there were too many call arguments. TDK_TooManyArguments, /// \brief When performing template argument deduction for a function /// template, there were too few call arguments. TDK_TooFewArguments, /// \brief The explicitly-specified template arguments were not valid /// template arguments for the given template. TDK_InvalidExplicitArguments, /// \brief The arguments included an overloaded function name that could /// not be resolved to a suitable function. TDK_FailedOverloadResolution, /// \brief Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure }; TemplateDeductionResult DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial, const TemplateArgumentList &TemplateArgs, sema::TemplateDeductionInfo &Info); TemplateDeductionResult SubstituteExplicitTemplateArguments( FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo &ExplicitTemplateArgs, SmallVectorImpl<DeducedTemplateArgument> &Deduced, SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType, sema::TemplateDeductionInfo &Info); /// \brief A function argument from which we performed template argument /// deduction for a call. struct OriginalCallArg { OriginalCallArg(QualType OriginalParamType, unsigned ArgIdx, QualType OriginalArgType) : OriginalParamType(OriginalParamType), ArgIdx(ArgIdx), OriginalArgType(OriginalArgType) { } QualType OriginalParamType; unsigned ArgIdx; QualType OriginalArgType; }; TemplateDeductionResult FinishTemplateArgumentDeduction(FunctionTemplateDecl *FunctionTemplate, SmallVectorImpl<DeducedTemplateArgument> &Deduced, unsigned NumExplicitlySpecified, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, QualType ArgFunctionType, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, QualType ToType, CXXConversionDecl
*&Specialization, sema::TemplateDeductionInfo &Info); TemplateDeductionResult DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate, TemplateArgumentListInfo *ExplicitTemplateArgs, FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info, bool InOverloadResolution = false); /// \brief Substitute Replacement for \p auto in \p TypeWithAuto QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement); /// \brief Substitute Replacement for auto in TypeWithAuto TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto, QualType Replacement); /// \brief Result type of DeduceAutoType. enum DeduceAutoResult { DAR_Succeeded, DAR_Failed, DAR_FailedAlreadyDiagnosed }; DeduceAutoResult DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result); DeduceAutoResult DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result); void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init); bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc, bool Diagnose = true); TypeLoc getReturnTypeLoc(FunctionDecl *FD) const; bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD, SourceLocation ReturnLoc, Expr *&RetExpr, AutoType *AT); FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1, FunctionTemplateDecl *FT2, SourceLocation Loc, TemplatePartialOrderingContext TPOC, unsigned NumCallArguments1, unsigned NumCallArguments2); UnresolvedSetIterator getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd, TemplateSpecCandidateSet &FailedCandidates, SourceLocation Loc, const PartialDiagnostic &NoneDiag, const PartialDiagnostic &AmbigDiag, const PartialDiagnostic &CandidateDiag, bool Complain = true, QualType TargetType = QualType()); ClassTemplatePartialSpecializationDecl * getMoreSpecializedPartialSpecialization( ClassTemplatePartialSpecializationDecl *PS1, ClassTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); VarTemplatePartialSpecializationDecl 
*getMoreSpecializedPartialSpecialization( VarTemplatePartialSpecializationDecl *PS1, VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc); void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs, bool OnlyDeduced, unsigned Depth, llvm::SmallBitVector &Used); void MarkDeducedTemplateParameters( const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced) { return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced); } static void MarkDeducedTemplateParameters(ASTContext &Ctx, const FunctionTemplateDecl *FunctionTemplate, llvm::SmallBitVector &Deduced); //===--------------------------------------------------------------------===// // C++ Template Instantiation // MultiLevelTemplateArgumentList getTemplateInstantiationArgs(NamedDecl *D, const TemplateArgumentList *Innermost = nullptr, bool RelativeToPrimary = false, const FunctionDecl *Pattern = nullptr); /// \brief A template instantiation that is currently in progress. struct ActiveTemplateInstantiation { /// \brief The kind of template instantiation we are performing enum InstantiationKind { /// We are instantiating a template declaration. The entity is /// the declaration we're instantiating (e.g., a CXXRecordDecl). TemplateInstantiation, /// We are instantiating a default argument for a template /// parameter. The Entity is the template, and /// TemplateArgs/NumTemplateArguments provides the template /// arguments as specified. /// FIXME: Use a TemplateArgumentList DefaultTemplateArgumentInstantiation, /// We are instantiating a default argument for a function. /// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs /// provides the template arguments as specified. DefaultFunctionArgumentInstantiation, /// We are substituting explicit template arguments provided for /// a function template. The entity is a FunctionTemplateDecl. 
ExplicitTemplateArgumentSubstitution, /// We are substituting template argument determined as part of /// template argument deduction for either a class template /// partial specialization or a function template. The /// Entity is either a ClassTemplatePartialSpecializationDecl or /// a FunctionTemplateDecl. DeducedTemplateArgumentSubstitution, /// We are substituting prior template arguments into a new /// template parameter. The template parameter itself is either a /// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl. PriorTemplateArgumentSubstitution, /// We are checking the validity of a default template argument that /// has been used when naming a template-id. DefaultTemplateArgumentChecking, /// We are instantiating the exception specification for a function /// template which was deferred until it was needed. ExceptionSpecInstantiation } Kind; /// \brief The point of instantiation within the source code. SourceLocation PointOfInstantiation; /// \brief The template (or partial specialization) in which we are /// performing the instantiation, for substitutions of prior template /// arguments. NamedDecl *Template; /// \brief The entity that is being instantiated. Decl *Entity; /// \brief The list of template arguments we are substituting, if they /// are not part of the entity. const TemplateArgument *TemplateArgs; /// \brief The number of template arguments in TemplateArgs. unsigned NumTemplateArgs; /// \brief The template deduction info object associated with the /// substitution or checking of explicit or deduced template arguments. sema::TemplateDeductionInfo *DeductionInfo; /// \brief The source range that covers the construct that causes /// the instantiation, e.g., the template-id that causes a class /// template instantiation.
SourceRange InstantiationRange; ActiveTemplateInstantiation() : Kind(TemplateInstantiation), Template(nullptr), Entity(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0), DeductionInfo(nullptr) {} /// \brief Determines whether this template is an actual instantiation /// that should be counted toward the maximum instantiation depth. bool isInstantiationRecord() const; friend bool operator==(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { if (X.Kind != Y.Kind) return false; if (X.Entity != Y.Entity) return false; switch (X.Kind) { case TemplateInstantiation: case ExceptionSpecInstantiation: return true; case PriorTemplateArgumentSubstitution: case DefaultTemplateArgumentChecking: return X.Template == Y.Template && X.TemplateArgs == Y.TemplateArgs; case DefaultTemplateArgumentInstantiation: case ExplicitTemplateArgumentSubstitution: case DeducedTemplateArgumentSubstitution: case DefaultFunctionArgumentInstantiation: return X.TemplateArgs == Y.TemplateArgs; } llvm_unreachable("Invalid InstantiationKind!"); } friend bool operator!=(const ActiveTemplateInstantiation &X, const ActiveTemplateInstantiation &Y) { return !(X == Y); } }; /// \brief List of active template instantiations. /// /// This vector is treated as a stack. As one template instantiation /// requires another template instantiation, additional /// instantiations are pushed onto the stack up to a /// user-configurable limit LangOptions::InstantiationDepth. SmallVector<ActiveTemplateInstantiation, 16> ActiveTemplateInstantiations; /// \brief Extra modules inspected when performing a lookup during a template /// instantiation. Computed lazily. SmallVector<Module*, 16> ActiveTemplateInstantiationLookupModules; /// \brief Cache of additional modules that should be used for name lookup /// within the current template instantiation. Computed lazily; use /// getLookupModules() to get a complete set. 
llvm::DenseSet<Module*> LookupModulesCache; /// \brief Get the set of additional modules that should be checked during /// name lookup. A module and its imports become visible when instanting a /// template defined within it. llvm::DenseSet<Module*> &getLookupModules(); /// \brief Whether we are in a SFINAE context that is not associated with /// template instantiation. /// /// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside /// of a template instantiation or template argument deduction. bool InNonInstantiationSFINAEContext; /// \brief The number of ActiveTemplateInstantiation entries in /// \c ActiveTemplateInstantiations that are not actual instantiations and, /// therefore, should not be counted as part of the instantiation depth. unsigned NonInstantiationEntries; /// \brief The last template from which a template instantiation /// error or warning was produced. /// /// This value is used to suppress printing of redundant template /// instantiation backtraces when there are multiple errors in the /// same instantiation. FIXME: Does this belong in Sema? It's tough /// to implement it anywhere else. ActiveTemplateInstantiation LastTemplateInstantiationErrorContext; /// \brief The current index into pack expansion arguments that will be /// used for substitution of parameter packs. /// /// The pack expansion index will be -1 to indicate that parameter packs /// should be instantiated as themselves. Otherwise, the index specifies /// which argument within the parameter pack will be used for substitution. int ArgumentPackSubstitutionIndex; /// \brief RAII object used to change the argument pack substitution index /// within a \c Sema object. /// /// See \c ArgumentPackSubstitutionIndex for more information. 
class ArgumentPackSubstitutionIndexRAII {
  Sema &Self;
  int OldSubstitutionIndex;

public:
  // Save the current substitution index and install the new one.
  ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
    : Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
    Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
  }

  // Restore the saved substitution index on scope exit.
  ~ArgumentPackSubstitutionIndexRAII() {
    Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
  }
};

// NOTE(review): this friend names "ArgumentPackSubstitutionRAII", but the
// class declared just above is "ArgumentPackSubstitutionIndexRAII" -- this
// looks like a stale/typo'd friend declaration; confirm which is intended.
friend class ArgumentPackSubstitutionRAII;

/// \brief The stack of calls expression undergoing template instantiation.
///
/// The top of this stack is used by a fixit instantiating unresolved
/// function calls to fix the AST to match the textual change it prints.
SmallVector<CallExpr *, 8> CallsUndergoingInstantiation;

/// \brief For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
  SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;

/// \brief A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and evaluates true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
struct InstantiatingTemplate {
  /// \brief Note that we are instantiating a class template,
  /// function template, or a member thereof.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        Decl *Entity,
                        SourceRange InstantiationRange = SourceRange());

  // Tag type used to disambiguate the exception-specification constructor.
  struct ExceptionSpecification {};
  /// \brief Note that we are instantiating an exception specification
  /// of a function template.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionDecl *Entity, ExceptionSpecification,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating a default argument in a
  /// template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        FunctionTemplateDecl *FunctionTemplate,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        ActiveTemplateInstantiation::InstantiationKind Kind,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a class template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ClassTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are instantiating as part of template
  /// argument deduction for a variable template partial
  /// specialization.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        VarTemplatePartialSpecializationDecl *PartialSpec,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        sema::TemplateDeductionInfo &DeductionInfo,
                        SourceRange InstantiationRange = SourceRange());

  // Entity is a ParmVarDecl; presumably a default function argument
  // instantiation (see DefaultFunctionArgumentInstantiation above) --
  // TODO confirm against the out-of-line definition.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        ParmVarDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange = SourceRange());

  /// \brief Note that we are substituting prior template arguments into a
  /// non-type parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        NonTypeTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are substituting prior template arguments into a
  /// template template parameter.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        NamedDecl *Template,
                        TemplateTemplateParmDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we are checking the default template argument
  /// against the template parameter for a given template-id.
  InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
                        TemplateDecl *Template,
                        NamedDecl *Param,
                        ArrayRef<TemplateArgument> TemplateArgs,
                        SourceRange InstantiationRange);

  /// \brief Note that we have finished instantiating this template.
  void Clear();

  ~InstantiatingTemplate() { Clear(); }

  /// \brief Determines whether we have exceeded the maximum
  /// recursive template instantiations.
  bool isInvalid() const { return Invalid; }

private:
  Sema &SemaRef;
  bool Invalid;
  bool SavedInNonInstantiationSFINAEContext;
  bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
                               SourceRange InstantiationRange);

  // FIXME: Replace this with a constructor once we can use delegating
  // constructors in llvm.
  void Initialize(
      ActiveTemplateInstantiation::InstantiationKind Kind,
      SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
      Decl *Entity, NamedDecl *Template = nullptr,
      ArrayRef<TemplateArgument> TemplateArgs = ArrayRef<TemplateArgument>(),
      sema::TemplateDeductionInfo *DeductionInfo = nullptr);

  InstantiatingTemplate(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;

  InstantiatingTemplate&
  operator=(const InstantiatingTemplate&) LLVM_DELETED_FUNCTION;
};

/// \brief Print the stack of active template instantiations as diagnostics.
void PrintInstantiationStack();

/// \brief Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, contains the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;

/// \brief Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
bool isUnevaluatedContext() const {
  assert(!ExprEvalContexts.empty() &&
         "Must be in an expression evaluation context");
  return ExprEvalContexts.back().isUnevaluated();
}

/// \brief RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
class SFINAETrap { Sema &SemaRef; unsigned PrevSFINAEErrors; bool PrevInNonInstantiationSFINAEContext; bool PrevAccessCheckingSFINAE; public: explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false) : SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors), PrevInNonInstantiationSFINAEContext( SemaRef.InNonInstantiationSFINAEContext), PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE) { if (!SemaRef.isSFINAEContext()) SemaRef.InNonInstantiationSFINAEContext = true; SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE; } ~SFINAETrap() { SemaRef.NumSFINAEErrors = PrevSFINAEErrors; SemaRef.InNonInstantiationSFINAEContext = PrevInNonInstantiationSFINAEContext; SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE; } /// \brief Determine whether any SFINAE errors have been trapped. bool hasErrorOccurred() const { return SemaRef.NumSFINAEErrors > PrevSFINAEErrors; } }; /// \brief RAII class used to indicate that we are performing provisional /// semantic analysis to determine the validity of a construct, so /// typo-correction and diagnostics in the immediate context (not within /// implicitly-instantiated templates) should be suppressed. class TentativeAnalysisScope { Sema &SemaRef; // FIXME: Using a SFINAETrap for this is a hack. SFINAETrap Trap; bool PrevDisableTypoCorrection; public: explicit TentativeAnalysisScope(Sema &SemaRef) : SemaRef(SemaRef), Trap(SemaRef, true), PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) { SemaRef.DisableTypoCorrection = true; } ~TentativeAnalysisScope() { SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection; } }; /// \brief The current instantiation scope used to store local /// variables. LocalInstantiationScope *CurrentInstantiationScope; /// \brief Tracks whether we are in a context where typo correction is /// disabled. bool DisableTypoCorrection; /// \brief The number of typos corrected by CorrectTypo. 
unsigned TyposCorrected;

typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;

/// \brief A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;

/// \brief Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;

/// \brief An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation),
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;

/// \brief The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;

// RAII helper that swaps out (and later restores) the pending-instantiation
// and vtable-use queues, so nested work sees empty queues.
class SavePendingInstantiationsAndVTableUsesRAII {
public:
  SavePendingInstantiationsAndVTableUsesRAII(Sema &S): S(S) {
    SavedPendingInstantiations.swap(S.PendingInstantiations);
    SavedVTableUses.swap(S.VTableUses);
  }

  ~SavePendingInstantiationsAndVTableUsesRAII() {
    // Restore the set of pending vtables.
    assert(S.VTableUses.empty() &&
           "VTableUses should be empty before it is discarded.");
    S.VTableUses.swap(SavedVTableUses);

    // Restore the set of pending implicit instantiations.
    assert(S.PendingInstantiations.empty() &&
           "PendingInstantiations should be empty before it is discarded.");
    S.PendingInstantiations.swap(SavedPendingInstantiations);
  }

private:
  Sema &S;
  SmallVector<VTableUse, 16> SavedVTableUses;
  std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
};

/// \brief The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;

// RAII helper that swaps out (and later restores) the local
// implicit-instantiation queue.
class SavePendingLocalImplicitInstantiationsRAII {
public:
  SavePendingLocalImplicitInstantiationsRAII(Sema &S): S(S) {
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

  ~SavePendingLocalImplicitInstantiationsRAII() {
    assert(S.PendingLocalImplicitInstantiations.empty() &&
           "there shouldn't be any pending local implicit instantiations");
    SavedPendingLocalImplicitInstantiations.swap(
        S.PendingLocalImplicitInstantiations);
  }

private:
  Sema &S;
  std::deque<PendingImplicitInstantiation>
  SavedPendingLocalImplicitInstantiations;
};

/// \brief Perform any queued implicit template instantiations; with
/// \p LocalOnly, only the local queue is processed.
void PerformPendingInstantiations(bool LocalOnly = false);

/// \brief Substitute template arguments into the given type (and its
/// source info), for use within a template instantiation.
TypeSourceInfo *SubstType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

QualType SubstType(QualType T,
                   const MultiLevelTemplateArgumentList &TemplateArgs,
                   SourceLocation Loc, DeclarationName Entity);

TypeSourceInfo *SubstType(TypeLoc TL,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc, DeclarationName Entity);

/// \brief Substitute template arguments into a function declaration's type,
/// taking the 'this' context and qualifiers into account.
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
                          const MultiLevelTemplateArgumentList &TemplateArgs,
                          SourceLocation Loc,
                          DeclarationName Entity,
                          CXXRecordDecl *ThisContext,
                          unsigned ThisTypeQuals);
bool SubstExprs(Expr **Exprs, unsigned NumExprs, bool IsCall,
                const MultiLevelTemplateArgumentList &TemplateArgs,
                SmallVectorImpl<Expr *> &Outputs);

/// \brief Substitute template arguments into the given statement.
StmtResult SubstStmt(Stmt *S,
                     const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief Substitute template arguments into the given declaration,
/// producing an instantiated declaration in \p Owner.
Decl *SubstDecl(Decl *D, DeclContext *Owner,
                const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief Substitute template arguments into an initializer expression.
ExprResult SubstInitializer(Expr *E,
                       const MultiLevelTemplateArgumentList &TemplateArgs,
                       bool CXXDirectInit);

/// \brief Instantiate the base specifiers of \p Instantiation from those of
/// \p Pattern.
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
                    CXXRecordDecl *Pattern,
                    const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief Instantiate the definition of a class from its pattern.
bool
InstantiateClass(SourceLocation PointOfInstantiation,
                 CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
                 const MultiLevelTemplateArgumentList &TemplateArgs,
                 TemplateSpecializationKind TSK,
                 bool Complain = true);

/// \brief Instantiate the definition of an enumeration from its pattern.
bool InstantiateEnum(SourceLocation PointOfInstantiation,
                     EnumDecl *Instantiation, EnumDecl *Pattern,
                     const MultiLevelTemplateArgumentList &TemplateArgs,
                     TemplateSpecializationKind TSK);

/// \brief Instantiate an in-class initializer of a field from its pattern.
bool InstantiateInClassInitializer(
    SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
    FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief An attribute whose instantiation was deferred, together with the
/// scope and declaration it should eventually be attached to.
struct LateInstantiatedAttribute {
  const Attr *TmplAttr;
  LocalInstantiationScope *Scope;
  Decl *NewDecl;

  LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
                            Decl *D)
    : TmplAttr(A), Scope(S), NewDecl(D)
  { }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;

/// \brief Instantiate the attributes of \p Pattern onto \p Inst; attributes
/// that must be deferred are collected into \p LateAttrs.
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
                      const Decl *Pattern, Decl *Inst,
                      LateInstantiatedAttrVec *LateAttrs = nullptr,
                      LocalInstantiationScope *OuterMostScope = nullptr);

bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                           TemplateSpecializationKind TSK,
                           bool Complain = true);

/// \brief Instantiate the members of the given class.
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
                             CXXRecordDecl *Instantiation,
                             const MultiLevelTemplateArgumentList &TemplateArgs,
                             TemplateSpecializationKind TSK);

void InstantiateClassTemplateSpecializationMembers(
                                        SourceLocation PointOfInstantiation,
                           ClassTemplateSpecializationDecl *ClassTemplateSpec,
                                                TemplateSpecializationKind TSK);

NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
                           const MultiLevelTemplateArgumentList &TemplateArgs);

DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
                         const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
                  SourceLocation Loc,
                  const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
           TemplateArgumentListInfo &Result,
           const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief Instantiate a deferred exception specification for the given
/// function.
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
                              FunctionDecl *Function);

/// \brief Instantiate the definition of the given function from its
/// template pattern.
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
                                   FunctionDecl *Function,
                                   bool Recursive = false,
                                   bool DefinitionRequired = false);

VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
    VarTemplateDecl *VarTemplate, VarDecl *FromVar,
    const TemplateArgumentList &TemplateArgList,
    const TemplateArgumentListInfo &TemplateArgsInfo,
    SmallVectorImpl<TemplateArgument> &Converted,
    SourceLocation PointOfInstantiation, void *InsertPos,
    LateInstantiatedAttrVec *LateAttrs = nullptr,
    LocalInstantiationScope *StartingScope = nullptr);

VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
    VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void BuildVariableInstantiation(
    VarDecl *NewVar, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs,
    LateInstantiatedAttrVec *LateAttrs, DeclContext *Owner,
    LocalInstantiationScope *StartingScope,
    bool InstantiatingVarTemplate = false);

void InstantiateVariableInitializer(
    VarDecl *Var, VarDecl *OldVar,
    const MultiLevelTemplateArgumentList &TemplateArgs);

void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
                                   VarDecl *Var, bool Recursive = false,
                                   bool DefinitionRequired = false);
void InstantiateStaticDataMemberDefinition(
                                     SourceLocation PointOfInstantiation,
                                     VarDecl *Var,
                                     bool Recursive = false,
                                     bool DefinitionRequired = false);

void InstantiateMemInitializers(CXXConstructorDecl *New,
                                const CXXConstructorDecl *Tmpl,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

/// \brief Find the instantiation of the given declaration within the
/// current instantiation.
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
                          const MultiLevelTemplateArgumentList &TemplateArgs);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
                          const MultiLevelTemplateArgumentList &TemplateArgs);

// Objective-C declarations.
enum ObjCContainerKind {
  OCK_None = -1,
  OCK_Interface = 0,
  OCK_Protocol,
  OCK_Category,
  OCK_ClassExtension,
  OCK_Implementation,
  OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;

Decl *ActOnStartClassInterface(SourceLocation AtInterfaceLoc,
                               IdentifierInfo *ClassName,
                               SourceLocation ClassLoc,
                               IdentifierInfo *SuperName,
                               SourceLocation SuperLoc,
                               Decl * const *ProtoRefs,
                               unsigned NumProtoRefs,
                               const SourceLocation *ProtoLocs,
                               SourceLocation EndProtoLoc,
                               AttributeList *AttrList);

void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
                             IdentifierInfo *SuperName,
                             SourceLocation SuperLoc);

Decl *ActOnCompatibilityAlias(
                  SourceLocation AtCompatibilityAliasLoc,
                  IdentifierInfo *AliasName,  SourceLocation AliasLocation,
                  IdentifierInfo *ClassName, SourceLocation ClassLocation);

bool CheckForwardProtocolDeclarationForCircularDependency(
  IdentifierInfo *PName,
  SourceLocation &PLoc, SourceLocation PrevLoc,
  const ObjCList<ObjCProtocolDecl> &PList);

Decl *ActOnStartProtocolInterface(
                  SourceLocation AtProtoInterfaceLoc,
                  IdentifierInfo *ProtocolName, SourceLocation ProtocolLoc,
                  Decl * const *ProtoRefNames, unsigned NumProtoRefs,
                  const SourceLocation *ProtoLocs,
                  SourceLocation EndProtoLoc,
                  AttributeList *AttrList);

Decl *ActOnStartCategoryInterface(SourceLocation AtInterfaceLoc,
                                  IdentifierInfo *ClassName,
                                  SourceLocation ClassLoc,
                                  IdentifierInfo *CategoryName,
                                  SourceLocation CategoryLoc,
                                  Decl * const *ProtoRefs,
                                  unsigned NumProtoRefs,
                                  const SourceLocation *ProtoLocs,
                                  SourceLocation EndProtoLoc);

Decl *ActOnStartClassImplementation(
                  SourceLocation AtClassImplLoc,
                  IdentifierInfo *ClassName, SourceLocation ClassLoc,
                  IdentifierInfo *SuperClassname,
                  SourceLocation SuperClassLoc);

Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
                                       IdentifierInfo *ClassName,
                                       SourceLocation ClassLoc,
                                       IdentifierInfo *CatName,
                                       SourceLocation CatLoc);

DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
                                             ArrayRef<Decl *> Decls);

DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
                 IdentifierInfo **IdentList,
                 SourceLocation *IdentLocs,
                 unsigned NumElts);

DeclGroupPtrTy ActOnForwardProtocolDeclaration(SourceLocation AtProtoclLoc,
                                       const IdentifierLocPair *IdentList,
                                       unsigned NumElts,
                                       AttributeList *attrList);

void FindProtocolDeclaration(bool WarnOnDeclarations,
                             const IdentifierLocPair *ProtocolId,
                             unsigned NumProtocols,
                             SmallVectorImpl<Decl *> &Protocols);

/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
                                 SourceLocation Loc,
                                 unsigned &Attributes,
                                 bool propertyInPrimaryClass);

/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
/// \param CD The semantic container for the property
/// \param redeclaredProperty Declaration for property if redeclared
///        in class extension.
/// \param lexicalDC Container for redeclaredProperty.
void ProcessPropertyDecl(ObjCPropertyDecl *property,
                         ObjCContainerDecl *CD,
                         ObjCPropertyDecl *redeclaredProperty = nullptr,
                         ObjCContainerDecl *lexicalDC = nullptr);

/// \brief Diagnose a mismatch between a property and the property it
/// overrides or redeclares.
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
                              ObjCPropertyDecl *SuperProperty,
                              const IdentifierInfo *Name,
                              bool OverridingProtocolProperty);

void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
                                      ObjCInterfaceDecl *ID);

Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
                 ArrayRef<Decl *> allMethods = None,
                 ArrayRef<DeclGroupPtrTy> allTUVars = None);

Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
                    SourceLocation LParenLoc,
                    FieldDeclarator &FD, ObjCDeclSpec &ODS,
                    Selector GetterSel, Selector SetterSel,
                    bool *OverridingProperty,
                    tok::ObjCKeywordKind MethodImplKind,
                    DeclContext *lexicalDC = nullptr);

Decl *ActOnPropertyImplDecl(Scope *S,
                            SourceLocation AtLoc,
                            SourceLocation PropertyLoc,
                            bool ImplKind,
                            IdentifierInfo *PropertyId,
                            IdentifierInfo *PropertyIvar,
                            SourceLocation PropertyIvarLoc);

enum ObjCSpecialMethodKind {
  OSMK_None,
  OSMK_Alloc,
  OSMK_New,
  OSMK_Copy,
  OSMK_RetainingInit,
  OSMK_NonRetainingInit
};

/// \brief Parsed information about one selector argument of an Objective-C
/// method declaration.
struct ObjCArgInfo {
  IdentifierInfo *Name;
  SourceLocation NameLoc;
  // The Type is null if no type was specified, and the DeclSpec is invalid
  // in this case.
  ParsedType Type;
  ObjCDeclSpec DeclSpec;

  /// ArgAttrs - Attribute list for this argument.
  AttributeList *ArgAttrs;
};

Decl *ActOnMethodDeclaration(
    Scope *S,
    SourceLocation BeginLoc, // location of the + or -.
    SourceLocation EndLoc,   // location of the ; or {.
    tok::TokenKind MethodType,
    ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
    ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
    // optional arguments. The number of types/arguments is obtained
    // from the Sel.getNumArgs().
    ObjCArgInfo *ArgInfo,
    DeclaratorChunk::ParamInfo *CParamInfo, unsigned CNumArgs, // c-style args
    AttributeList *AttrList, tok::ObjCKeywordKind MethodImplKind,
    bool isVariadic, bool MethodDefinition);

ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
                                            const ObjCObjectPointerType *OPT,
                                            bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
                                         bool IsInstance);

bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);

ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
                          Expr *BaseExpr,
                          SourceLocation OpLoc,
                          DeclarationName MemberName,
                          SourceLocation MemberLoc,
                          SourceLocation SuperLoc, QualType SuperType,
                          bool Super);

ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
                          IdentifierInfo &propertyName,
                          SourceLocation receiverNameLoc,
                          SourceLocation propertyNameLoc);

ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);

/// \brief Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
enum ObjCMessageKind {
  /// \brief The message is sent to 'super'.
  ObjCSuperMessage,
  /// \brief The message is an instance message.
  ObjCInstanceMessage,
  /// \brief The message is a class message, and the identifier is a type
  /// name.
  ObjCClassMessage
};

ObjCMessageKind getObjCMessageKind(Scope *S,
                                   IdentifierInfo *Name,
                                   SourceLocation NameLoc,
                                   bool IsSuper,
                                   bool HasTrailingDot,
                                   ParsedType &ReceiverType);

ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
                             QualType ReceiverType,
                             SourceLocation SuperLoc,
                             Selector Sel,
                             ObjCMethodDecl *Method,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args,
                             bool isImplicit = false);

ExprResult BuildClassMessageImplicit(QualType ReceiverType,
                                     bool isSuperReceiver,
                                     SourceLocation Loc,
                                     Selector Sel,
                                     ObjCMethodDecl *Method,
                                     MultiExprArg Args);

ExprResult ActOnClassMessage(Scope *S,
                             ParsedType Receiver,
                             Selector Sel,
                             SourceLocation LBracLoc,
                             ArrayRef<SourceLocation> SelectorLocs,
                             SourceLocation RBracLoc,
                             MultiExprArg Args);

ExprResult BuildInstanceMessage(Expr *Receiver,
                                QualType ReceiverType,
                                SourceLocation SuperLoc,
                                Selector Sel,
                                ObjCMethodDecl *Method,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args,
                                bool isImplicit = false);

ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
                                        QualType ReceiverType,
                                        SourceLocation Loc,
                                        Selector Sel,
                                        ObjCMethodDecl *Method,
                                        MultiExprArg Args);

ExprResult ActOnInstanceMessage(Scope *S,
                                Expr *Receiver,
                                Selector Sel,
                                SourceLocation LBracLoc,
                                ArrayRef<SourceLocation> SelectorLocs,
                                SourceLocation RBracLoc,
                                MultiExprArg Args);

ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                TypeSourceInfo *TSInfo,
                                Expr *SubExpr);

ExprResult ActOnObjCBridgedCast(Scope *S,
                                SourceLocation LParenLoc,
                                ObjCBridgeCastKind Kind,
                                SourceLocation BridgeKeywordLoc,
                                ParsedType Type,
                                SourceLocation RParenLoc,
                                Expr *SubExpr);

void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);

bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
                                   CastKind &Kind);

bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
                                      QualType DestType, QualType SrcType,
                                      ObjCInterfaceDecl *&RelatedClass,
                                      ObjCMethodDecl *&ClassMethod,
                                      ObjCMethodDecl *&InstanceMethod,
                                      TypedefNameDecl *&TDNDecl,
                                      bool CfToNs);

bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
                                       QualType DestType, QualType SrcType,
                                       Expr *&SrcExpr);

bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr);

bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);

/// \brief Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
                             const ObjCMethodDecl *Overridden);

/// \brief Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
  RTC_Compatible,
  RTC_Incompatible,
  RTC_Unknown
};

void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
                              ObjCInterfaceDecl *CurrentClass,
                              ResultTypeCompatibilityKind RTC);

enum PragmaOptionsAlignKind {
  POAK_Native,  // #pragma options align=native
  POAK_Natural, // #pragma options align=natural
  POAK_Packed,  // #pragma options align=packed
  POAK_Power,   // #pragma options align=power
  POAK_Mac68k,  // #pragma options align=mac68k
  POAK_Reset    // #pragma options align=reset
};

/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
                             SourceLocation PragmaLoc);

enum PragmaPackKind {
  PPK_Default, // #pragma pack([n])
  PPK_Show,    // #pragma pack(show), only supported by MSVC.
  PPK_Push,    // #pragma pack(push, [identifier], [n])
  PPK_Pop      // #pragma pack(pop, [identifier], [n])
};

enum PragmaMSStructKind {
  PMSST_OFF, // #pragma ms_struct off
  PMSST_ON   // #pragma ms_struct on
};

enum PragmaMSCommentKind {
  PCK_Unknown,
  PCK_Linker,   // #pragma comment(linker, ...)
  PCK_Lib,      // #pragma comment(lib, ...)
  PCK_Compiler, // #pragma comment(compiler, ...)
  PCK_ExeStr,   // #pragma comment(exestr, ...)
  PCK_User      // #pragma comment(user, ...)
};

/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
void ActOnPragmaPack(PragmaPackKind Kind,
                     IdentifierInfo *Name,
                     Expr *Alignment,
                     SourceLocation PragmaLoc,
                     SourceLocation LParenLoc,
                     SourceLocation RParenLoc);

/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);

/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
void ActOnPragmaMSComment(PragmaMSCommentKind Kind, StringRef Arg);

/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
    LangOptions::PragmaMSPointersToMembersKind Kind,
    SourceLocation PragmaLoc);

/// \brief Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaVtorDispKind Kind, SourceLocation PragmaLoc,
                           MSVtorDispAttr::Mode Value);

enum PragmaSectionKind {
  PSK_DataSeg,
  PSK_BSSSeg,
  PSK_ConstSeg,
  PSK_CodeSeg,
};

bool UnifySection(StringRef SectionName,
                  int SectionFlags,
                  DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
                  int SectionFlags,
                  SourceLocation PragmaSectionLocation);

/// \brief Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
                      PragmaMsStackAction Action,
                      llvm::StringRef StackSlotLabel,
                      StringLiteral *SegmentName,
                      llvm::StringRef PragmaName);

/// \brief Called on well formed \#pragma section().
void ActOnPragmaMSSection(SourceLocation PragmaLocation, int SectionFlags, StringLiteral *SegmentName); /// \brief Called on well-formed \#pragma init_seg(). void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation, StringLiteral *SegmentName); /// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch void ActOnPragmaDetectMismatch(StringRef Name, StringRef Value); /// ActOnPragmaUnused - Called on well-formed '\#pragma unused'. void ActOnPragmaUnused(const Token &Identifier, Scope *curScope, SourceLocation PragmaLoc); /// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... . void ActOnPragmaVisibility(const IdentifierInfo* VisType, SourceLocation PragmaLoc); NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II, SourceLocation Loc); void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W); /// ActOnPragmaWeakID - Called on well formed \#pragma weak ident. void ActOnPragmaWeakID(IdentifierInfo* WeakName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc); /// ActOnPragmaRedefineExtname - Called on well formed /// \#pragma redefine_extname oldname newname. void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident. void ActOnPragmaWeakAlias(IdentifierInfo* WeakName, IdentifierInfo* AliasName, SourceLocation PragmaLoc, SourceLocation WeakNameLoc, SourceLocation AliasNameLoc); /// ActOnPragmaFPContract - Called on well formed /// \#pragma {STDC,OPENCL} FP_CONTRACT void ActOnPragmaFPContract(tok::OnOffSwitch OOS); /// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to /// the record decl, to handle '\#pragma pack' and '\#pragma options align'. void AddAlignmentAttributesForRecord(RecordDecl *RD); /// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record. 
void AddMsStructLayoutForRecord(RecordDecl *RD); /// FreePackedContext - Deallocate and null out PackContext. void FreePackedContext(); /// PushNamespaceVisibilityAttr - Note that we've entered a /// namespace with a visibility attribute. void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr, SourceLocation Loc); /// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used, /// add an appropriate visibility attribute. void AddPushedVisibilityAttribute(Decl *RD); /// PopPragmaVisibility - Pop the top element of the visibility stack; used /// for '\#pragma GCC visibility' and visibility attributes on namespaces. void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc); /// FreeVisContext - Deallocate and null out VisContext. void FreeVisContext(); /// AddCFAuditedAttribute - Check whether we're currently within /// '\#pragma clang arc_cf_code_audited' and, if so, consider adding /// the appropriate attribute. void AddCFAuditedAttribute(Decl *D); /// \brief Called on well formed \#pragma clang optimize. void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc); /// \brief Get the location for the currently active "\#pragma clang optimize /// off". If this location is invalid, then the state of the pragma is "on". SourceLocation getOptimizeOffPragmaLocation() const { return OptimizeOffPragmaLocation; } /// \brief Only called on function definitions; if there is a pragma in scope /// with the effect of a range-based optnone, consider marking the function /// with attribute optnone. void AddRangeBasedOptnone(FunctionDecl *FD); /// \brief Adds the 'optnone' attribute to the function declaration if there /// are no conflicts; Loc represents the location causing the 'optnone' /// attribute to be added (usually because of a pragma). void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc); /// AddAlignedAttr - Adds an aligned attribute to a particular declaration. 
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex, bool IsPackExpansion); void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T, unsigned SpellingListIndex, bool IsPackExpansion); /// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular /// declaration. void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE, unsigned SpellingListIndex); /// AddAlignValueAttr - Adds an align_value attribute to a particular /// declaration. void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E, unsigned SpellingListIndex); // OpenMP directives and clauses. private: void *VarDataSharingAttributesStack; /// \brief Initialization of data-sharing attributes stack. void InitDataSharingAttributesStack(); void DestroyDataSharingAttributesStack(); ExprResult VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind); /// \brief Checks if the specified variable is used in one of the private /// clauses in OpenMP constructs. bool IsOpenMPCapturedVar(VarDecl *VD); public: ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc, Expr *Op); /// \brief Called on start of new data sharing attribute block. void StartOpenMPDSABlock(OpenMPDirectiveKind K, const DeclarationNameInfo &DirName, Scope *CurScope, SourceLocation Loc); /// \brief Called on end of data sharing attribute block. void EndOpenMPDSABlock(Stmt *CurDirective); // OpenMP directives and clauses. /// \brief Called on correct id-expression from the '#pragma omp /// threadprivate'. ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec, const DeclarationNameInfo &Id); /// \brief Called on well-formed '#pragma omp threadprivate'. DeclGroupPtrTy ActOnOpenMPThreadprivateDirective( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Builds a new OpenMPThreadPrivateDecl and checks its correctness. 
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl( SourceLocation Loc, ArrayRef<Expr *> VarList); /// \brief Initialization of captured region for OpenMP region. void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope); StmtResult ActOnOpenMPExecutableDirective(OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName, ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for' after parsing /// of the associated statement. StmtResult ActOnOpenMPForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp for simd' after parsing /// of the associated statement. StmtResult ActOnOpenMPForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp sections' after parsing /// of the associated statement. StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp section' after parsing of the /// associated statement. 
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp single' after parsing of the /// associated statement. StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp master' after parsing of the /// associated statement. StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp critical' after parsing of the /// associated statement. StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp parallel for' after parsing /// of the associated statement. StmtResult ActOnOpenMPParallelForDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel for simd' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelForSimdDirective( ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc, llvm::DenseMap<VarDecl *, Expr *> &VarsWithImplicitDSA); /// \brief Called on well-formed '\#pragma omp parallel sections' after /// parsing of the associated statement. StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp task' after parsing of the /// associated statement. StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskyield'. 
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp barrier'. StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp taskwait'. StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp flush'. StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp ordered' after parsing of the /// associated statement. StmtResult ActOnOpenMPOrderedDirective(Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp atomic' after parsing of the /// associated statement. StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp target' after parsing of the /// associated statement. StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed '\#pragma omp teams' after parsing of the /// associated statement. StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'if' clause. OMPClause *ActOnOpenMPIfClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'final' clause. 
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'num_threads' clause. OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'safelen' clause. OMPClause *ActOnOpenMPSafelenClause(Expr *Length, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'collapse' clause. OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind, unsigned Argument, SourceLocation ArgumentLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'default' clause. OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'proc_bind' clause. OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind, SourceLocation KindLoc, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPSingleExprWithArgClause(OpenMPClauseKind Kind, unsigned Argument, Expr *Expr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ArgumentLoc, SourceLocation CommaLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'schedule' clause. OMPClause *ActOnOpenMPScheduleClause(OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc); OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'ordered' clause. 
OMPClause *ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'nowait' clause. OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'untied' clause. OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'mergeable' clause. OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'read' clause. OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'write' clause. OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'update' clause. OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'capture' clause. OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'seq_cst' clause. OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc, SourceLocation EndLoc); OMPClause * ActOnOpenMPVarListClause(OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'private' clause. OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'firstprivate' clause. OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'lastprivate' clause. 
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'shared' clause. OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'reduction' clause. OMPClause * ActOnOpenMPReductionClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc, CXXScopeSpec &ReductionIdScopeSpec, const DeclarationNameInfo &ReductionId); /// \brief Called on well-formed 'linear' clause. OMPClause *ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'aligned' clause. OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList, Expr *Alignment, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyin' clause. OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'copyprivate' clause. OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief Called on well-formed 'flush' pseudo clause. OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList, SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc); /// \brief The kind of conversion being performed. enum CheckedConversionKind { /// \brief An implicit conversion. CCK_ImplicitConversion, /// \brief A C-style cast. CCK_CStyleCast, /// \brief A functional-style cast. CCK_FunctionalCast, /// \brief A cast other than a C-style cast. 
CCK_OtherCast }; /// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit /// cast. If there is already an implicit cast, merge into the existing one. /// If isLvalue, the result of the cast is an lvalue. ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK, ExprValueKind VK = VK_RValue, const CXXCastPath *BasePath = nullptr, CheckedConversionKind CCK = CCK_ImplicitConversion); /// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding /// to the conversion from scalar type ScalarTy to the Boolean type. static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy); /// IgnoredValueConversions - Given that an expression's result is /// syntactically ignored, perform any conversions that are /// required. ExprResult IgnoredValueConversions(Expr *E); // UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts // functions and arrays to their respective pointers (C99 6.3.2.1). ExprResult UsualUnaryConversions(Expr *E); /// CallExprUnaryConversions - a special case of an unary conversion /// performed on a function designator of a call expression. ExprResult CallExprUnaryConversions(Expr *E); // DefaultFunctionArrayConversion - converts functions and arrays // to their respective pointers (C99 6.3.2.1). ExprResult DefaultFunctionArrayConversion(Expr *E); // DefaultFunctionArrayLvalueConversion - converts functions and // arrays to their respective pointers and performs the // lvalue-to-rvalue conversion. ExprResult DefaultFunctionArrayLvalueConversion(Expr *E); // DefaultLvalueConversion - performs lvalue-to-rvalue conversion on // the operand. This is DefaultFunctionArrayLvalueConversion, // except that it assumes the operand isn't of function or array // type. ExprResult DefaultLvalueConversion(Expr *E); // DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that // do not have a prototype. 
Integer promotions are performed on each // argument, and arguments that have type float are promoted to double. ExprResult DefaultArgumentPromotion(Expr *E); // Used for emitting the right warning by DefaultVariadicArgumentPromotion enum VariadicCallType { VariadicFunction, VariadicBlock, VariadicMethod, VariadicConstructor, VariadicDoesNotApply }; VariadicCallType getVariadicCallType(FunctionDecl *FDecl, const FunctionProtoType *Proto, Expr *Fn); // Used for determining in which context a type is allowed to be passed to a // vararg function. enum VarArgKind { VAK_Valid, VAK_ValidInCXX11, VAK_Undefined, VAK_MSVCUndefined, VAK_Invalid }; // Determines which VarArgKind fits an expression. VarArgKind isValidVarArgType(const QualType &Ty); /// Check to see if the given expression is a valid argument to a variadic /// function, issuing a diagnostic if not. void checkVariadicArgument(const Expr *E, VariadicCallType CT); /// Check to see if a given expression could have '.c_str()' called on it. bool hasCStrMethod(const Expr *E); /// GatherArgumentsForCall - Collects argument expressions for various /// forms of call prototypes. bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl, const FunctionProtoType *Proto, unsigned FirstParam, ArrayRef<Expr *> Args, SmallVectorImpl<Expr *> &AllArgs, VariadicCallType CallType = VariadicDoesNotApply, bool AllowExplicit = false, bool IsListInitialization = false); // DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but // will create a runtime trap if the resulting type is not a POD type. ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT, FunctionDecl *FDecl); // UsualArithmeticConversions - performs the UsualUnaryConversions on its // operands and then handles various conversions that are common to binary // operators (C99 6.3.1.8). If both operands aren't arithmetic, this // routine returns the first non-arithmetic type found. 
The client is // responsible for emitting appropriate error diagnostics. QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS, bool IsCompAssign = false); /// AssignConvertType - All of the 'assignment' semantic checks return this /// enum to indicate whether the assignment was allowed. These checks are /// done for simple assignments, as well as initialization, return from /// function, argument passing, etc. The query is phrased in terms of a /// source and destination type. enum AssignConvertType { /// Compatible - the types are compatible according to the standard. Compatible, /// PointerToInt - The assignment converts a pointer to an int, which we /// accept as an extension. PointerToInt, /// IntToPointer - The assignment converts an int to a pointer, which we /// accept as an extension. IntToPointer, /// FunctionVoidPointer - The assignment is between a function pointer and /// void*, which the standard doesn't allow, but we accept as an extension. FunctionVoidPointer, /// IncompatiblePointer - The assignment is between two pointer types that /// are not compatible, but we accept them as an extension. IncompatiblePointer, /// IncompatiblePointerSign - The assignment is between two pointer types which /// point to integers which have a different sign, but are otherwise /// identical. This is a subset of the above, but broken out because it's by /// far the most common case of incompatible pointers. IncompatiblePointerSign, /// CompatiblePointerDiscardsQualifiers - The assignment discards /// c/v/r qualifiers, which we accept as an extension. CompatiblePointerDiscardsQualifiers, /// IncompatiblePointerDiscardsQualifiers - The assignment /// discards qualifiers that we don't permit to be discarded, /// like address spaces. IncompatiblePointerDiscardsQualifiers, /// IncompatibleNestedPointerQualifiers - The assignment is between two /// nested pointer types, and the qualifiers other than the first two /// levels differ e.g. 
char ** -> const char **, but we accept them as an /// extension. IncompatibleNestedPointerQualifiers, /// IncompatibleVectors - The assignment is between two vector types that /// have the same size, which we accept as an extension. IncompatibleVectors, /// IntToBlockPointer - The assignment converts an int to a block /// pointer. We disallow this. IntToBlockPointer, /// IncompatibleBlockPointer - The assignment is between two block /// pointer types that are not compatible. IncompatibleBlockPointer, /// IncompatibleObjCQualifiedId - The assignment is between a qualified /// id type and something else (that is incompatible with it). For example, /// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol. IncompatibleObjCQualifiedId, /// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an /// object with __weak qualifier. IncompatibleObjCWeakRef, /// Incompatible - We reject this conversion outright, it is invalid to /// represent it in the AST. Incompatible }; /// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the /// assignment conversion type specified by ConvTy. This returns true if the /// conversion was invalid or false if the conversion was accepted. bool DiagnoseAssignmentResult(AssignConvertType ConvTy, SourceLocation Loc, QualType DstType, QualType SrcType, Expr *SrcExpr, AssignmentAction Action, bool *Complained = nullptr); /// IsValueInFlagEnum - Determine if a value is allowed as part of a flag /// enum. If AllowMask is true, then we also allow the complement of a valid /// value, to be used as a mask. bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val, bool AllowMask) const; /// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant /// integer not in the range of enum values. 
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType, Expr *SrcExpr); /// CheckAssignmentConstraints - Perform type checking for assignment, /// argument passing, variable initialization, and function return values. /// C99 6.5.16. AssignConvertType CheckAssignmentConstraints(SourceLocation Loc, QualType LHSType, QualType RHSType); /// Check assignment constraints and prepare for a conversion of the /// RHS to the LHS type. AssignConvertType CheckAssignmentConstraints(QualType LHSType, ExprResult &RHS, CastKind &Kind); // CheckSingleAssignmentConstraints - Currently used by // CheckAssignmentOperands, and ActOnReturnStmt. Prior to type checking, // this routine performs the default function/array conversions. AssignConvertType CheckSingleAssignmentConstraints(QualType LHSType, ExprResult &RHS, bool Diagnose = true, bool DiagnoseCFAudited = false); /// \brief If the lhs type is a transparent union, check whether we /// can initialize the transparent union with the given expression. AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType, ExprResult &RHS); bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType); bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit = false); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, AssignmentAction Action, bool AllowExplicit, ImplicitConversionSequence& ICS); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const ImplicitConversionSequence& ICS, AssignmentAction Action, CheckedConversionKind CCK = CCK_ImplicitConversion); ExprResult PerformImplicitConversion(Expr *From, QualType ToType, const StandardConversionSequence& SCS, AssignmentAction Action, CheckedConversionKind CCK); /// The following "Check" methods will return a valid/converted QualType /// or a null QualType (indicating an error diagnostic was issued). 
/// type checking binary operators (subroutines of CreateBuiltinBinOp). QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS, ExprResult &RHS); QualType CheckPointerToMemberOperands( // C++ 5.5 ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, SourceLocation OpLoc, bool isIndirect); QualType CheckMultiplyDivideOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign, bool IsDivide); QualType CheckRemainderOperands( // C99 6.5.5 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckAdditionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, QualType* CompLHSTy = nullptr); QualType CheckSubtractionOperands( // C99 6.5.6 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, QualType* CompLHSTy = nullptr); QualType CheckShiftOperands( // C99 6.5.7 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc, bool IsCompAssign = false); QualType CheckCompareOperands( // C99 6.5.8/9 ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned OpaqueOpc, bool isRelational); QualType CheckBitwiseOperands( // C99 6.5.[10...12] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign = false); QualType CheckLogicalOperands( // C99 6.5.[13,14] ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, unsigned Opc); // CheckAssignmentOperands is used for both simple and compound assignment. // For simple assignment, pass both expressions and a null converted type. // For compound assignment, pass both expressions and the converted type. 
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2] Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType); ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc, UnaryOperatorKind Opcode, Expr *Op); ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc, BinaryOperatorKind Opcode, Expr *LHS, Expr *RHS); ExprResult checkPseudoObjectRValue(Expr *E); Expr *recreateSyntacticForm(PseudoObjectExpr *E); QualType CheckConditionalOperands( // C99 6.5.15 ExprResult &Cond, ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc); QualType CXXCheckConditionalOperands( // C++ 5.16 ExprResult &cond, ExprResult &lhs, ExprResult &rhs, ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc); QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2, bool *NonStandardCompositeType = nullptr); QualType FindCompositePointerType(SourceLocation Loc, ExprResult &E1, ExprResult &E2, bool *NonStandardCompositeType = nullptr) { Expr *E1Tmp = E1.get(), *E2Tmp = E2.get(); QualType Composite = FindCompositePointerType(Loc, E1Tmp, E2Tmp, NonStandardCompositeType); E1 = E1Tmp; E2 = E2Tmp; return Composite; } QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS, SourceLocation QuestionLoc); bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr, SourceLocation QuestionLoc); void DiagnoseAlwaysNonNullPointer(Expr *E, Expr::NullPointerConstantKind NullType, bool IsEqual, SourceRange Range); /// type checking for vector binary operators. 
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign); QualType GetSignedVectorType(QualType V); QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool isRelational); QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS, SourceLocation Loc); bool isLaxVectorConversion(QualType srcType, QualType destType); /// type checking declaration initializers (C99 6.7.8) bool CheckForConstantInitializer(Expr *e, QualType t); // type checking C++ declaration initializers (C++ [dcl.init]). /// ReferenceCompareResult - Expresses the result of comparing two /// types (cv1 T1 and cv2 T2) to determine their compatibility for the /// purposes of initialization by reference (C++ [dcl.init.ref]p4). enum ReferenceCompareResult { /// Ref_Incompatible - The two types are incompatible, so direct /// reference binding is not possible. Ref_Incompatible = 0, /// Ref_Related - The two types are reference-related, which means /// that their unqualified forms (T1 and T2) are either the same /// or T1 is a base class of T2. Ref_Related, /// Ref_Compatible_With_Added_Qualification - The two types are /// reference-compatible with added qualification, meaning that /// they are reference-compatible and the qualifiers on T1 (cv1) /// are greater than the qualifiers on T2 (cv2). Ref_Compatible_With_Added_Qualification, /// Ref_Compatible - The two types are reference-compatible and /// have equivalent qualifiers (cv1 == cv2). Ref_Compatible }; ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc, QualType T1, QualType T2, bool &DerivedToBase, bool &ObjCConversion, bool &ObjCLifetimeConversion); ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType, Expr *CastExpr, CastKind &CastKind, ExprValueKind &VK, CXXCastPath &Path); /// \brief Force an expression with unknown-type to an expression of the /// given type. 
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType); /// \brief Type-check an expression that's being passed to an /// __unknown_anytype parameter. ExprResult checkUnknownAnyArg(SourceLocation callLoc, Expr *result, QualType &paramType); // CheckVectorCast - check type constraints for vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size. // returns true if the cast is invalid bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty, CastKind &Kind); // CheckExtVectorCast - check type constraints for extended vectors. // Since vectors are an extension, there is no C standard reference for this. // We allow casting between vectors and integer datatypes of the same size, // or vectors and the element type of that vector. // returns the cast expr ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr, CastKind &Kind); ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, SourceLocation LParenLoc, Expr *CastExpr, SourceLocation RParenLoc); enum ARCConversionResult { ACR_okay, ACR_unbridged }; /// \brief Checks for invalid conversions and casts between /// retainable pointers and other pointer kinds. ARCConversionResult CheckObjCARCConversion(SourceRange castRange, QualType castType, Expr *&op, CheckedConversionKind CCK, bool DiagnoseCFAudited = false, BinaryOperatorKind Opc = BO_PtrMemD ); Expr *stripARCUnbridgedCast(Expr *e); void diagnoseARCUnbridgedCast(Expr *e); bool CheckObjCARCUnavailableWeakConversion(QualType castType, QualType ExprType); /// checkRetainCycles - Check whether an Objective-C message send /// might create an obvious retain cycle. void checkRetainCycles(ObjCMessageExpr *msg); void checkRetainCycles(Expr *receiver, Expr *argument); void checkRetainCycles(VarDecl *Var, Expr *Init); /// checkUnsafeAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained type. 
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS); /// checkUnsafeExprAssigns - Check whether +1 expr is being assigned /// to weak/__unsafe_unretained expression. void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS); /// CheckMessageArgumentTypes - Check types in an Obj-C message send. /// \param Method - May be null. /// \param [out] ReturnType - The return type of the send. /// \return true iff there were any incompatible types. bool CheckMessageArgumentTypes(QualType ReceiverType, MultiExprArg Args, Selector Sel, ArrayRef<SourceLocation> SelectorLocs, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage, SourceLocation lbrac, SourceLocation rbrac, SourceRange RecRange, QualType &ReturnType, ExprValueKind &VK); /// \brief Determine the result of a message send expression based on /// the type of the receiver, the method expected to receive the message, /// and the form of the message send. QualType getMessageSendResultType(QualType ReceiverType, ObjCMethodDecl *Method, bool isClassMessage, bool isSuperMessage); /// \brief If the given expression involves a message send to a method /// with a related result type, emit a note describing what happened. void EmitRelatedResultTypeNote(const Expr *E); /// \brief Given that we had incompatible pointer types in a return /// statement, check whether we're in a method with a related result /// type, and if so, emit a note describing what happened. void EmitRelatedResultTypeNoteForReturn(QualType destType); /// CheckBooleanCondition - Diagnose problems involving the use of /// the given expression as a boolean condition (e.g. in an if /// statement). Also performs the standard function and array /// decays, possibly changing the input variable. /// /// \param Loc - A location associated with the condition, e.g. the /// 'if' keyword. 
/// \return true iff there were any errors ExprResult CheckBooleanCondition(Expr *E, SourceLocation Loc); ExprResult ActOnBooleanCondition(Scope *S, SourceLocation Loc, Expr *SubExpr); /// DiagnoseAssignmentAsCondition - Given that an expression is /// being used as a boolean condition, warn if it's an assignment. void DiagnoseAssignmentAsCondition(Expr *E); /// \brief Redundant parentheses over an equality comparison can indicate /// that the user intended an assignment used as condition. void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE); /// CheckCXXBooleanCondition - Returns true if conversion to bool is invalid. ExprResult CheckCXXBooleanCondition(Expr *CondExpr); /// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have /// the specified width and sign. If an overflow occurs, detect it and emit /// the specified diagnostic. void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal, unsigned NewWidth, bool NewSign, SourceLocation Loc, unsigned DiagID); /// Checks that the Objective-C declaration is declared in the global scope. /// Emits an error and marks the declaration as invalid if it's not declared /// in the global scope. bool CheckObjCDeclScope(Decl *D); /// \brief Abstract base class used for diagnosing integer constant /// expression violations. class VerifyICEDiagnoser { public: bool Suppress; VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { } virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0; virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR); virtual ~VerifyICEDiagnoser() { } }; /// VerifyIntegerConstantExpression - Verifies that an expression is an ICE, /// and reports the appropriate diagnostics. Returns false on success. /// Can optionally return the value of the expression. 
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, VerifyICEDiagnoser &Diagnoser, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result, unsigned DiagID, bool AllowFold = true); ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result = nullptr); /// VerifyBitField - verifies that a bit field expression is an ICE and has /// the correct width, and that the field type is valid. /// Returns false on success. /// Can optionally return whether the bit-field is of width 0 ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName, QualType FieldTy, bool IsMsStruct, Expr *BitWidth, bool *ZeroWidth = nullptr); enum CUDAFunctionTarget { CFT_Device, CFT_Global, CFT_Host, CFT_HostDevice, CFT_InvalidTarget }; CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D); bool CheckCUDATarget(const FunctionDecl *Caller, const FunctionDecl *Callee); /// Given a implicit special member, infer its CUDA target from the /// calls it needs to make to underlying base/field special members. /// \param ClassDecl the class for which the member is being created. /// \param CSM the kind of special member. /// \param MemberDecl the special member itself. /// \param ConstRHS true if this is a copy operation with a const object on /// its RHS. /// \param Diagnose true if this call should emit diagnostics. /// \return true if there was an error inferring. /// The result of this call is implicit CUDA target attribute(s) attached to /// the member declaration. bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl, CXXSpecialMember CSM, CXXMethodDecl *MemberDecl, bool ConstRHS, bool Diagnose); /// \name Code completion //@{ /// \brief Describes the context in which code completion occurs. enum ParserCompletionContext { /// \brief Code completion occurs at top-level or namespace context. PCC_Namespace, /// \brief Code completion occurs within a class, struct, or union. 
PCC_Class, /// \brief Code completion occurs within an Objective-C interface, protocol, /// or category. PCC_ObjCInterface, /// \brief Code completion occurs within an Objective-C implementation or /// category implementation PCC_ObjCImplementation, /// \brief Code completion occurs within the list of instance variables /// in an Objective-C interface, protocol, category, or implementation. PCC_ObjCInstanceVariableList, /// \brief Code completion occurs following one or more template /// headers. PCC_Template, /// \brief Code completion occurs following one or more template /// headers within a class. PCC_MemberTemplate, /// \brief Code completion occurs within an expression. PCC_Expression, /// \brief Code completion occurs within a statement, which may /// also be an expression or a declaration. PCC_Statement, /// \brief Code completion occurs at the beginning of the /// initialization statement (or expression) in a for loop. PCC_ForInit, /// \brief Code completion occurs within the condition of an if, /// while, switch, or for statement. PCC_Condition, /// \brief Code completion occurs within the body of a function on a /// recovery path, where we do not have a specific handle on our position /// in the grammar. PCC_RecoveryInFunction, /// \brief Code completion occurs where only a type is permitted. PCC_Type, /// \brief Code completion occurs in a parenthesized expression, which /// might also be a type cast. PCC_ParenthesizedExpression, /// \brief Code completion occurs within a sequence of declaration /// specifiers within a function, method, or block. 
PCC_LocalDeclarationSpecifiers }; void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path); void CodeCompleteOrdinaryName(Scope *S, ParserCompletionContext CompletionContext); void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS, bool AllowNonIdentifiers, bool AllowNestedNameSpecifiers); struct CodeCompleteExpressionData; void CodeCompleteExpression(Scope *S, const CodeCompleteExpressionData &Data); void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, SourceLocation OpLoc, bool IsArrow); void CodeCompletePostfixExpression(Scope *S, ExprResult LHS); void CodeCompleteTag(Scope *S, unsigned TagSpec); void CodeCompleteTypeQualifiers(DeclSpec &DS); void CodeCompleteCase(Scope *S); void CodeCompleteCall(Scope *S, Expr *Fn, ArrayRef<Expr *> Args); void CodeCompleteInitializer(Scope *S, Decl *D); void CodeCompleteReturn(Scope *S); void CodeCompleteAfterIf(Scope *S); void CodeCompleteAssignmentRHS(Scope *S, Expr *LHS); void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext); void CodeCompleteUsing(Scope *S); void CodeCompleteUsingDirective(Scope *S); void CodeCompleteNamespaceDecl(Scope *S); void CodeCompleteNamespaceAliasDecl(Scope *S); void CodeCompleteOperatorName(Scope *S); void CodeCompleteConstructorInitializer( Decl *Constructor, ArrayRef<CXXCtorInitializer *> Initializers); void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro, bool AfterAmpersand); void CodeCompleteObjCAtDirective(Scope *S); void CodeCompleteObjCAtVisibility(Scope *S); void CodeCompleteObjCAtStatement(Scope *S); void CodeCompleteObjCAtExpression(Scope *S); void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS); void CodeCompleteObjCPropertyGetter(Scope *S); void CodeCompleteObjCPropertySetter(Scope *S); void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS, bool IsParameter); void CodeCompleteObjCMessageReceiver(Scope *S); void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc, ArrayRef<IdentifierInfo *> SelIdents, 
bool AtArgumentExpression); void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, bool IsSuper = false); void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver, ArrayRef<IdentifierInfo *> SelIdents, bool AtArgumentExpression, ObjCInterfaceDecl *Super = nullptr); void CodeCompleteObjCForCollection(Scope *S, DeclGroupPtrTy IterationVar); void CodeCompleteObjCSelector(Scope *S, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompleteObjCProtocolReferences(IdentifierLocPair *Protocols, unsigned NumProtocols); void CodeCompleteObjCProtocolDecl(Scope *S); void CodeCompleteObjCInterfaceDecl(Scope *S); void CodeCompleteObjCSuperclass(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationDecl(Scope *S); void CodeCompleteObjCInterfaceCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCImplementationCategory(Scope *S, IdentifierInfo *ClassName, SourceLocation ClassNameLoc); void CodeCompleteObjCPropertyDefinition(Scope *S); void CodeCompleteObjCPropertySynthesizeIvar(Scope *S, IdentifierInfo *PropertyName); void CodeCompleteObjCMethodDecl(Scope *S, bool IsInstanceMethod, ParsedType ReturnType); void CodeCompleteObjCMethodDeclSelector(Scope *S, bool IsInstanceMethod, bool AtParameterName, ParsedType ReturnType, ArrayRef<IdentifierInfo *> SelIdents); void CodeCompletePreprocessorDirective(bool InConditional); void CodeCompleteInPreprocessorConditionalExclusion(Scope *S); void CodeCompletePreprocessorMacroName(bool IsDefinition); void CodeCompletePreprocessorExpression(); void CodeCompletePreprocessorMacroArgument(Scope *S, IdentifierInfo *Macro, MacroInfo *MacroInfo, unsigned Argument); void CodeCompleteNaturalLanguage(); void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator, CodeCompletionTUInfo &CCTUInfo, SmallVectorImpl<CodeCompletionResult> &Results); //@} 
//===--------------------------------------------------------------------===// // Extra semantic analysis beyond the C type system public: SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL, unsigned ByteNo) const; private: void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr, const ArraySubscriptExpr *ASE=nullptr, bool AllowOnePastEnd=true, bool IndexNegated=false); void CheckArrayAccess(const Expr *E); // Used to grab the relevant information from a FormatAttr and a // FunctionDeclaration. struct FormatStringInfo { unsigned FormatIdx; unsigned FirstDataArg; bool HasVAListArg; }; bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember, FormatStringInfo *FSI); bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc, ArrayRef<const Expr *> Args); bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall, const FunctionProtoType *Proto); bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto); void CheckConstructorCall(FunctionDecl *FDecl, ArrayRef<const Expr *> Args, const FunctionProtoType *Proto, SourceLocation Loc); void checkCall(NamedDecl *FDecl, ArrayRef<const Expr *> Args, unsigned NumParams, bool IsMemberFunction, SourceLocation Loc, SourceRange Range, VariadicCallType CallType); bool CheckObjCString(Expr *Arg); ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl, unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall, unsigned MaxWidth); bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall); bool SemaBuiltinVAStart(CallExpr 
*TheCall); bool SemaBuiltinVAStartARM(CallExpr *Call); bool SemaBuiltinUnorderedCompare(CallExpr *TheCall); bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs); public: // Used by C++ template instantiation. ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall); ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo, SourceLocation BuiltinLoc, SourceLocation RParenLoc); private: bool SemaBuiltinPrefetch(CallExpr *TheCall); bool SemaBuiltinAssume(CallExpr *TheCall); bool SemaBuiltinAssumeAligned(CallExpr *TheCall); bool SemaBuiltinLongjmp(CallExpr *TheCall); ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult); ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult, AtomicExpr::AtomicOp Op); bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum, llvm::APSInt &Result); bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low, int High); public: enum FormatStringType { FST_Scanf, FST_Printf, FST_NSString, FST_Strftime, FST_Strfmon, FST_Kprintf, FST_Unknown }; static FormatStringType GetFormatStringType(const FormatAttr *Format); void CheckFormatString(const StringLiteral *FExpr, const Expr *OrigFormatExpr, ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, bool inFunctionCall, VariadicCallType CallType, llvm::SmallBitVector &CheckedVarArgs); bool FormatStringHasSArg(const StringLiteral *FExpr); bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx); private: bool CheckFormatArguments(const FormatAttr *Format, ArrayRef<const Expr *> Args, bool IsCXXMember, VariadicCallType CallType, SourceLocation Loc, SourceRange Range, llvm::SmallBitVector &CheckedVarArgs); bool CheckFormatArguments(ArrayRef<const Expr *> Args, bool HasVAListArg, unsigned format_idx, unsigned firstDataArg, FormatStringType Type, VariadicCallType CallType, SourceLocation Loc, SourceRange range, llvm::SmallBitVector &CheckedVarArgs); void 
CheckAbsoluteValueFunction(const CallExpr *Call, const FunctionDecl *FDecl, IdentifierInfo *FnInfo); void CheckMemaccessArguments(const CallExpr *Call, unsigned BId, IdentifierInfo *FnName); void CheckStrlcpycatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckStrncatArguments(const CallExpr *Call, IdentifierInfo *FnName); void CheckReturnValExpr(Expr *RetValExp, QualType lhsType, SourceLocation ReturnLoc, bool isObjCMethod = false, const AttrVec *Attrs = nullptr, const FunctionDecl *FD = nullptr); void CheckFloatComparison(SourceLocation Loc, Expr* LHS, Expr* RHS); void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation()); void CheckBoolLikeConversion(Expr *E, SourceLocation CC); void CheckForIntOverflow(Expr *E); void CheckUnsequencedOperations(Expr *E); /// \brief Perform semantic checks on a completed expression. This will either /// be a full-expression or a default argument expression. void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(), bool IsConstexpr = false); void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field, Expr *Init); /// \brief Check if the given expression contains 'break' or 'continue' /// statement that produces control flow different from GCC. void CheckBreakContinueBinding(Expr *E); public: /// \brief Register a magic integral constant to be used as a type tag. void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind, uint64_t MagicValue, QualType Type, bool LayoutCompatible, bool MustBeNull); struct TypeTagData { TypeTagData() {} TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) : Type(Type), LayoutCompatible(LayoutCompatible), MustBeNull(MustBeNull) {} QualType Type; /// If true, \c Type should be compared with other expression's types for /// layout-compatibility. unsigned LayoutCompatible : 1; unsigned MustBeNull : 1; }; /// A pair of ArgumentKind identifier and magic value. This uniquely /// identifies the magic value. 
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue; private: /// \brief A map from magic value to type information. std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>> TypeTagForDatatypeMagicValues; /// \brief Peform checks on a call of a function with argument_with_type_tag /// or pointer_with_type_tag attributes. void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr, const Expr * const *ExprArgs); /// \brief The parser's current scope. /// /// The parser maintains this state here. Scope *CurScope; mutable IdentifierInfo *Ident_super; mutable IdentifierInfo *Ident___float128; protected: friend class Parser; friend class InitializationSequence; friend class ASTReader; friend class ASTWriter; public: /// \brief Retrieve the parser's current scope. /// /// This routine must only be used when it is certain that semantic analysis /// and the parser are in precisely the same context, which is not the case /// when, e.g., we are performing any kind of template instantiation. /// Therefore, the only safe places to use this scope are in the parser /// itself and in routines directly invoked from the parser and *never* from /// template substitution or instantiation. Scope *getCurScope() const { return CurScope; } void incrementMSLocalManglingNumber() const { return CurScope->incrementMSLocalManglingNumber(); } IdentifierInfo *getSuperIdentifier() const; IdentifierInfo *getFloat128Identifier() const; Decl *getObjCDeclContext() const; DeclContext *getCurLexicalContext() const { return OriginalLexicalContext ? OriginalLexicalContext : CurContext; } AvailabilityResult getCurContextAvailability() const; const DeclContext *getCurObjCLexicalContext() const { const DeclContext *DC = getCurLexicalContext(); // A category implicitly has the attribute of the interface. 
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC)) DC = CatD->getClassInterface(); return DC; } }; /// \brief RAII object that enters a new expression evaluation context. class EnterExpressionEvaluationContext { Sema &Actions; public: EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl, IsDecltype); } EnterExpressionEvaluationContext(Sema &Actions, Sema::ExpressionEvaluationContext NewContext, Sema::ReuseLambdaContextDecl_t, bool IsDecltype = false) : Actions(Actions) { Actions.PushExpressionEvaluationContext(NewContext, Sema::ReuseLambdaContextDecl, IsDecltype); } ~EnterExpressionEvaluationContext() { Actions.PopExpressionEvaluationContext(); } }; DeductionFailureInfo MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK, sema::TemplateDeductionInfo &Info); /// \brief Contains a late templated function. /// Will be parsed at the end of the translation unit, used by Sema & Parser. struct LateParsedTemplate { CachedTokens Toks; /// \brief The template function declaration to be late parsed. Decl *D; }; } // end namespace clang #endif
pm2lpt.c
#include <string.h> #include <math.h> #include <mpi.h> #include <fastpm/libfastpm.h> #include <fastpm/logging.h> #include <fastpm/transfer.h> #include "pmpfft.h" #include "pmghosts.h" #include "pm2lpt.h" void pm_2lpt_solve(PM * pm, FastPMFloat * delta_k, FastPMFuncK * growth_rate_func_k, FastPMStore * p, double shift[3], FastPMKernelType type) { /* read out values at locations with an inverted shift */ int potorder, gradorder, deconvolveorder; fastpm_kernel_type_get_orders(type, &potorder, &gradorder, &deconvolveorder); /* calculate dx1, dx2, for initial fluctuation delta_k. * shift: martin has shift = 0.5, 0.5, 0.5. * Use shift of 0, 0, 0 if in doublt. * */ ptrdiff_t i; int d; /* It is important to (de-)shift the particles before creating ghosts * Because we will read out from the (de-)shifted positions. * Otherwise the IC will have artifacts along the edges of domains. */ for(i = 0; i < p->np; i ++) { for(d = 0; d < 3; d ++) { p->x[i][d] -= shift[d]; } } FastPMPainter painter[1]; fastpm_painter_init(painter, pm, FASTPM_PAINTER_CIC, 0); PMGhostData * pgd; if (p->dv1) { //could alternatively use if (growth_rate_func_k) pgd = pm_ghosts_create(pm, p, p->attributes | COLUMN_DX1 | COLUMN_DX2 | COLUMN_DV1, painter->support); } else { pgd = pm_ghosts_create(pm, p, p->attributes | COLUMN_DX1 | COLUMN_DX2, painter->support); } pm_ghosts_send(pgd, COLUMN_POS); FastPMFloat * source = pm_alloc(pm); FastPMFloat * workspace = pm_alloc(pm); memset(source, 0, sizeof(source[0]) * pm->allocsize); memset(workspace, 0, sizeof(workspace[0]) * pm->allocsize); FastPMFloat * field[3]; for(d = 0; d < 3; d++ ) { field[d] = pm_alloc(pm); memset(field[d], 0, sizeof(field[d][0]) * pm->allocsize); } FastPMFieldDescr DX1[] = { {COLUMN_DX1, 0}, {COLUMN_DX1, 1}, {COLUMN_DX1, 2}}; FastPMFieldDescr DX2[] = { {COLUMN_DX2, 0}, {COLUMN_DX2, 1}, {COLUMN_DX2, 2}}; FastPMFieldDescr DV1[] = { {COLUMN_DV1, 0}, {COLUMN_DV1, 1}, {COLUMN_DV1, 2}}; // this will do nothing throughout the function if !p->v1 int 
D1[] = {1, 2, 0}; int D2[] = {2, 0, 1}; /* 1LPT */ for(d = 0; d < 3; d++) { /* dx1 */ fastpm_apply_laplace_transfer(pm, delta_k, workspace, potorder); fastpm_apply_diff_transfer(pm, workspace, workspace, d); pm_c2r(pm, workspace); fastpm_readout_local(painter, workspace, p, p->np, DX1[d]); fastpm_readout_local(painter, workspace, pgd->p, pgd->p->np, DX1[d]); /* dv1 */ if (p->dv1) { fastpm_apply_laplace_transfer(pm, delta_k, workspace, potorder); fastpm_apply_diff_transfer(pm, workspace, workspace, d); fastpm_apply_any_transfer(pm, workspace, workspace, (fastpm_fkfunc) fastpm_funck_eval2, growth_rate_func_k); pm_c2r(pm, workspace); fastpm_readout_local(painter, workspace, p, p->np, DV1[d]); fastpm_readout_local(painter, workspace, pgd->p, pgd->p->np, DV1[d]); } } /* 2LPT */ for(d = 0; d< 3; d++) { fastpm_apply_laplace_transfer(pm, delta_k, field[d], potorder); fastpm_apply_diff_transfer(pm, field[d], field[d], d); fastpm_apply_diff_transfer(pm, field[d], field[d], d); pm_c2r(pm, field[d]); } for(d = 0; d < 3; d++) { int d1 = D1[d]; int d2 = D2[d]; #pragma omp parallel for for(i = 0; i < pm->IRegion.total; i ++) { source[i] += field[d1][i] * field[d2][i]; } } for(d = 0; d < 3; d++) { int d1 = D1[d]; int d2 = D2[d]; fastpm_apply_laplace_transfer(pm, delta_k, workspace, potorder); fastpm_apply_diff_transfer(pm, workspace, workspace, d1); fastpm_apply_diff_transfer(pm, workspace, workspace, d2); pm_c2r(pm, workspace); #pragma omp parallel for for(i = 0; i < pm->IRegion.total; i ++) { source[i] -= workspace[i] * workspace[i]; } } pm_r2c(pm, source, workspace); pm_assign(pm, workspace, source); for(d = 0; d < 3; d++) { /* * We absorb some the negative factor in za transfer to below; * * */ fastpm_apply_laplace_transfer(pm, source, workspace, potorder); fastpm_apply_diff_transfer(pm, workspace, workspace, d); pm_c2r(pm, workspace); /* this ensures x = x0 + dx1(t) + dx2(t) */ fastpm_apply_multiply_transfer(pm, workspace, workspace, 3.0 / 7); fastpm_readout_local(painter, 
workspace, p, p->np, DX2[d]); fastpm_readout_local(painter, workspace, pgd->p, pgd->p->np, DX2[d]); } pm_ghosts_reduce(pgd, COLUMN_DX1, FastPMReduceAddFloat, NULL); pm_ghosts_reduce(pgd, COLUMN_DX2, FastPMReduceAddFloat, NULL); if (p->dv1) pm_ghosts_reduce(pgd, COLUMN_DV1, FastPMReduceAddFloat, NULL); #ifdef PM_2LPT_DUMP fwrite(p->dx1, sizeof(p->dx1[0]), p->np, fopen("dx1.f4x3", "w")); fwrite(p->dx2, sizeof(p->dx2[0]), p->np, fopen("dx2.f4x3", "w")); #endif for(i = 0; i < p->np; i ++) { for(d = 0; d < 3; d ++) { p->x[i][d] += shift[d]; } } for(d = 0; d < 3; d ++) { pm_free(pm, field[2-d]); } pm_free(pm, workspace); pm_free(pm, source); pm_ghosts_free(pgd); } // Interpolate position and velocity for snapshot at a=aout void pm_2lpt_evolve(double aout, FastPMStore * p, FastPMCosmology * c, int zaonly) { int np = p->np; FastPMGrowthInfo gi; fastpm_growth_info_init(&gi, aout, c); double D1 = gi.D1; double D2 = gi.D2; double f1 = gi.f1; double f2 = gi.f2; double E = HubbleEa(aout, c); double dv1_prefac = D1 * aout * aout * E; double Dv1 = dv1_prefac * f1; double Dv2 = D2 * aout * aout * E * f2; fastpm_info("2LPT ICs set at z=%g: E=%g D1=%g, D2=%g, f1=%g, f2=%g\n", 1./aout-1, E, D1, D2, f1, f2); if(zaonly) { D2 = 0; Dv2 = 0; } int i; #pragma omp parallel for for(i=0; i<np; i++) { int d; for(d = 0; d < 3; d ++) { p->x[i][d] += D1 * p->dx1[i][d] + D2 * p->dx2[i][d]; if(p->v) { p->v[i][d] += p->dx2[i][d]*Dv2; if (p->dv1) { p->v[i][d] += dv1_prefac * p->dv1[i][d]; } else { p->v[i][d] += Dv1 * p->dx1[i][d]; } } } } p->meta.a_x = p->meta.a_v = aout; }
omp-for-nested.c
#include <stdio.h>
#include <unistd.h>

/* Demonstrates nested OpenMP parallelism: an outer parallel-for whose body
 * spawns another parallel-for. The outer index is passed into the inner
 * region with firstprivate so each inner team sees its own copy. Prints
 * one greeting per (outer, inner) pair; ordering depends on scheduling. */
int main()
{
    #pragma omp parallel for
    for (int outer = 0; outer < 5; outer++) {
        #pragma omp parallel for firstprivate(outer)
        for (int inner = 0; inner < 5; inner++) {
            printf("Hello World %d, %d\n", outer, inner);
        }
    }
    return 0;
}
convolution_sgemm_pack4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack4_neon(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const float* bias = _bias; // permute Mat tmp; #if __aarch64__ if (size >= 12) tmp.create(12 * maxk, inch, size / 12 + (size % 12) / 8 + (size % 12 % 8) / 4 + (size % 12 % 4) / 2 + size % 12 % 2, 16u, 4, opt.workspace_allocator); else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * maxk, inch, size / 2 + size % 2, 16u, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator); #else if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + (size % 4) / 2 + size % 2, 16u, 4, opt.workspace_allocator); else if (size >= 2) tmp.create(2 * 
maxk, inch, size / 2 + size % 2, 16u, 4, opt.workspace_allocator); else tmp.create(maxk, inch, size, 16u, 4, opt.workspace_allocator); #endif { #if __aarch64__ int nn_size = size / 12; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 12; float* tmpptr = tmp.channel(i / 12); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld4 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v4.4s, v5.4s, v6.4s, v7.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld4 {v8.4s, v9.4s, v10.4s, v11.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" "st1 {v4.4s}, [%1], #16 \n" "st1 {v8.4s}, [%1], #16 \n" "sub %0, %0, #128 \n" "st1 {v1.4s}, [%1], #16 \n" "st1 {v5.4s}, [%1], #16 \n" "st1 {v9.4s}, [%1], #16 \n" "st1 {v2.4s}, [%1], #16 \n" "st1 {v6.4s}, [%1], #16 \n" "st1 {v10.4s}, [%1], #16 \n" "st1 {v3.4s}, [%1], #16 \n" "st1 {v7.4s}, [%1], #16 \n" "st1 {v11.4s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11"); img0 += size * 4; } } } remain_size_start += nn_size * 12; nn_size = (size - remain_size_start) >> 3; #else int nn_size = size >> 3; int remain_size_start = 0; #endif #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); #else float* tmpptr = tmp.channel(i / 8); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0], #64 \n" "prfm pldl1keep, [%0, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, 
v7.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" "sub %0, %0, #64 \n" "st1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7"); #else asm volatile( "pld [%0, #512] \n" "vldm %0!, {d0-d7} \n" "pld [%0, #512] \n" "vldm %0, {d16-d23} \n" // transpose 8x4 "vtrn.32 q0, q1 \n" "vtrn.32 q2, q3 \n" "vtrn.32 q8, q9 \n" "vtrn.32 q10, q11 \n" "vswp d1, d4 \n" "vswp d3, d6 \n" "vswp d17, d20 \n" "vswp d19, d22 \n" "vswp q1, q8 \n" "vswp q3, q10 \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" "sub %0, %0, #64 \n" "vst1.f32 {d4-d7}, [%1 :128]! \n" "vst1.f32 {d20-d23}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3", "q8", "q9", "q10", "q11"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%0] \n" "st1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%1], #64 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1", "v2", "v3"); #else asm volatile( "pld [%0, #512] \n" "vldm %0, {d0-d7} \n" "vstm %1!, {d0-d7} \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1", "q2", "q3"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 2; nn_size = (size - remain_size_start) >> 1; #pragma omp parallel for 
num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 2; #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #256] \n" "ld1 {v0.4s, v1.4s}, [%0] \n" "st1 {v0.4s, v1.4s}, [%1], #32 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0", "v1"); #else asm volatile( "pld [%0, #256] \n" "vld1.f32 {d0-d3}, [%0 :128] \n" "vst1.f32 {d0-d3}, [%1 :128]! \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0", "q1"); #endif // __aarch64__ img0 += size * 4; } } } remain_size_start += nn_size << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { #if __aarch64__ float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); #else float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); #endif for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i * 4; for (int k = 0; k < maxk; k++) { #if __aarch64__ asm volatile( "prfm pldl1keep, [%0, #128] \n" "ld1 {v0.4s}, [%0] \n" "st1 {v0.4s}, [%1], #16 \n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "v0"); #else asm volatile( "pld [%0, #128] \n" "vld1.f32 {d0-d1}, [%0 :128] \n" "vst1.f32 {d0-d1}, [%1 :128]! 
\n" : "=r"(img0), // %0 "=r"(tmpptr) // %1 : "0"(img0), "1"(tmpptr) : "memory", "q0"); #endif // __aarch64__ img0 += size * 4; } } } } int remain_outch_start = 0; #if __aarch64__ int nn_outch = outch >> 1; remain_outch_start = nn_outch << 1; #pragma omp parallel for num_threads(opt.num_threads) for (int pp = 0; pp < nn_outch; pp++) { int p = pp * 2; float* outptr0 = top_blob.channel(p); float* outptr1 = top_blob.channel(p + 1); const float zeros[8] = {0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 4 : zeros; int i = 0; for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov v23.16b, v1.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w0011_01 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] \n" "fmla v20.4s, v5.4s, v0.s[0] \n" "fmla v21.4s, v5.4s, 
v0.s[1] \n" "fmla v22.4s, v5.4s, v0.s[2] \n" "fmla v23.4s, v5.4s, v0.s[3] \n" "fmla v24.4s, v5.4s, v1.s[0] \n" "fmla v25.4s, v5.4s, v1.s[1] \n" "fmla v26.4s, v5.4s, v1.s[2] \n" "fmla v27.4s, v5.4s, v1.s[3] \n" "fmla v28.4s, v5.4s, v2.s[0] \n" "fmla v29.4s, v5.4s, v2.s[1] \n" "fmla v30.4s, v5.4s, v2.s[2] \n" "fmla v31.4s, v5.4s, v2.s[3] \n" "fmla v8.4s, v6.4s, v3.s[0] \n" "fmla v9.4s, v6.4s, v3.s[1] \n" "fmla v10.4s, v6.4s, v3.s[2] \n" "fmla v11.4s, v6.4s, v3.s[3] \n" "fmla v20.4s, v7.4s, v3.s[0] \n" "fmla v21.4s, v7.4s, v3.s[1] \n" "fmla v22.4s, v7.4s, v3.s[2] \n" "fmla v23.4s, v7.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v12.4s, v6.4s, v0.s[0] \n" "fmla v13.4s, v6.4s, v0.s[1] \n" "fmla v14.4s, v6.4s, v0.s[2] \n" "fmla v15.4s, v6.4s, v0.s[3] \n" "fmla v16.4s, v6.4s, v1.s[0] \n" "fmla v17.4s, v6.4s, v1.s[1] \n" "fmla v18.4s, v6.4s, v1.s[2] \n" "fmla v19.4s, v6.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v0.s[0] \n" "fmla v25.4s, v7.4s, v0.s[1] \n" "fmla v26.4s, v7.4s, v0.s[2] \n" "fmla v27.4s, v7.4s, v0.s[3] \n" "fmla v28.4s, v7.4s, v1.s[0] \n" "fmla v29.4s, v7.4s, v1.s[1] \n" "fmla v30.4s, v7.4s, v1.s[2] \n" "fmla v31.4s, v7.4s, v1.s[3] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%4], #64 \n" // w2233_01 "fmla v8.4s, v4.4s, v2.s[0] \n" "fmla v9.4s, v4.4s, v2.s[1] \n" "fmla v10.4s, v4.4s, v2.s[2] \n" "fmla v11.4s, v4.4s, v2.s[3] \n" "fmla v12.4s, v4.4s, v3.s[0] \n" "fmla v13.4s, v4.4s, v3.s[1] \n" "fmla v14.4s, v4.4s, v3.s[2] \n" "fmla v15.4s, v4.4s, v3.s[3] \n" "fmla v20.4s, v5.4s, v2.s[0] \n" "fmla v21.4s, v5.4s, v2.s[1] \n" "fmla v22.4s, v5.4s, v2.s[2] \n" "fmla v23.4s, v5.4s, v2.s[3] \n" "fmla v24.4s, v5.4s, v3.s[0] \n" "fmla v25.4s, v5.4s, v3.s[1] \n" "fmla v26.4s, v5.4s, v3.s[2] \n" "fmla v27.4s, v5.4s, v3.s[3] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" "fmla v16.4s, v4.4s, v0.s[0] \n" "fmla v17.4s, v4.4s, v0.s[1] \n" "fmla v18.4s, v4.4s, 
v0.s[2] \n" "fmla v19.4s, v4.4s, v0.s[3] \n" "fmla v28.4s, v5.4s, v0.s[0] \n" "fmla v29.4s, v5.4s, v0.s[1] \n" "fmla v30.4s, v5.4s, v0.s[2] \n" "fmla v31.4s, v5.4s, v0.s[3] \n" "fmla v8.4s, v6.4s, v1.s[0] \n" "fmla v9.4s, v6.4s, v1.s[1] \n" "fmla v10.4s, v6.4s, v1.s[2] \n" "fmla v11.4s, v6.4s, v1.s[3] \n" "fmla v12.4s, v6.4s, v2.s[0] \n" "fmla v13.4s, v6.4s, v2.s[1] \n" "fmla v14.4s, v6.4s, v2.s[2] \n" "fmla v15.4s, v6.4s, v2.s[3] \n" "fmla v16.4s, v6.4s, v3.s[0] \n" "fmla v17.4s, v6.4s, v3.s[1] \n" "fmla v18.4s, v6.4s, v3.s[2] \n" "fmla v19.4s, v6.4s, v3.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v20.4s, v7.4s, v1.s[0] \n" "fmla v21.4s, v7.4s, v1.s[1] \n" "fmla v22.4s, v7.4s, v1.s[2] \n" "fmla v23.4s, v7.4s, v1.s[3] \n" "fmla v24.4s, v7.4s, v2.s[0] \n" "fmla v25.4s, v7.4s, v2.s[1] \n" "fmla v26.4s, v7.4s, v2.s[2] \n" "fmla v27.4s, v7.4s, v2.s[3] \n" "fmla v28.4s, v7.4s, v3.s[0] \n" "fmla v29.4s, v7.4s, v3.s[1] \n" "fmla v30.4s, v7.4s, v3.s[2] \n" "fmla v31.4s, v7.4s, v3.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b 
\n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "mov v24.16b, v1.16b \n" "mov v25.16b, v1.16b \n" "mov v26.16b, v1.16b \n" "mov v27.16b, v1.16b \n" "mov v28.16b, v1.16b \n" "mov v29.16b, v1.16b \n" "mov v30.16b, v1.16b \n" "mov v31.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v24.4s, v9.4s, v0.s[0] \n" "fmla v25.4s, v9.4s, v1.s[0] \n" "fmla v26.4s, v9.4s, v2.s[0] \n" "fmla v27.4s, v9.4s, v3.s[0] \n" "fmla v28.4s, v9.4s, v4.s[0] \n" "fmla v29.4s, v9.4s, v5.s[0] \n" "fmla v30.4s, v9.4s, v6.s[0] \n" "fmla v31.4s, v9.4s, v7.s[0] \n" "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v10.4s, v4.s[1] \n" "fmla v21.4s, v10.4s, v5.s[1] \n" "fmla v22.4s, v10.4s, v6.s[1] \n" "fmla v23.4s, v10.4s, v7.s[1] \n" "fmla v24.4s, v11.4s, v0.s[1] \n" "fmla v25.4s, v11.4s, v1.s[1] \n" "fmla v26.4s, v11.4s, v2.s[1] \n" "fmla v27.4s, v11.4s, v3.s[1] \n" "fmla v28.4s, v11.4s, v4.s[1] \n" "fmla v29.4s, v11.4s, v5.s[1] \n" "fmla v30.4s, v11.4s, v6.s[1] \n" "fmla v31.4s, v11.4s, v7.s[1] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v12.4s, v4.s[2] \n" "fmla 
v21.4s, v12.4s, v5.s[2] \n" "fmla v22.4s, v12.4s, v6.s[2] \n" "fmla v23.4s, v12.4s, v7.s[2] \n" "fmla v24.4s, v13.4s, v0.s[2] \n" "fmla v25.4s, v13.4s, v1.s[2] \n" "fmla v26.4s, v13.4s, v2.s[2] \n" "fmla v27.4s, v13.4s, v3.s[2] \n" "fmla v28.4s, v13.4s, v4.s[2] \n" "fmla v29.4s, v13.4s, v5.s[2] \n" "fmla v30.4s, v13.4s, v6.s[2] \n" "fmla v31.4s, v13.4s, v7.s[2] \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v14.4s, v4.s[3] \n" "fmla v21.4s, v14.4s, v5.s[3] \n" "fmla v22.4s, v14.4s, v6.s[3] \n" "fmla v23.4s, v14.4s, v7.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v24.4s, v15.4s, v0.s[3] \n" "fmla v25.4s, v15.4s, v1.s[3] \n" "fmla v26.4s, v15.4s, v2.s[3] \n" "fmla v27.4s, v15.4s, v3.s[3] \n" "fmla v28.4s, v15.4s, v4.s[3] \n" "fmla v29.4s, v15.4s, v5.s[3] \n" "fmla v30.4s, v15.4s, v6.s[3] \n" "fmla v31.4s, v15.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%1], #64 \n" "st1 {v28.4s, v29.4s, v30.4s, v31.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31"); } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v1.16b \n" "mov v21.16b, v1.16b \n" "mov v22.16b, v1.16b \n" "mov 
v23.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%3], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v20.4s, v9.4s, v0.s[0] \n" "fmla v21.4s, v9.4s, v1.s[0] \n" "fmla v22.4s, v9.4s, v2.s[0] \n" "fmla v23.4s, v9.4s, v3.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v10.4s, v2.s[1] \n" "fmla v19.4s, v10.4s, v3.s[1] \n" "fmla v20.4s, v11.4s, v0.s[1] \n" "fmla v21.4s, v11.4s, v1.s[1] \n" "fmla v22.4s, v11.4s, v2.s[1] \n" "fmla v23.4s, v11.4s, v3.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v12.4s, v2.s[2] \n" "fmla v19.4s, v12.4s, v3.s[2] \n" "fmla v20.4s, v13.4s, v0.s[2] \n" "fmla v21.4s, v13.4s, v1.s[2] \n" "fmla v22.4s, v13.4s, v2.s[2] \n" "fmla v23.4s, v13.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v14.4s, v2.s[3] \n" "fmla v19.4s, v14.4s, v3.s[3] \n" "fmla v20.4s, v15.4s, v0.s[3] \n" "fmla v21.4s, v15.4s, v1.s[3] \n" "fmla v22.4s, v15.4s, v2.s[3] \n" "fmla v23.4s, v15.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); } for (; i + 1 < size; i += 2) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 
2); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s, v1.4s}, [%10] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v1.16b \n" "mov v19.16b, v1.16b \n" "0: \n" "prfm pldl1keep, [%3, #256] \n" "ld1 {v0.4s, v1.4s}, [%3], #32 \n" // r0 r1 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v9.4s, v0.s[0] \n" "fmla v19.4s, v9.4s, v1.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v10.4s, v1.s[1] \n" "fmla v18.4s, v11.4s, v0.s[1] \n" "fmla v19.4s, v11.4s, v1.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v12.4s, v1.s[2] \n" "fmla v18.4s, v13.4s, v0.s[2] \n" "fmla v19.4s, v13.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v14.4s, v1.s[3] \n" "fmla v18.4s, v15.4s, v0.s[3] \n" "fmla v19.4s, v15.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" "st1 {v18.4s, v19.4s}, [%2], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19"); } for (; i < size; i++) { const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* kptr0 = kernel.channel(p / 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v16.4s, v17.4s}, [%10] \n" "0: \n" "prfm pldl1keep, [%3, #128] \n" "ld1 {v0.4s}, [%3], #16 \n" // r0 "prfm pldl1keep, [%4, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%4], #64 \n" // w0011_01 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v9.4s, v0.s[0] \n" "prfm pldl1keep, [%4, #512] \n" "ld1 
{v12.4s, v13.4s, v14.4s, v15.4s}, [%4], #64 \n" // w2233_01 "fmla v16.4s, v10.4s, v0.s[1] \n" "fmla v17.4s, v11.4s, v0.s[1] \n" "fmla v16.4s, v12.4s, v0.s[2] \n" "fmla v17.4s, v13.4s, v0.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v14.4s, v0.s[3] \n" "fmla v17.4s, v15.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" "st1 {v17.4s}, [%2], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(outptr1), // %2 "=r"(tmpptr), // %3 "=r"(kptr0) // %4 : "0"(nn), "1"(outptr0), "2"(outptr1), "3"(tmpptr), "4"(kptr0), "r"(biasptr) // %10 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17"); } } #endif // __aarch64__ #pragma omp parallel for num_threads(opt.num_threads) for (int p = remain_outch_start; p < outch; p++) { float* outptr0 = top_blob.channel(p); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 4 : zeros; int i = 0; #if __aarch64__ for (; i + 11 < size; i += 12) { const float* tmpptr = tmp.channel(i / 12); const float* kptr0 = kernel.channel(p / 2 + p % 2); int nn = inch * maxk; // inch always > 0 asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v8.16b, v0.16b \n" "mov v9.16b, v0.16b \n" "mov v10.16b, v0.16b \n" "mov v11.16b, v0.16b \n" "mov v12.16b, v0.16b \n" "mov v13.16b, v0.16b \n" "mov v14.16b, v0.16b \n" "mov v15.16b, v0.16b \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" "prfm pldl1keep, [%3, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%3], #64 \n" // w0123_0 "fmla v8.4s, v4.4s, v0.s[0] \n" "fmla v9.4s, v4.4s, v0.s[1] \n" "fmla v10.4s, v4.4s, v0.s[2] \n" "fmla v11.4s, v4.4s, v0.s[3] \n" "fmla v12.4s, v4.4s, v1.s[0] \n" "fmla v13.4s, v4.4s, v1.s[1] \n" "fmla v14.4s, v4.4s, v1.s[2] \n" "fmla v15.4s, v4.4s, v1.s[3] \n" "fmla v16.4s, v4.4s, v2.s[0] \n" "fmla v17.4s, v4.4s, v2.s[1] \n" "fmla v18.4s, v4.4s, v2.s[2] \n" "fmla v19.4s, v4.4s, v2.s[3] 
\n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v20.4s, v21.4s, v22.4s, v23.4s}, [%2], #64 \n" "fmla v8.4s, v5.4s, v3.s[0] \n" "fmla v9.4s, v5.4s, v3.s[1] \n" "fmla v10.4s, v5.4s, v3.s[2] \n" "fmla v11.4s, v5.4s, v3.s[3] \n" "fmla v12.4s, v5.4s, v20.s[0] \n" "fmla v13.4s, v5.4s, v20.s[1] \n" "fmla v14.4s, v5.4s, v20.s[2] \n" "fmla v15.4s, v5.4s, v20.s[3] \n" "fmla v16.4s, v5.4s, v21.s[0] \n" "fmla v17.4s, v5.4s, v21.s[1] \n" "fmla v18.4s, v5.4s, v21.s[2] \n" "fmla v19.4s, v5.4s, v21.s[3] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v24.4s, v25.4s, v26.4s, v27.4s}, [%2], #64 \n" "fmla v8.4s, v6.4s, v22.s[0] \n" "fmla v9.4s, v6.4s, v22.s[1] \n" "fmla v10.4s, v6.4s, v22.s[2] \n" "fmla v11.4s, v6.4s, v22.s[3] \n" "fmla v12.4s, v6.4s, v23.s[0] \n" "fmla v13.4s, v6.4s, v23.s[1] \n" "fmla v14.4s, v6.4s, v23.s[2] \n" "fmla v15.4s, v6.4s, v23.s[3] \n" "fmla v16.4s, v6.4s, v24.s[0] \n" "fmla v17.4s, v6.4s, v24.s[1] \n" "fmla v18.4s, v6.4s, v24.s[2] \n" "fmla v19.4s, v6.4s, v24.s[3] \n" "subs %w0, %w0, #1 \n" "fmla v8.4s, v7.4s, v25.s[0] \n" "fmla v9.4s, v7.4s, v25.s[1] \n" "fmla v10.4s, v7.4s, v25.s[2] \n" "fmla v11.4s, v7.4s, v25.s[3] \n" "fmla v12.4s, v7.4s, v26.s[0] \n" "fmla v13.4s, v7.4s, v26.s[1] \n" "fmla v14.4s, v7.4s, v26.s[2] \n" "fmla v15.4s, v7.4s, v26.s[3] \n" "fmla v16.4s, v7.4s, v27.s[0] \n" "fmla v17.4s, v7.4s, v27.s[1] \n" "fmla v18.4s, v7.4s, v27.s[2] \n" "fmla v19.4s, v7.4s, v27.s[3] \n" "bne 0b \n" "st1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%1], #64 \n" "st1 {v12.4s, v13.4s, v14.4s, v15.4s}, [%1], #64 \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27"); } #endif // __aarch64__ for (; i + 7 < size; i += 8) { #if 
__aarch64__ const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "mov v20.16b, v0.16b \n" "mov v21.16b, v0.16b \n" "mov v22.16b, v0.16b \n" "mov v23.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v4.4s, v5.4s, v6.4s, v7.4s}, [%2], #64 \n" // r4 r5 r6 r7 "fmla v20.4s, v8.4s, v4.s[0] \n" "fmla v21.4s, v8.4s, v5.s[0] \n" "fmla v22.4s, v8.4s, v6.s[0] \n" "fmla v23.4s, v8.4s, v7.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v20.4s, v9.4s, v4.s[1] \n" "fmla v21.4s, v9.4s, v5.s[1] \n" "fmla v22.4s, v9.4s, v6.s[1] \n" "fmla v23.4s, v9.4s, v7.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "fmla v20.4s, v10.4s, v4.s[2] \n" "fmla v21.4s, v10.4s, v5.s[2] \n" "fmla v22.4s, v10.4s, v6.s[2] \n" "fmla v23.4s, v10.4s, v7.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "fmla v20.4s, v11.4s, v4.s[3] \n" "fmla v21.4s, v11.4s, v5.s[3] \n" "fmla v22.4s, v11.4s, v6.s[3] \n" "fmla v23.4s, v11.4s, v7.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" "st1 {v20.4s, v21.4s, v22.4s, 
v23.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "vmov q12, q0 \n" "vmov q13, q0 \n" "vmov q14, q0 \n" "vmov q15, q0 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d0[1] \n" "vmla.f32 q10, q4, d1[0] \n" "vmla.f32 q11, q4, d1[1] \n" "vmla.f32 q12, q4, d2[0] \n" "vmla.f32 q13, q4, d2[1] \n" "vmla.f32 q14, q4, d3[0] \n" "vmla.f32 q15, q4, d3[1] \n" "vmla.f32 q8, q5, d4[0] \n" "vmla.f32 q9, q5, d4[1] \n" "vmla.f32 q10, q5, d5[0] \n" "vmla.f32 q11, q5, d5[1] \n" "vmla.f32 q12, q5, d6[0] \n" "vmla.f32 q13, q5, d6[1] \n" "vmla.f32 q14, q5, d7[0] \n" "vmla.f32 q15, q5, d7[1] \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "vmla.f32 q8, q6, d0[0] \n" "vmla.f32 q9, q6, d0[1] \n" "vmla.f32 q10, q6, d1[0] \n" "vmla.f32 q11, q6, d1[1] \n" "vmla.f32 q12, q6, d2[0] \n" "vmla.f32 q13, q6, d2[1] \n" "vmla.f32 q14, q6, d3[0] \n" "vmla.f32 q15, q6, d3[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d4[0] \n" "vmla.f32 q9, q7, d4[1] \n" "vmla.f32 q10, q7, d5[0] \n" "vmla.f32 q11, q7, d5[1] \n" "vmla.f32 q12, q7, d6[0] \n" "vmla.f32 q13, q7, d6[1] \n" "vmla.f32 q14, q7, d7[0] \n" "vmla.f32 q15, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" "vstm %1!, {d24-d31} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15"); #endif } for (; i + 3 < size; i += 4) { #if __aarch64__ const float* tmpptr = 
tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "mov v18.16b, v0.16b \n" "mov v19.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #512] \n" "ld1 {v0.4s, v1.4s, v2.4s, v3.4s}, [%2], #64 \n" // r0 r1 r2 r3 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v18.4s, v8.4s, v2.s[0] \n" "fmla v19.4s, v8.4s, v3.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v18.4s, v9.4s, v2.s[1] \n" "fmla v19.4s, v9.4s, v3.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "fmla v18.4s, v10.4s, v2.s[2] \n" "fmla v19.4s, v10.4s, v3.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "fmla v18.4s, v11.4s, v2.s[3] \n" "fmla v19.4s, v11.4s, v3.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s, v18.4s, v19.4s}, [%1], #64 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v2", "v3", "v8", "v9", "v10", "v11", "v16", "v17", "v18", "v19"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "vmov q10, q0 \n" "vmov q11, q0 \n" "0: \n" "pld [%2, #512] \n" "vldm %2!, {d0-d7} \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q10, q4, d4[0] \n" "vmla.f32 q11, q4, d6[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q10, q5, d4[1] \n" "vmla.f32 q11, q5, d6[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "vmla.f32 q10, q6, d5[0] \n" 
"vmla.f32 q11, q6, d7[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "vmla.f32 q10, q7, d5[1] \n" "vmla.f32 q11, q7, d7[1] \n" "bne 0b \n" "vstm %1!, {d16-d23} \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11"); #endif } for (; i + 1 < size; i += 2) { #if __aarch64__ const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v0.4s}, [%8] \n" "mov v16.16b, v0.16b \n" "mov v17.16b, v0.16b \n" "0: \n" "prfm pldl1keep, [%2, #256] \n" "ld1 {v0.4s, v1.4s}, [%2], #32 \n" // r0 r1 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v17.4s, v8.4s, v1.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "fmla v17.4s, v9.4s, v1.s[1] \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v17.4s, v10.4s, v1.s[2] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "fmla v17.4s, v11.4s, v1.s[3] \n" "bne 0b \n" "st1 {v16.4s, v17.4s}, [%1], #32 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v1", "v8", "v9", "v10", "v11", "v16", "v17"); #else asm volatile( "vld1.f32 {d0-d1}, [%8] \n" "vmov q8, q0 \n" "vmov q9, q0 \n" "0: \n" "pld [%2, #256] \n" "vld1.f32 {d0-d3}, [%2 :128]! 
\n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q9, q4, d2[0] \n" "vmla.f32 q8, q5, d0[1] \n" "vmla.f32 q9, q5, d2[1] \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q9, q6, d3[0] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q7, d1[1] \n" "vmla.f32 q9, q7, d3[1] \n" "bne 0b \n" "vst1.f32 {d16-d19}, [%1 :128]! \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q1", "q4", "q5", "q6", "q7", "q8", "q9"); #endif } for (; i < size; i++) { #if __aarch64__ const float* tmpptr = tmp.channel(i / 12 + (i % 12) / 8 + (i % 12 % 8) / 4 + (i % 12 % 4) / 2 + i % 12 % 2); const float* kptr0 = kernel.channel(p / 2 + p % 2); #else const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + (i % 4) / 2 + i % 2); const float* kptr0 = kernel.channel(p); #endif int nn = inch * maxk; // inch always > 0 #if __aarch64__ asm volatile( "ld1 {v16.4s}, [%8] \n" "0: \n" "prfm pldl1keep, [%2, #128] \n" "ld1 {v0.4s}, [%2], #16 \n" // r0 "prfm pldl1keep, [%3, #512] \n" "ld1 {v8.4s, v9.4s, v10.4s, v11.4s}, [%3], #64 \n" // w0123 "fmla v16.4s, v8.4s, v0.s[0] \n" "fmla v16.4s, v9.4s, v0.s[1] \n" "subs %w0, %w0, #1 \n" "fmla v16.4s, v10.4s, v0.s[2] \n" "fmla v16.4s, v11.4s, v0.s[3] \n" "bne 0b \n" "st1 {v16.4s}, [%1], #16 \n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "v0", "v8", "v9", "v10", "v11", "v16"); #else asm volatile( "vld1.f32 {d16-d17}, [%8] \n" "0: \n" "pld [%2, #128] \n" "vld1.f32 {d0-d1}, [%2 :128]! \n" "pld [%3, #512] \n" "vldm %3!, {d8-d15} \n" "vmla.f32 q8, q4, d0[0] \n" "vmla.f32 q8, q5, d0[1] \n" "subs %0, %0, #1 \n" "vmla.f32 q8, q6, d1[0] \n" "vmla.f32 q8, q7, d1[1] \n" "bne 0b \n" "vst1.f32 {d16-d17}, [%1 :128]! 
\n" : "=r"(nn), // %0 "=r"(outptr0), // %1 "=r"(tmpptr), // %2 "=r"(kptr0) // %3 : "0"(nn), "1"(outptr0), "2"(tmpptr), "3"(kptr0), "r"(biasptr) // %8 : "cc", "memory", "q0", "q4", "q5", "q6", "q7", "q8"); #endif } } } static void convolution_im2col_sgemm_transform_kernel_pack4_neon(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 4b-4a-maxk-inch/4a-outch/4b Mat kernel = _kernel.reshape(maxk, inch, outch); #if __aarch64__ kernel_tm.create(32 * maxk, inch / 4, outch / 8 + (outch % 8) / 4); #else kernel_tm.create(16 * maxk, inch / 4, outch / 4); #endif int q = 0; #if __aarch64__ for (; q + 7 < outch; q += 8) { const Mat k0 = kernel.channel(q); const Mat k1 = kernel.channel(q + 1); const Mat k2 = kernel.channel(q + 2); const Mat k3 = kernel.channel(q + 3); const Mat k4 = kernel.channel(q + 4); const Mat k5 = kernel.channel(q + 5); const Mat k6 = kernel.channel(q + 6); const Mat k7 = kernel.channel(q + 7); float* g00 = kernel_tm.channel(q / 8); for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); const float* k40 = k4.row(p); const float* k41 = k4.row(p + 1); const float* k42 = k4.row(p + 2); const float* k43 = k4.row(p + 3); const float* k50 = k5.row(p); const float* k51 = k5.row(p + 1); const float* k52 = k5.row(p + 2); const float* k53 = k5.row(p + 3); const float* k60 = k6.row(p); const float* k61 = k6.row(p + 1); const float* k62 = 
k6.row(p + 2); const float* k63 = k6.row(p + 3); const float* k70 = k7.row(p); const float* k71 = k7.row(p + 1); const float* k72 = k7.row(p + 2); const float* k73 = k7.row(p + 3); for (int k = 0; k < maxk; k++) { g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k40[k]; g00[5] = k50[k]; g00[6] = k60[k]; g00[7] = k70[k]; g00[8] = k01[k]; g00[9] = k11[k]; g00[10] = k21[k]; g00[11] = k31[k]; g00[12] = k41[k]; g00[13] = k51[k]; g00[14] = k61[k]; g00[15] = k71[k]; g00[16] = k02[k]; g00[17] = k12[k]; g00[18] = k22[k]; g00[19] = k32[k]; g00[20] = k42[k]; g00[21] = k52[k]; g00[22] = k62[k]; g00[23] = k72[k]; g00[24] = k03[k]; g00[25] = k13[k]; g00[26] = k23[k]; g00[27] = k33[k]; g00[28] = k43[k]; g00[29] = k53[k]; g00[30] = k63[k]; g00[31] = k73[k]; g00 += 32; } } } #endif // __aarch64__ for (; q + 3 < outch; q += 4) { const Mat k0 = kernel.channel(q); const Mat k1 = kernel.channel(q + 1); const Mat k2 = kernel.channel(q + 2); const Mat k3 = kernel.channel(q + 3); #if __aarch64__ float* g00 = kernel_tm.channel(q / 8 + (q % 8) / 4); #else float* g00 = kernel_tm.channel(q / 4); #endif for (int p = 0; p + 3 < inch; p += 4) { const float* k00 = k0.row(p); const float* k01 = k0.row(p + 1); const float* k02 = k0.row(p + 2); const float* k03 = k0.row(p + 3); const float* k10 = k1.row(p); const float* k11 = k1.row(p + 1); const float* k12 = k1.row(p + 2); const float* k13 = k1.row(p + 3); const float* k20 = k2.row(p); const float* k21 = k2.row(p + 1); const float* k22 = k2.row(p + 2); const float* k23 = k2.row(p + 3); const float* k30 = k3.row(p); const float* k31 = k3.row(p + 1); const float* k32 = k3.row(p + 2); const float* k33 = k3.row(p + 3); for (int k = 0; k < maxk; k++) { g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00[4] = k01[k]; g00[5] = k11[k]; g00[6] = k21[k]; g00[7] = k31[k]; g00[8] = k02[k]; g00[9] = k12[k]; g00[10] = k22[k]; g00[11] = k32[k]; g00[12] = k03[k]; g00[13] = k13[k]; g00[14] = k23[k]; g00[15] = k33[k]; 
g00 += 16; } } } } static void convolution_im2col_sgemm_pack4_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 16u, 4, opt.workspace_allocator); { const int gap = (w * stride_h - outw * stride_w) * 4; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); float* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v * 4; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { float32x4_t _val0 = vld1q_f32(sptr); float32x4_t _val1 = vld1q_f32(sptr + stride_w * 4); float32x4_t _val2 = vld1q_f32(sptr + stride_w * 8); float32x4_t _val3 = vld1q_f32(sptr + stride_w * 12); vst1q_f32(ptr, _val0); vst1q_f32(ptr + 4, _val1); vst1q_f32(ptr + 8, _val2); vst1q_f32(ptr + 12, _val3); sptr += stride_w * 16; ptr += 16; } for (; j + 1 < outw; j += 2) { float32x4_t _val0 = vld1q_f32(sptr); float32x4_t _val1 = vld1q_f32(sptr + stride_w * 4); vst1q_f32(ptr, _val0); vst1q_f32(ptr + 4, _val1); sptr += stride_w * 8; ptr += 8; } for (; j < outw; j++) { float32x4_t _val = vld1q_f32(sptr); vst1q_f32(ptr, _val); sptr += stride_w * 4; ptr += 4; } sptr += gap; } } } } } im2col_sgemm_pack4_neon(bottom_im2col, top_blob, kernel, _bias, opt); }
omp_for_bigbounds.c
// RUN: %libomp-compile -DMY_SCHEDULE=static && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=dynamic && %libomp-run // RUN: %libomp-compile -DMY_SCHEDULE=guided && %libomp-run // Only works with Intel Compiler since at least version 15.0 // XFAIL: gcc, clang /* * Test that large bounds are handled properly and calculations of * loop iterations don't accidently overflow */ #include <stdio.h> #include <omp.h> #include <stdlib.h> #include <limits.h> #include "omp_testsuite.h" #define INCR 50000000 #define MY_MAX 2000000000 #define MY_MIN -2000000000 #ifndef MY_SCHEDULE # define MY_SCHEDULE static #endif int a, b, a_known_value, b_known_value; int test_omp_for_bigbounds() { a = 0; b = 0; #pragma omp parallel { int i; #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MIN; i < MY_MAX; i+=INCR) { #pragma omp atomic a++; } #pragma omp for schedule(MY_SCHEDULE) for (i = INT_MAX; i >= MY_MIN; i-=INCR) { #pragma omp atomic b++; } } printf("a = %d (should be %d), b = %d (should be %d)\n", a, a_known_value, b, b_known_value); return (a == a_known_value && b == b_known_value); } int main() { int i; int num_failed=0; a_known_value = 0; for (i = INT_MIN; i < MY_MAX; i+=INCR) { a_known_value++; } b_known_value = 0; for (i = INT_MAX; i >= MY_MIN; i-=INCR) { b_known_value++; } for(i = 0; i < REPETITIONS; i++) { if(!test_omp_for_bigbounds()) { num_failed++; } } return num_failed; }
inputTimed.c
#include<stdlib.h>
#include<sys/time.h>
#include<stdio.h>
#include<omp.h>

/* Ocean grid dimension and default convergence threshold. */
#define SIZE 100
#define THRESHOLD 0.1

/*
 * Jacobi-style 2D "ocean" relaxation benchmark that additionally measures,
 * per thread, the total time spent waiting at OpenMP barriers.
 *
 * Usage: prog [threshold]
 *   threshold - optional convergence threshold (defaults to THRESHOLD).
 *
 * Prints per-thread timing to stdout and arr[0][0] to stderr; returns 0.
 */
int main(int argc, char *argv[]) {
    double threshold = THRESHOLD;
    if (argc == 2) {
        sscanf(argv[1], "%lf", &threshold);
    }
    printf("Threshold: %lf\n", threshold);

    /* Fixed seed so every run is reproducible. */
    srand(10);

    /* Initializing the 2D ocean with pseudo-random values in [0, SIZE*10). */
    float arr[SIZE][SIZE];
    int p=0, q=0;
    for(p=0; p < SIZE; p++) {
        for(q = 0; q < SIZE; q++) {
            arr[p][q] = rand() % (SIZE*10);
        }
    }

    // Printing the Input Ocean
    //for(p = 0; p < SIZE; p++) {
    //    for(q = 0; q < SIZE; q++) {
    //        fprintf(stderr, "%.2f\t", arr[p][q]);
    //    }
    //    fprintf(stderr, "\n");
    //}

    float diff = 0;

    /* Propagating the Ocean Waves.  Each thread owns a contiguous band of
     * rows; barriers separate the phases of every sweep, and the time
     * spent inside them is accumulated per thread in `total`. */
    #pragma omp parallel shared(diff)
    {
        int done = 0;
        float mydiff = 0;              /* this thread's per-sweep change */
        int loop = 0;                  /* sweep counter */
        struct timeval tvStart, tvStop, bStart, bStop;
        long total = 0;                /* microseconds spent at barriers */
        gettimeofday(&tvStart, NULL);
        while(!done) {
            loop++;
            mydiff = 0;
            diff = 0;
            gettimeofday(&bStart, NULL);
            #pragma omp barrier
            gettimeofday(&bStop, NULL);
            total += (1000000*(bStop.tv_sec - bStart.tv_sec) + (bStop.tv_usec - bStart.tv_usec));
            /* Row band for this thread; assumes SIZE%omp_get_num_threads()==0. */
            int mymin = (SIZE/omp_get_num_threads())*omp_get_thread_num();
            int mymax = mymin + SIZE/omp_get_num_threads();
            int i, j;
            for (i = mymin; i < mymax; i++) { // i spans from mymin (inclusive) to mymax (exclusive)
                for(j = 0; j < SIZE; j++) {
                    float temp = arr[i][j];
                    /* In-place 5-point stencil; out-of-range neighbors count as 0. */
                    arr[i][j] = 0.2 * (arr[i][j]
                                       + ((i+1)>=SIZE?0:arr[i+1][j])
                                       + ((j+1)>=SIZE?0:arr[i][j+1])
                                       + ((i-1)<0?0:arr[i-1][j])
                                       + ((j-1)<0?0:arr[i][j-1]));
                    mydiff += arr[i][j] < temp ? temp - arr[i][j] : arr[i][j] - temp;
                }
            }
            #pragma omp critical
            {
                diff += mydiff;
            }
            gettimeofday(&bStart, NULL);
            #pragma omp barrier
            gettimeofday(&bStop, NULL);
            total += (1000000*(bStop.tv_sec - bStart.tv_sec) + (bStop.tv_usec - bStart.tv_usec));
            /* Stop when converged, or when the safety cap on sweeps is hit. */
            if(((float)diff)/(SIZE*SIZE) < threshold || loop > 100000) {
                done = 1;
            }
            gettimeofday(&bStart, NULL);
            #pragma omp barrier
            gettimeofday(&bStop, NULL);
            total += (1000000*(bStop.tv_sec - bStart.tv_sec) + (bStop.tv_usec - bStart.tv_usec));
        }
        /* NOTE(review): removed dead `double stop = (double)clock();` -- the
         * value was never used and clock() was called without <time.h>
         * (implicit declaration). */
        gettimeofday(&tvStop, NULL);
        fprintf(stdout, "Time taken by thread %d, for %d loops is %ld microseconds.\n",omp_get_thread_num(), loop, 1000000*(tvStop.tv_sec-tvStart.tv_sec) + (tvStop.tv_usec-tvStart.tv_usec));
        fprintf(stdout, "Time taken by thread %d, for synchronization is %ld microseconds.\n",omp_get_thread_num(), total);
    }

    /* Dump only arr[0][0] -- both loops break after the first element. */
    for(p = 0; p < SIZE; p++) {
        for(q = 0; q < SIZE; q++) {
            fprintf(stderr, "%.2f\t", arr[p][q]);
            break;
        }
        fprintf(stderr, "\n");
        break;
    }
    return 0;
}
snmg_test_utils.h
/*
 * Copyright (c) 2019, NVIDIA CORPORATION.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

// Internal helper functions
// Author: Alex Fender afender@nvidia.com

#pragma once

#include <omp.h>
#include "test_utils.h"
#include <fstream>       // std::ifstream

// Convert global offsets to local ones by shifting every offset by the
// first offset value, so off_loc[0] becomes 0.
template <typename T>
void shift_offsets(std::vector<T> & off_loc) {
  auto start = off_loc.front();
  for (auto i = size_t{0}; i < off_loc.size(); ++i)
    off_loc[i] -= start;
}

// 1D partitioning such that each GPU gets about the same number of edges.
// Called from inside an OpenMP parallel region: thread i computes the start
// vertex of partition i by scanning the CSR offsets for the first vertex
// whose offset exceeds i's share of the nonzeros.
template <typename T>
void edge_partioning(std::vector<T> & off_h,
                     std::vector<size_t> & part_offset,
                     std::vector<size_t> & v_loc,
                     std::vector<size_t> & e_loc) {
  auto i = omp_get_thread_num();
  auto p = omp_get_num_threads();
  // set first and last partition offsets
  part_offset[0] = 0;
  part_offset[p] = off_h.size()-1;
  if (i>0) {
    // get the first vertex ID of each partition
    auto loc_nnz = off_h.back()/p;      // target edges per partition
    auto start_nnz = i*loc_nnz;         // global edge index where i starts
    auto start_v = 0;
    for (auto j = size_t{0}; j < off_h.size(); ++j) {
      if (off_h[j] > start_nnz) {
        start_v = j;
        break;
      }
    }
    part_offset[i] = start_v;
  }
  // all threads must know their partition offset
  #pragma omp barrier
  // Store the local number of V and E for convenience
  v_loc[i] = part_offset[i+1] - part_offset[i];
  e_loc[i] = off_h[part_offset[i+1]] - off_h[part_offset[i]];
}

// Read a 2-column edge list (csv for HiBench) into source/destination
// vectors.  Returns 0 on success, 1 if the file cannot be opened.
template <typename idx_t>
int read_single_file(std::string fileName,
                     std::vector<idx_t>& s,
                     std::vector<idx_t>& d) {
  s.clear();
  d.clear();
  std::ifstream f(fileName);
  if (!f) {
    return 1;
  }
  idx_t src, dst;
  while (f>>src>>dst) {
    s.push_back(src);
    d.push_back(dst);
  }
  f.close();
  return 0;
}

// Slice the host CSR (off_h/ind_h/val_h) into this thread's local partition
// and wrap the slices in gdf_columns.  Must be called from inside an OpenMP
// parallel region; uses gtest ASSERT_EQ to validate slice sizes.
template <typename idx_t,typename val_t>
void load_csr_loc(std::vector<idx_t> & off_h,
                  std::vector<idx_t> & ind_h,
                  std::vector<val_t> & val_h,
                  std::vector<size_t> & v_loc,
                  std::vector<size_t> & e_loc,
                  std::vector<size_t> & part_offset,
                  gdf_column* col_off, gdf_column* col_ind, gdf_column* col_val) {
  auto i = omp_get_thread_num();
  auto p = omp_get_num_threads();
  edge_partioning(off_h, part_offset, v_loc, e_loc);
  ASSERT_EQ(part_offset[i+1]-part_offset[i], v_loc[i]);

  // Local offsets include one extra entry (v_loc[i]+1); indices/values span
  // this partition's edge range in the global arrays.
  std::vector<idx_t> off_loc(off_h.begin()+part_offset[i],off_h.begin()+part_offset[i+1]+1),
                     ind_loc(ind_h.begin()+off_h[part_offset[i]],ind_h.begin()+off_h[part_offset[i+1]]);
  std::vector<val_t> val_loc(val_h.begin()+off_h[part_offset[i]],val_h.begin()+off_h[part_offset[i+1]]);
  ASSERT_EQ(off_loc.size(), v_loc[i]+1);
  ASSERT_EQ(ind_loc.size(), e_loc[i]);
  ASSERT_EQ(val_loc.size(), e_loc[i]);

#ifdef SNMG_VERBOSE
  #pragma omp barrier
  #pragma omp master
  {
    std::cout << off_h[part_offset[i]]<< std::endl;
    std::cout << off_h[part_offset[i+1]]<< std::endl;
    for (auto j = part_offset.begin(); j != part_offset.end(); ++j)
      std::cout << *j << ' ';
    std::cout << std::endl;
    for (auto j = v_loc.begin(); j != v_loc.end(); ++j)
      std::cout << *j << ' ';
    std::cout << std::endl;
    for (auto j = e_loc.begin(); j != e_loc.end(); ++j)
      std::cout << *j << ' ';
    std::cout << std::endl;
  }
  #pragma omp barrier
#endif

  // Rebase offsets so the local CSR starts at 0, then verify the last local
  // offset equals the local edge count.
  shift_offsets(off_loc);
  ASSERT_EQ(static_cast<size_t>(off_loc[part_offset[i+1]-part_offset[i]]),e_loc[i]);

  create_gdf_column(off_loc, col_off);
  ASSERT_EQ(off_loc.size(), static_cast<size_t>(col_off->size));
  create_gdf_column(ind_loc, col_ind);
  create_gdf_column(val_loc, col_val);
}
GB_bitmap_assign_A_template.c
//------------------------------------------------------------------------------
// GB_bitmap_assign_A_template: traverse over A for bitmap assignment into C
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// This template traverses over all the entries of the matrix A and operates on
// the corresponding entry in C(i,j), using the GB_AIJ_WORK macro.  A can be
// hypersparse, sparse, bitmap, or full.  It is not a scalar.  The matrix
// C must be bitmap or full.

// NOTE: this is an #include'd template; Ap, Ah, Ab, Ai, nI, cvlen, I, J,
// Ikind/Jkind, Icolon/Jcolon, cnvals, chunk, nthreads_max, and GB_AIJ_WORK
// are all defined by the including compilation unit.

{

    //--------------------------------------------------------------------------
    // matrix assignment: slice the entries of A for each task
    //--------------------------------------------------------------------------

    // Parallelism is sized by the entries plus vectors of A; tasks are
    // oversubscribed 8x per thread for load balance (unless single-threaded).
    int nthreads = GB_nthreads (GB_NNZ (A) + A->nvec, chunk, nthreads_max) ;
    int ntasks = (nthreads == 1) ? 1 : (8 * nthreads) ;
    int64_t *pstart_slice = NULL, *kfirst_slice = NULL, *klast_slice = NULL ;
    if (!GB_ek_slice (&pstart_slice, &kfirst_slice, &klast_slice, A, &ntasks))
    {
        // out of memory
        GB_FREE_ALL ;
        return (GrB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // traverse the entries of the matrix A
    //--------------------------------------------------------------------------

    int tid ;
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
        reduction(+:cnvals)
    for (tid = 0 ; tid < ntasks ; tid++)
    {

        // if kfirst > klast then task tid does no work at all
        int64_t kfirst = kfirst_slice [tid] ;
        int64_t klast  = klast_slice  [tid] ;
        // task-local count, folded into cnvals via the reduction below
        int64_t task_cnvals = 0 ;

        //----------------------------------------------------------------------
        // traverse over A (:,kfirst:klast)
        //----------------------------------------------------------------------

        for (int64_t k = kfirst ; k <= klast ; k++)
        {

            //------------------------------------------------------------------
            // find the part of A(:,k) for this task
            //------------------------------------------------------------------

            int64_t jA = GBH (Ah, k) ;
            int64_t pA_start, pA_end ;
            GB_get_pA (&pA_start, &pA_end, tid, k,
                kfirst, klast, pstart_slice, Ap, nI) ;

            //------------------------------------------------------------------
            // traverse over A(:,jA), the kth vector of A
            //------------------------------------------------------------------

            // jC is the column of C addressed by jA through the J index list
            int64_t jC = GB_ijlist (J, jA, Jkind, Jcolon) ;
            int64_t pC0 = jC * cvlen ;      // first entry in C(:,jC)

            for (int64_t pA = pA_start ; pA < pA_end ; pA++)
            {
                // skip entries absent from a bitmap A
                if (!GBB (Ab, pA)) continue ;
                int64_t iA = GBI (Ai, pA, nI) ;
                int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
                int64_t pC = iC + pC0 ;
                // operate on C(iC,jC) at pC, and A(iA,jA) at pA.  The mask
                // can be accessed at pC if M is bitmap or full.  A has any
                // sparsity format so only A(iA,jA) can be accessed at pA.
                // To access a full matrix M for the subassign case, use
                // the position (iA + jA*nI).
                GB_AIJ_WORK (pC, pA) ;
            }
        }
        cnvals += task_cnvals ;
    }

    //--------------------------------------------------------------------------
    // free workspace
    //--------------------------------------------------------------------------

    GB_ek_slice_free (&pstart_slice, &kfirst_slice, &klast_slice) ;
}
pcptdesdecryptcbccaomp.c
/******************************************************************************* * Copyright 2002-2018 Intel Corporation * All Rights Reserved. * * If this software was obtained under the Intel Simplified Software License, * the following terms apply: * * The source code, information and material ("Material") contained herein is * owned by Intel Corporation or its suppliers or licensors, and title to such * Material remains with Intel Corporation or its suppliers or licensors. The * Material contains proprietary information of Intel or its suppliers and * licensors. The Material is protected by worldwide copyright laws and treaty * provisions. No part of the Material may be used, copied, reproduced, * modified, published, uploaded, posted, transmitted, distributed or disclosed * in any way without Intel's prior express written permission. No license under * any patent, copyright or other intellectual property rights in the Material * is granted to or conferred upon you, either expressly, by implication, * inducement, estoppel or otherwise. Any license under such intellectual * property rights must be express and approved by Intel in writing. * * Unless otherwise agreed by Intel in writing, you may not remove or alter this * notice or any other notice embedded in Materials by Intel or Intel's * suppliers or licensors in any way. * * * If this software was obtained under the Apache License, Version 2.0 (the * "License"), the following terms apply: * * You may not use this file except in compliance with the License. You may * obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * * See the License for the specific language governing permissions and * limitations under the License. 
*******************************************************************************/ /* // Name: // ippsTDESDecryptCBC // // Purpose: // Cryptography Primitives. // Decrypt a byte data stream according to TDES. // // */ #include "owndefs.h" #if defined ( _OPENMP ) #include "owncp.h" #include "pcpdes.h" #include "pcptool.h" #include "omp.h" /*F* // Name: // ippsTDESDecryptCBC // // Purpose: // Decrypt byte data stream according to DES in EBC mode using OpenMP API. // // Returns: // ippStsNoErr No errors, it's OK // ippStsNullPtrErr ( pCtx1 == NULL ) || ( pCtx2 == NULL ) || // ( pCtx3 == NULL ) || ( pSrc == NULL ) || // ( pDst == NULL ) || ( pIV == NULL ) // ippStsLengthErr srcLen < 1 // ippStsContextMatchErr ( pCtx1->idCtx != idCtxDES ) || // ( pCtx2->idCtx != idCtxDES ) || // ( pCtx3->idCtx != idCtxDES ) // ippStsUnderRunErr ( srcLen % 8 ) != 0 // // Parameters: // pSrc Pointer to the input ciphertext data stream. // pDst Pointer to the resulting plaintext data stream. // srcLen Ciphertext data stream length in bytes. // pCtx DES context. // pIV Pointers to the IppsDESSpec contexts. // padding Padding scheme indicator. 
//
// Notes:
//
*F*/

/* Decrypt nBlocks DES blocks in CBC mode with triple-DES (decrypt-encrypt-
 * decrypt key order in pRKey[0..2]), starting from IV pIV.  Takes the fast
 * 8-byte-aligned path when possible, otherwise falls back to a byte-copy
 * loop so misaligned and in-place buffers are still handled correctly. */
static void TDES_CBC_processing(const Ipp8u* pIV,
                                const Ipp8u* pSrc, Ipp8u* pDst, int nBlocks,
                                const RoundKeyDES* pRKey[3])
{
   /* copy IV */
   Ipp64u iv;
   CopyBlock8(pIV, &iv);

   /*
   // decrypt block-by-block aligned streams
   */
   if( !(IPP_UINT_PTR(pSrc) & 0x7) && !(IPP_UINT_PTR(pDst) & 0x7) && pSrc!=pDst) {
      DecryptCBC_TDES((const Ipp64u*)pSrc, (Ipp64u*)pDst, nBlocks, pRKey, iv, DESspbox);
   }

   /*
   // decrypt block-by-block misaligned streams
   */
   else {
      Ipp64u tmpInp;
      Ipp64u tmpOut;

      while(nBlocks) {
         /* P = D(E(D(C))) XOR previous ciphertext block (or IV) */
         CopyBlock8(pSrc, &tmpInp);
         tmpOut = Cipher_DES(tmpInp, pRKey[0], DESspbox);
         tmpOut = Cipher_DES(tmpOut, pRKey[1], DESspbox);
         tmpOut = iv ^ Cipher_DES(tmpOut, pRKey[2], DESspbox);
         CopyBlock8(&tmpOut, pDst);

         /* chain: this ciphertext block becomes the next IV */
         iv = tmpInp;
         pSrc += MBS_DES;
         pDst += MBS_DES;
         nBlocks--;
      }
   }
}

/* TDES-CBC decryption entry point (OpenMP build).  Validates arguments,
 * then either decrypts serially or splits the ciphertext across threads.
 * CBC decryption parallelizes because each thread's IV is simply the
 * ciphertext block preceding its chunk. */
IPPFUN( IppStatus, ippsTDESDecryptCBC,(const Ipp8u* pSrc, Ipp8u* pDst, int srcLen,
                                       const IppsDESSpec* pCtx1,
                                       const IppsDESSpec* pCtx2,
                                       const IppsDESSpec* pCtx3,
                                       const Ipp8u* pIV,
                                       IppsPadding padding))
{
   /* test the pointers */
   IPP_BAD_PTR3_RET(pCtx1, pCtx2, pCtx3);
   IPP_BAD_PTR3_RET(pSrc, pIV, pDst);

   /* align the context */
   pCtx1 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx1, DES_ALIGNMENT));
   pCtx2 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx2, DES_ALIGNMENT));
   pCtx3 = (IppsDESSpec*)(IPP_ALIGNED_PTR(pCtx3, DES_ALIGNMENT));

   /* test the context */
   IPP_BADARG_RET(!DES_ID_TEST(pCtx1), ippStsContextMatchErr);
   IPP_BADARG_RET(!DES_ID_TEST(pCtx2), ippStsContextMatchErr);
   IPP_BADARG_RET(!DES_ID_TEST(pCtx3), ippStsContextMatchErr);

   /* test the data stream length */
   IPP_BADARG_RET((srcLen<1), ippStsLengthErr);
   /* Test data stream integrity: must be a whole number of DES blocks. */
   IPP_BADARG_RET((srcLen&(MBS_DES-1)), ippStsUnderRunErr);

   UNREFERENCED_PARAMETER(padding);

   {
      int nBlocks = srcLen / MBS_DES;
      /* One thread per TDES_MIN_BLK_PER_THREAD blocks, capped by the pool. */
      int nThreads = IPP_MIN(IPPCP_GET_NUM_THREADS(), IPP_MAX(nBlocks/TDES_MIN_BLK_PER_THREAD, 1));

      /* decryption runs the three key schedules in reverse: D3, E2, D1 */
      const RoundKeyDES* pRKey[3];
      pRKey[0] = DES_DKEYS(pCtx3);
      pRKey[1] = DES_EKEYS(pCtx2);
      pRKey[2] = DES_DKEYS(pCtx1);

      if(1==nThreads)
         TDES_CBC_processing(pIV, pSrc, pDst, nBlocks, pRKey);

      else {
         int blksThreadReg;      /* blocks per regular thread */
         int blksThreadTail;     /* blocks for the last thread (+ remainder) */

         /* stack buffer for per-thread IVs; heap only if pool is larger */
         Ipp8u locIV[MBS_DES*DEFAULT_CPU_NUM];
         #if defined(__INTEL_COMPILER)
         Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM? kmp_malloc(nThreads*MBS_DES) : locIV;
         #else
         Ipp8u* pLocIV = nThreads>DEFAULT_CPU_NUM ? malloc(nThreads*MBS_DES) : locIV;
         #endif

         if(pLocIV) {
            #pragma omp parallel IPPCP_OMP_LIMIT_MAX_NUM_THREADS(nThreads)
            {
               #pragma omp master
               {
                  int nt;
                  nThreads = omp_get_num_threads();
                  blksThreadReg = nBlocks / nThreads;
                  blksThreadTail = blksThreadReg + nBlocks % nThreads;

                  /* thread 0 uses the caller's IV; thread nt uses the last
                   * ciphertext block of thread nt-1's chunk as its IV */
                  CopyBlock8(pIV, pLocIV+0);
                  for(nt=1; nt<nThreads; nt++)
                     CopyBlock8(pSrc+nt*blksThreadReg*MBS_DES-MBS_DES, pLocIV+nt*MBS_DES);
               }
               /* all threads wait until the IVs are distributed */
               #pragma omp barrier
               {
                  int id = omp_get_thread_num();
                  Ipp8u* pThreadIV  = (Ipp8u*)pLocIV +id*MBS_DES;
                  Ipp8u* pThreadSrc = (Ipp8u*)pSrc + id*blksThreadReg*MBS_DES;
                  Ipp8u* pThreadDst = (Ipp8u*)pDst + id*blksThreadReg*MBS_DES;
                  int blkThread = (id==(nThreads-1))? blksThreadTail : blksThreadReg;
                  TDES_CBC_processing(pThreadIV, pThreadSrc, pThreadDst, blkThread, pRKey);
               }
            }

            if (pLocIV != locIV)
            #if defined(__INTEL_COMPILER)
               kmp_free(pLocIV);
            #else
               free(pLocIV);
            #endif
         }
         else
            return ippStsMemAllocErr;
      }

      return ippStsNoErr;
   }
}

#endif
DRB052-indirectaccesssharebase-orig-no.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
This example is to mimic a memory access pattern extracted from an LLNL
proxy app.  Two pointers have distance of 12.  They are used as base
addresses of two arrays, indexed through an index set.  The index set has
no two indices with distance of 12.  So there is no loop carried
dependence.
*/
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#define N 180
#include <omp.h>

/* 180 indices into base[]; by construction no two differ by exactly 12,
 * so xa1[idx] and xa2[idx] (= base[idx+12]) never touch the same cell. */
int indexSet[180] = {(521), (523), (525), (527), (529), (531),
                     (547), (549), (551), (553), (555), (557),
                     (573), (575), (577), (579), (581), (583),
                     (599), (601), (603), (605), (607), (609),
                     (625), (627), (629), (631), (633), (635),
                     (651), (653), (655), (657), (659), (661),
                     (859), (861), (863), (865), (867), (869),
                     (885), (887), (889), (891), (893), (895),
                     (911), (913), (915), (917), (919), (921),
                     (937), (939), (941), (943), (945), (947),
                     (963), (965), (967), (969), (971), (973),
                     (989), (991), (993), (995), (997), (999),
                     (1197), (1199), (1201), (1203), (1205), (1207),
                     (1223), (1225), (1227), (1229), (1231), (1233),
                     (1249), (1251), (1253), (1255), (1257), (1259),
                     (1275), (1277), (1279), (1281), (1283), (1285),
                     (1301), (1303), (1305), (1307), (1309), (1311),
                     (1327), (1329), (1331), (1333), (1335), (1337),
                     (1535), (1537), (1539), (1541), (1543), (1545),
                     (1561), (1563), (1565), (1567), (1569), (1571),
                     (1587), (1589), (1591), (1593), (1595), (1597),
                     (1613), (1615), (1617), (1619), (1621), (1623),
                     (1639), (1641), (1643), (1645), (1647), (1649),
                     (1665), (1667), (1669), (1671), (1673), (1675),
                     (1873), (1875), (1877), (1879), (1881), (1883),
                     (1899), (1901), (1903), (1905), (1907), (1909),
                     (1925), (1927), (1929), (1931), (1933), (1935),
                     (1951), (1953), (1955), (1957), (1959), (1961),
                     (1977), (1979), (1981), (1983), (1985), (1987),
                     (2003), (2005), (2007), (2009), (2011), (2013)};

int main(int argc,char *argv[])
{
  /* base covers indices up to 2013+12 = 2025 (largest xa2 access). */
  double *base = (double *)(malloc(sizeof(double ) * (2013 + 12 + 1)));
  if (base == 0) {
    printf("Error, malloc() returns NULL. End execution. \n");
    return 1;
  }
  double *xa1 = base;
  double *xa2 = base + 12;   /* second view, shifted by 12 elements */
  int i;

  /* Zero every element the index set can reach (521..2025). */
#pragma omp parallel for private (i)
  for (i = 521; i <= 2025; i += 1) {
    base[i] = 0.0;
  }

// this level of loop has no loop carried dependence
  for (i = 0; i <= 179; i += 1) {
    int idx = indexSet[i];
    xa1[idx] += 1.0;
    xa2[idx] += 3.0;
  }

// verify the results, no overlapping of xa1 vs. xa2, no addition happens to the same element twice
// (each touched cell holds 1.0 or 3.0, never 1.0+3.0 = 4.0)
  for (i = 521; i <= 2025; i += 1) {
    //printf ("%f ", base[i]);
    /* ROSE-expanded form of assert(base[i] != 4.0) */
    (((void )(sizeof(((base[i] != 4.0?1 : 0))))) , (( {
      if (base[i] != 4.0) ;
      else __assert_fail("base[i]!=4.0","DRB052-indirectaccesssharebase-orig-no.c",126,__PRETTY_FUNCTION__);
    })));
  }
  free(base);
  return 0;
}
pi.c
/*
  Copyright since 2016 the OMPi Team
  Dept. of Computer Science & Engineering, University of Ioannina

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; if not, write to the Free Software
  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/

/* pi.c
 * ----
 * Simple pi calculation executed either on the Zynq or the Epiphany.
 */

#include <stdio.h>
#include <omp.h>

#define N 2000000 /* Intervals */

/* Naive pi calculation based on the trapezoid rule.
 * nthr: number of host threads, each of which offloads its own strided
 * share of the N intervals; the per-thread partial sums are combined by
 * the reduction clause. */
void calc_pi(int nthr)
{
	double W = 4.0 / N, W2 = W * W / 16.0, pi = 0.0;

	/* Executed @ host (zynq, aka device 0) */
	#pragma omp parallel num_threads(nthr) reduction(+:pi)
	{
		/* each thread starts at its own index and strides by nthr */
		int i = omp_get_thread_num();

		/* Offloaded @ default device (normally epiphany).
		 * Note that pi is implicitly mapped as tofrom. */
		#pragma omp target map(to:i,W,W2,nthr)
		{
			for (; i < N; i += nthr)
				pi += W / (1.0 + (0.5 + i) * (0.5 + i) * W2);
		}
	}
	/* no newline: main() appends the timing on the same output line */
	printf("pi = %.10lf", pi);
}

/* Calculate pi with 1, 2, 4, 8, 16 threads/kernels, timing each run.
 * The kernel-count ceiling depends on which device is the default. */
int main()
{
	int nthr, max_kernels;
	double t1, t2;

	if (omp_get_default_device() == 0)
	{
		printf("Calculating pi on the Zynq, using 1 and 2 kernels..\n");
		max_kernels = 2;
	}
	else
	{
		printf("Calculating pi on the Epiphany, using 1, 2, 4, 8, and 16 kernels..\n");
		max_kernels = 16;
	}
	for (nthr = 1; nthr <= max_kernels; nthr <<= 1)
	{
		t1 = omp_get_wtime();
		calc_pi(nthr);
		t2 = omp_get_wtime();
		printf("\t[%2d kernels => %2.5lf sec]\n", nthr, t2 - t1);
	}
	return 0;
}
convolution_sgemm_pack1to4.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void im2col_sgemm_pack1to4_sse(const Mat& bottom_im2col, Mat& top_blob, const Mat& kernel, const Mat& _bias, const Option& opt) { // Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator); const int size = bottom_im2col.w; const int maxk = bottom_im2col.h; const int inch = bottom_im2col.c; const int outch = top_blob.c; const float* bias = _bias; // permute Mat tmp; if (size >= 8) tmp.create(8 * maxk, inch, size / 8 + (size % 8) / 4 + size % 4, 4u, 1, opt.workspace_allocator); else if (size >= 4) tmp.create(4 * maxk, inch, size / 4 + size % 4, 4u, 1, opt.workspace_allocator); else tmp.create(maxk, inch, size, 4u, 1, opt.workspace_allocator); { int nn_size = size >> 3; int remain_size_start = 0; #pragma omp parallel for num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 8; float* tmpptr = tmp.channel(i / 8); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { __m128 _r0 = _mm_loadu_ps(img0); __m128 _r1 = _mm_loadu_ps(img0 + 4); _mm_store_ps(tmpptr, _r0); _mm_store_ps(tmpptr + 4, _r1); img0 += size; tmpptr += 8; } } } remain_size_start += nn_size << 3; nn_size = (size - remain_size_start) >> 2; #pragma omp parallel for 
num_threads(opt.num_threads) for (int ii = 0; ii < nn_size; ii++) { int i = remain_size_start + ii * 4; float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { __m128 _r0 = _mm_loadu_ps(img0); _mm_store_ps(tmpptr, _r0); img0 += size; tmpptr += 4; } } } remain_size_start += nn_size << 2; #pragma omp parallel for num_threads(opt.num_threads) for (int i = remain_size_start; i < size; i++) { float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); for (int q = 0; q < inch; q++) { const float* img0 = (const float*)bottom_im2col.channel(q) + i; for (int k = 0; k < maxk; k++) { tmpptr[0] = img0[0]; img0 += size; tmpptr += 1; } } } } #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr0 = top_blob.channel(p); const float zeros[4] = {0.f, 0.f, 0.f, 0.f}; const float* biasptr = bias ? bias + p * 4 : zeros; int i = 0; for (; i + 7 < size; i += 8) { const float* tmpptr = tmp.channel(i / 8); const float* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 __m128 _sum0 = _mm_loadu_ps(biasptr); __m128 _sum1 = _sum0; __m128 _sum2 = _sum0; __m128 _sum3 = _sum0; __m128 _sum4 = _sum0; __m128 _sum5 = _sum0; __m128 _sum6 = _sum0; __m128 _sum7 = _sum0; for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(kptr0); __m128 _val0 = _mm_load1_ps(tmpptr); __m128 _val1 = _mm_load1_ps(tmpptr + 1); __m128 _val2 = _mm_load1_ps(tmpptr + 2); __m128 _val3 = _mm_load1_ps(tmpptr + 3); __m128 _val4 = _mm_load1_ps(tmpptr + 4); __m128 _val5 = _mm_load1_ps(tmpptr + 5); __m128 _val6 = _mm_load1_ps(tmpptr + 6); __m128 _val7 = _mm_load1_ps(tmpptr + 7); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); _sum4 = _mm_comp_fmadd_ps(_val4, _w0, _sum4); _sum5 = _mm_comp_fmadd_ps(_val5, _w0, 
_sum5); _sum6 = _mm_comp_fmadd_ps(_val6, _w0, _sum6); _sum7 = _mm_comp_fmadd_ps(_val7, _w0, _sum7); tmpptr += 8; kptr0 += 4; } _mm_store_ps(outptr0, _sum0); _mm_store_ps(outptr0 + 4, _sum1); _mm_store_ps(outptr0 + 8, _sum2); _mm_store_ps(outptr0 + 12, _sum3); _mm_store_ps(outptr0 + 16, _sum4); _mm_store_ps(outptr0 + 20, _sum5); _mm_store_ps(outptr0 + 24, _sum6); _mm_store_ps(outptr0 + 28, _sum7); outptr0 += 32; } for (; i + 3 < size; i += 4) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4); const float* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 __m128 _sum0 = _mm_loadu_ps(biasptr); __m128 _sum1 = _sum0; __m128 _sum2 = _sum0; __m128 _sum3 = _sum0; for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(kptr0); __m128 _val0 = _mm_load1_ps(tmpptr); __m128 _val1 = _mm_load1_ps(tmpptr + 1); __m128 _val2 = _mm_load1_ps(tmpptr + 2); __m128 _val3 = _mm_load1_ps(tmpptr + 3); _sum0 = _mm_comp_fmadd_ps(_val0, _w0, _sum0); _sum1 = _mm_comp_fmadd_ps(_val1, _w0, _sum1); _sum2 = _mm_comp_fmadd_ps(_val2, _w0, _sum2); _sum3 = _mm_comp_fmadd_ps(_val3, _w0, _sum3); tmpptr += 4; kptr0 += 4; } _mm_store_ps(outptr0, _sum0); _mm_store_ps(outptr0 + 4, _sum1); _mm_store_ps(outptr0 + 8, _sum2); _mm_store_ps(outptr0 + 12, _sum3); outptr0 += 16; } for (; i < size; i++) { const float* tmpptr = tmp.channel(i / 8 + (i % 8) / 4 + i % 4); const float* kptr0 = kernel.channel(p); int nn = inch * maxk; // inch always > 0 __m128 _sum = _mm_loadu_ps(biasptr); for (int j = 0; j < nn; j++) { __m128 _w0 = _mm_load_ps(kptr0); __m128 _val = _mm_load1_ps(tmpptr); _sum = _mm_comp_fmadd_ps(_w0, _val, _sum); tmpptr += 1; kptr0 += 4; } _mm_store_ps(outptr0, _sum); outptr0 += 4; } } } static void convolution_im2col_sgemm_transform_kernel_pack1to4_sse(const Mat& _kernel, Mat& kernel_tm, int inch, int outch, int kernel_w, int kernel_h) { const int maxk = kernel_w * kernel_h; // interleave // src = maxk-inch-outch // dst = 4b-4a-maxk-inch/4a-outch/4b Mat kernel = 
_kernel.reshape(maxk, inch, outch); kernel_tm.create(4 * maxk, inch, outch / 4); int q = 0; for (; q + 3 < outch; q += 4) { const Mat k0 = kernel.channel(q); const Mat k1 = kernel.channel(q + 1); const Mat k2 = kernel.channel(q + 2); const Mat k3 = kernel.channel(q + 3); float* g00 = kernel_tm.channel(q / 4); for (int p = 0; p < inch; p++) { const float* k00 = k0.row(p); const float* k10 = k1.row(p); const float* k20 = k2.row(p); const float* k30 = k3.row(p); for (int k = 0; k < maxk; k++) { g00[0] = k00[k]; g00[1] = k10[k]; g00[2] = k20[k]; g00[3] = k30[k]; g00 += 4; } } } } static void convolution_im2col_sgemm_pack1to4_sse(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel, const Mat& _bias, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, const Option& opt) { int w = bottom_blob.w; int inch = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; const int size = outw * outh; const int maxk = kernel_w * kernel_h; // im2col Mat bottom_im2col(size, maxk, inch, 4u, 1, opt.workspace_allocator); { const int gap = w * stride_h - outw * stride_w; #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < inch; p++) { const Mat img = bottom_blob.channel(p); float* ptr = bottom_im2col.channel(p); for (int u = 0; u < kernel_h; u++) { for (int v = 0; v < kernel_w; v++) { const float* sptr = img.row<const float>(dilation_h * u) + dilation_w * v; for (int i = 0; i < outh; i++) { int j = 0; for (; j + 3 < outw; j += 4) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; ptr[2] = sptr[stride_w * 2]; ptr[3] = sptr[stride_w * 3]; sptr += stride_w * 4; ptr += 4; } for (; j + 1 < outw; j += 2) { ptr[0] = sptr[0]; ptr[1] = sptr[stride_w]; sptr += stride_w * 2; ptr += 2; } for (; j < outw; j++) { ptr[0] = sptr[0]; sptr += stride_w; ptr += 1; } sptr += gap; } } } } } im2col_sgemm_pack1to4_sse(bottom_im2col, top_blob, kernel, _bias, opt); }
memdbg.c
/* * This software was written by Jim Fougeron jfoug AT cox dot net * in 2013. No copyright is claimed, and the software is hereby * placed in the public domain. In case this attempt to disclaim * copyright and place the software in the public domain is deemed * null and void, then the software is Copyright (c) 2013 Jim Fougeron * and it is hereby released to the general public under the following * terms: * * This software may be modified, redistributed, and used for any * purpose, in source and binary forms, with or without modification. */ /* * memdbg.c * Memory management debugging (at runtime) * * memdbg.c contains routines detect, and report memory * problems, such as double frees, passing bad pointers to * free, most buffer overwrites. Also, tracking of non-freed * data, showing memory leaks, can also be shown. * * Compilation Options (provided from Makefile CFLAGS) * * MEMDBG_ON If this is NOT defined, then memdbg will * get out of your way, and most normal memory functions * will be called with no overhead at all. * * MEMDBG_EXTRA_CHECKS If defined, then we do not 'really' free * the memory. We simply set the fence posts to deleted status, * and proceed. This allows us finding double frees, and other * usages of smashes. NOTE, when this is set, and there are a * LOT of memory alloc/frees, then at some point the calls to * free will fail. If this happens, there is code in place that * frees the oldest freed block (really frees it), and does that * over and over again, until either we have no freed blocks left * OR the app is able to allocate this new buffer. In this situation * we do lose track of those older freed blocks of memory, but it * allows the application to continue forward, even though this * debugging code exausted all memory. 
*/ #if defined (MEMDBG_ON) #include <stdio.h> #include <stdlib.h> #include <string.h> #include "common.h" #define __MEMDBG_C_FILE__ #include "memdbg.h" #include "pseudo_intrinsics.h" #include "jumbo.h" #ifdef _OPENMP #include <omp.h> #endif /* * This function ALWAYS must be defined. It is (HAS) to be used if there is code which * has some library code that allocates memory which was NOT handled by one of the allocation * functions within this wrapper class, BUT which really needs to be freed. Thus the user code * really needs to have straight access to the libc function free(). We give them that access, * but they have to call this function, and not the 'free' function, which would get wrapped * and call into MEMDBG_free(p, filename, fileline). */ void MEMDBG_libc_free(void *p) { free(p); } void *MEMDBG_libc_alloc(size_t size) { return malloc(size); } void *MEMDBG_libc_calloc(size_t count, size_t size) { return calloc(count, size); } #ifdef _MSC_VER #define malloc(a) _aligned_malloc(a,16) #define realloc(a,b) _aligned_realloc(a,b,16) #define free(a) _aligned_free(a) #endif /* * these fence posts (first fence post guarding underflow), are: * MEMFPOST == allocated memory * MEMFPOSTt == allocated 'tiny' memory (allocated with mem_alloc_tiny() from memory.c) * MEMFPOSTd == freed (deleted) memory. Will only be set this way, and stored in the * freed_memlist, if MEMDBG_EXTRA_CHECKS is set. */ const char *cpMEMFPOST = "\xa5\xa5\xa5\xa5"; const char *cpMEMFPOSTd = "\x5a\x5a\x5a\x5a"; const char *cpMEMFPOSTt = "\xa5\x55\xa5\xa5"; /* * this structure will contain data that is butted RIGHT against * the tail end of the allocated block. We put a fence post here, * and thus can detect buffer overwrite. */ typedef struct _hdr2 { /* we use a unsigned char, and do not care about alignment. We ALWAYS treat this var with * a memcpy, memcmp, etc, so that this works the same on aligned required CPU or non-aligned required. 
*/ unsigned char mdbg_fpst[4]; } MEMDBG_HDR2; /* * This structure is carefully crafted to keep it in proper alignment. * We later will put the HDR2 RIGHT against the head end and tail end * of the buffer. This allows us to catch 1 byte over or underflow. */ typedef struct _hdr { struct _hdr *mdbg_next; struct _hdr *mdbg_prev; /* points to just 'right' before allocated memory, for underflow catching */ MEMDBG_HDR2 *mdbg_hdr1; /* points to just 'right' after allocated memory, for overflow catching */ MEMDBG_HDR2 *mdbg_hdr2; const char *mdbg_file; uint32_t mdbg_line; uint32_t mdbg_cnt; uint32_t mdbg_size; } MEMDBG_HDR; static size_t mem_size = 0; static size_t max_mem_size = 0; static size_t mem_sizet = 0; static size_t max_mem_sizet = 0; static MEMDBG_HDR *memlist = NULL; static unsigned long alloc_cnt = 0; #ifdef MEMDBG_EXTRA_CHECKS static MEMDBG_HDR *freed_memlist = NULL; static size_t freed_mem_size = 0; static unsigned long freed_cnt = 0; #endif #define RESERVE_SZ (sizeof(MEMDBG_HDR) + sizeof(MEMDBG_HDR*) + 4 + 16) #define RESERVE_SZ_AL(a) (sizeof(MEMDBG_HDR) + sizeof(MEMDBG_HDR*) + 4 + 16 + a*2) #define CLIENT_2_HDR_PTR(a) ((MEMDBG_HDR *) (((char *) ((ARCH_WORD)(((char *)a)-4-sizeof(MEMDBG_HDR*)) & ~0xF)))) #define CLIENT_2_HDR(a) ((MEMDBG_HDR *) (((char *) ((ARCH_WORD)(((char *)a)-4-sizeof(MEMDBG_HDR*)) & ~0xF))))->mdbg_next #define HDR_2_CLIENT(a) ((void *) (((char*)((MEMDBG_HDR *) (a->mdbg_hdr1))) + 4)) static void mem_fence_post_err_fp (void *, const char *, int, char *fp, int line); static void mem_fence_post_err_ne_fp (void *, const char *, int, char *fp, int line); static void mem_fence_post_errd_fp (void *, const char *, int, char *fp, int line); static void mem_fence_post_errd_ne_fp(void *, const char *, int, char *fp, int line); #define mem_fence_post_err(a,b,c) mem_fence_post_err_fp(a,b,c,__FILE__,__LINE__) #define mem_fence_post_err_ne(a,b,c) mem_fence_post_err_ne_fp(a,b,c,__FILE__,__LINE__) #define mem_fence_post_errd(a,b,c) 
mem_fence_post_errd_fp(a,b,c,__FILE__,__LINE__)
#define mem_fence_post_errd_ne(a,b,c) mem_fence_post_errd_ne_fp(a,b,c,__FILE__,__LINE__)

#ifdef MEMDBG_EXTRA_CHECKS
/* NOTE: when this function is called, the memory (client memory) gets SMASHED */
/* If this starts causing the program to crash, then it is likely that the client */
/* code is using dangling pointers by accessing the memory after a free or realloc */
static void MEMDBG_FREEDLIST_add(MEMDBG_HDR *);
#endif

/*
 * these are now macros. This makes it easier for doing omp critical
 * sections. It is illegal to branch into or out of a CRITICAL block.
 * NOTE(review): deliberately NOT wrapped in do { } while (0) — they are
 * multi-statement sequences expanded inside omp critical blocks, and the
 * caller must already hold the memdbg_crit section when using them.
 */
/* Unlink p from the doubly-linked 'memlist' of live allocations. */
#define MEMDBG_LIST_delete(p) \
	if (p->mdbg_next != NULL) \
		p->mdbg_next->mdbg_prev = p->mdbg_prev; \
	if (p->mdbg_prev != NULL) \
		p->mdbg_prev->mdbg_next = p->mdbg_next; \
	else \
		memlist = p->mdbg_next

/* Push p onto the head of 'memlist'. */
#define MEMDBG_LIST_add(p) \
	p->mdbg_next = memlist; \
	p->mdbg_prev = NULL; \
	if (memlist != NULL) \
		memlist->mdbg_prev = p; \
	memlist = p

/*
 * This function can be called directly by client code.
 * it lists how much memory is currently allocated (if show_freed is
 * non-zero and MEMDBG_EXTRA_CHECKS is built in, it instead reports the
 * bytes held on the freed list).
 * a good check before program exit, is are there 0
 * bytes allocated.
 */
size_t MemDbg_Used(int show_freed)
{
#ifdef MEMDBG_EXTRA_CHECKS
	if (show_freed)
		return freed_mem_size;
#endif
	/* live 'normal' bytes plus live 'tiny' (mem_alloc_tiny) bytes */
	return mem_size+mem_sizet;
}

/*
 * This function can be called directly by client code.
 * It writes out all non-freed memory.
*/ void MemDbg_Display(FILE *fp) { MEMDBG_HDR *p; int idx; if (!(mem_size+mem_sizet) && !getenv("MEMDBG")) return; fprintf(fp, "\n------------------------------\n"); fprintf(fp, "MEMDBG: allocation information (display):\n"); fprintf(fp, " current normal alloc mem (leaks)"LLu" max normal mem allocated: "LLu"\n", (unsigned long long)mem_size, (unsigned long long)max_mem_size); fprintf(fp, " current 'tiny' alloc mem (leaks)"LLu" max tiny mem allocated: "LLu"\n", (unsigned long long)mem_sizet, (unsigned long long)max_mem_sizet); #ifdef MEMDBG_EXTRA_CHECKS fprintf(fp, " Freed mem size: "LLu" (freed cnt: %lu)", (unsigned long long)freed_mem_size, freed_cnt); #endif if (!(mem_size+mem_sizet)) return; fprintf(fp, "\n"); fprintf(fp, "Index : alloc# : Size : File(Line) [first 20 bytes, or size of bytes]\n"); idx = 0; p = memlist; while (p != NULL) { int bfreed = 0, bbad=0; fprintf(fp, "%-5d : %-6d : %6llu : %s(%u)", idx++, p->mdbg_cnt, (unsigned long long)p->mdbg_size, p->mdbg_file, p->mdbg_line); if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4) && memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4)) { bbad=1; if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) { fprintf(fp, " INVALID ( freed already? 
)"); bfreed = 1; } else fprintf(fp, " INVALID ( buffer underflow )"); } if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOST, 4)) { if (bfreed && !memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOSTd, 4)) { bbad=1; fprintf(fp, " YES Data was freed."); } else { unsigned i; char *cp = ((char*)p)+RESERVE_SZ; fprintf(fp, " INVALID (buffer overflow) tail of block: "); cp = (char*)p->mdbg_hdr2->mdbg_fpst; cp -= 16; for (i = 0; i < 20; ++i) { if (*cp < ' ' || *cp > '~') fprintf(fp, "."); else fprintf(fp, "%c", *cp); ++cp; } fprintf(fp, " and the head of the block was: "); } } if (!bbad) { unsigned i; char *cp = ((char*)p)+RESERVE_SZ; fprintf(fp, " "); for (i = 0; i < 20 && i < p->mdbg_size; ++i) { if (*cp < ' ' || *cp > '~') fprintf(fp, "."); else fprintf(fp, "%c", *cp); ++cp; } } fprintf(fp, "\n"); p = p->mdbg_next; } } /* * This function can be called directly by client code. * It will walk the list of memory, 'looking' for errors. */ void MemDbg_Validate(int level) { MemDbg_Validate_msg2(level, NULL, 0); } void MemDbg_Validate_msg(int level, const char *pMsg) { MemDbg_Validate_msg2(level, pMsg, 0); } void MemDbg_Validate_msg2(int level, const char *pMsg, int bShowExMessages) { /* Level 0 we ALWAYS walk the alloc list, looking for over/underwrite, and validate a few other items. */ MEMDBG_HDR *p = memlist; int error = 0; int cnt=0; #ifdef MEMDBG_EXTRA_CHECKS unsigned char *cp; unsigned i; #endif if (bShowExMessages) { if (pMsg) fprintf(stderr, "%s\n", pMsg); fprintf(stderr, "MemDbg_Validate level 0 checking"); } while (p) { if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4) && memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4)) { ++cnt; if (cnt < 100) { if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) fprintf(stderr, "\nDeleted memory still in chain\n"); else { fprintf(stderr, "\nMemory buffer underwrite found! 
Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); } } error = 1; } if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOST, 4)) { ++cnt; if (cnt < 100) { if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) { } else { fprintf(stderr, "\nMemory buffer overwrite found! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); } } error = 1; } // Loop detect code { MEMDBG_HDR volatile *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); if (level == MEMDBG_VALIDATE_MIN) return; #ifdef MEMDBG_EXTRA_CHECKS // Ok, we have a list of all freed items. We will do work on this. p = freed_memlist; if (!p) return; cnt = 0; if (bShowExMessages) fprintf(stderr, "MemDbg_Validate level 1 checking"); while (p) { if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTd, 4)) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer underwrite found! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; } if (memcmp(p->mdbg_hdr2->mdbg_fpst, cpMEMFPOSTd, 4)) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer overwrite found! 
Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; } // Loop detect code { MEMDBG_HDR *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); if (level == MEMDBG_VALIDATE_DEEP) return; p = freed_memlist; cnt = 0; if (bShowExMessages) fprintf(stderr, "MemDbg_Validate level 2 checking"); while (p) { cp = (unsigned char*)HDR_2_CLIENT(p); if (p->mdbg_size != p->mdbg_hdr2->mdbg_fpst - cp) { fprintf(stderr, "\nFreed Memory buffer underwrite found (size var busted)! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; } else { for (i = 0; i < p->mdbg_size; ++i) { // in 'deeper' mode, we only look at first 8 bytes. If these are not overwritten, it is less likely that the buffer // has been written to. It 'can' be written to later on, and if we use deepest, we will look at the FULL buffer. if (i == 8) break; if (*cp++ != 0xCD) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer modification found! 
Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; break; } } } // Loop detect code { MEMDBG_HDR *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); if (level == MEMDBG_VALIDATE_DEEPER) return; p = freed_memlist; cnt = 0; if (bShowExMessages) fprintf(stderr, "MemDbg_Validate level 3 checking"); while (p) { cp = (unsigned char*)HDR_2_CLIENT(p); // in this deepest mode, we look at the ENTIRE buffer. In deeper, we looked at first 8, so here, we just start from 8 and look forward. for (i = 8; i < p->mdbg_size; ++i) { if (*cp++ != 0xCD) { ++cnt; if (cnt < 100) fprintf(stderr, "\nFreed Memory buffer modification found! Will try to list what file/line allocated the buffer\n"); mem_fence_post_err_ne(p, p->mdbg_file, p->mdbg_line); error = 1; break; } } // Loop detect code { MEMDBG_HDR *p2 = p->mdbg_next; while (p2) { if (p2 == p || p2 == p2->mdbg_next) { fprintf (stderr, "Error, internal loop in the memdbg linked list, aborting\n"); break; } p2 = p2->mdbg_next; } } if (cnt > 1000) break; p = p->mdbg_next; } if (error) { fprintf(stderr, "\nExiting due to the error detected\n"); if (cnt > 100) fprintf(stderr, "There were %d total errors, only first 100 shown\n", cnt); exit(1); } if (bShowExMessages) fprintf(stderr, " Passed\n"); #endif } #ifdef MEMDBG_EXTRA_CHECKS /* Ok, if we are out of memory, due to keeping too much freed memory around, then free * up oldest blocks until we can malloc this block. 
the rar format is a bad actor, * as could be many of the 'non-hash' (old zip for sure), as these have to decrypt * a full file, to be assured the password is correct. */ static void release_oldest_freed_block() { MEMDBG_HDR *p = freed_memlist, *pp; if (!p) return; #ifdef _OPENMP #pragma omp critical (memdbg_crit) #endif { p = freed_memlist; while (p->mdbg_next) p = p->mdbg_next; // now unlink it. freed_mem_size -= p->mdbg_size; --freed_cnt; p->mdbg_prev->mdbg_next = NULL; pp = p->mdbg_prev; } // now free it free(p); if (freed_cnt > 10) { // free one more. #ifdef _OPENMP #pragma omp critical (memdbg_crit) { // NOTE, we can not be assured that pp was still pointing // to the last item in the list. We have to look AGAIN, // within a critical section. pp = freed_memlist; while (pp->mdbg_next) pp = pp->mdbg_next; #endif freed_mem_size -= pp->mdbg_size; --freed_cnt; pp->mdbg_prev->mdbg_next = NULL; #ifdef _OPENMP } #endif // now free it free(pp); } } #endif void * MEMDBG_calloc(size_t count, size_t size, char *file, int line) { char *p; size *= count; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_calloc "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); p = (char*)MEMDBG_alloc(size,file,line); memset(p, 0, size); return p; } /* * MEMDBG_alloc * Allocate a memory block. makes a protected call to malloc(), allocating * extra data, and adding data to all required structures. */ void * MEMDBG_alloc(size_t size, char *file, int line) { MEMDBG_HDR *p, *p2; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); // TODO: we have to compute proper size here. 
p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4); #ifdef MEMDBG_EXTRA_CHECKS #ifdef _OPENMP { int i = 0; do { #pragma omp critical (memdbg_crit) { if (!p && freed_mem_size > (RESERVE_SZ + size + 4) && !p && freed_cnt) i = 1; } if (i) { release_oldest_freed_block(); p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4); } } while (i && !p); } #else /* this is the 'right' block, but hard to do with the restrictions of no branching out that omp critical places on us */ if (!p && freed_mem_size > (RESERVE_SZ + size + 4)) { while (!p && freed_cnt) { release_oldest_freed_block(); p = (MEMDBG_HDR*)malloc(RESERVE_SZ + size + 4); } } #endif #endif if (!p) { if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return NULL; } p->mdbg_hdr1 = (MEMDBG_HDR2*)(((char*)p)+RESERVE_SZ-4); p2 = CLIENT_2_HDR_PTR(p->mdbg_hdr1+4); memcpy(p2, &p, sizeof(p)); memcpy(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4); p->mdbg_size = size; p->mdbg_file = file; p->mdbg_line = line; p->mdbg_hdr2 = (MEMDBG_HDR2*)(((char*)p->mdbg_hdr1)+4 + size); memcpy(p->mdbg_hdr2, cpMEMFPOST, 4); #ifdef _OPENMP #pragma omp critical (memdbg_crit) #endif { p->mdbg_cnt = ++alloc_cnt; mem_size += size; if (mem_size > max_mem_size) max_mem_size = mem_size; MEMDBG_LIST_add(p); } if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return HDR_2_CLIENT(p); } /* * MEMDBG_alloc_align * Allocate a memory block. makes a protected call to malloc(), allocating * extra data, and adding data to all required structures. 
*/ void * MEMDBG_alloc_align(size_t size, int align, char *file, int line) { MEMDBG_HDR *p, *p2; char *p3; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc_align "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); p = (MEMDBG_HDR*)malloc(RESERVE_SZ_AL(align) + size + 4); if (!p) { if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc_align (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return NULL; } p3 = ((char*)p)+RESERVE_SZ+align-1-4; p3 -= ((size_t)p3)%align; if ( (((size_t)p3)/align) % align == 0) p3 += align; p->mdbg_hdr1 = (MEMDBG_HDR2*)(p3-4); p2 = CLIENT_2_HDR_PTR(p3); memcpy(p2, &p, sizeof(p)); memcpy(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4); p->mdbg_size = size; p->mdbg_file = file; p->mdbg_line = line; p->mdbg_hdr2 = (MEMDBG_HDR2*)(p3 + size); memcpy(p->mdbg_hdr2, cpMEMFPOST, 4); #ifdef _OPENMP #pragma omp critical (memdbg_crit) #endif { p->mdbg_cnt = ++alloc_cnt; mem_size += size; if (mem_size > max_mem_size) max_mem_size = mem_size; MEMDBG_LIST_add(p); } if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_alloc_align (end) "LLd" %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); return HDR_2_CLIENT(p); } /* * MEMDBG_realloc * Reallocate a memory block makes a protected call to realloc(), allocating * extra data, and adding data to all required structures. * *** realloc is a NASTY function. The code here has taken a few turns, and * has reduced this to simply allocating a new block (or freeing if size is 0) * and copying the 'known' amount of data to the new block, and then freeing * the prior block. If the realloc is larger than before, then then undefined * data at end of the block is set to 0xcd. NOTE, this code was changed in * this manner due to not being able to find the bug in the original re-alloc * and bug #2062 in the rar format. 
*/ void * MEMDBG_realloc(void *ptr, size_t size, char *file, int line) { MEMDBG_HDR *p; unsigned char *v; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_realloc("LLd") %s:%d mem:"LLd"\n", (unsigned long long)size, file, line, (unsigned long long)mem_size); /* if ptr is null, this function works just like alloc, so simply use alloc */ if (!ptr) return MEMDBG_alloc(size, file, line); if (!size) { MEM_FREE(ptr); return NULL; } v = (unsigned char*)MEMDBG_alloc(size, file, line); p = CLIENT_2_HDR(ptr); if (size > p->mdbg_size) { memcpy(v, ((unsigned char*)(p->mdbg_hdr1))+4, p->mdbg_size); memset(v+p->mdbg_size, 0xcd, size-p->mdbg_size); } else memcpy(v, ((unsigned char*)(p->mdbg_hdr1))+4, size); MEMDBG_free(ptr,file,line); return v; } /* * MEMDBG_strdup * Duplicate a ASCIIZ string in memory, with a protected call to strdup, * allocating extra data, and adding data to all required structures. */ char *MEMDBG_strdup(const char *str, char *file, int line) { char * s; if ( ((signed long long)mem_size) < 0) fprintf(stderr, "MEMDBG_strdup(%ld) %s:%d mem:"LLd"\n", (long)strlen(str), file, line, (unsigned long long)mem_size); s = (char*)MEMDBG_alloc(strlen(str)+1, file, line); if (s != NULL) strcpy(s, str); return s; } /* * Return the count 'id' count of an allocated block. This will match the * value shown on a leak report, and may help to line up exactly which * block is leaking */ unsigned MEMDBG_get_cnt (const void *ptr, const char **err_msg) { MEMDBG_HDR *p = CLIENT_2_HDR(ptr); *err_msg = "valid memdbg block"; if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4)) *err_msg = "INVALID memdbg memory (possible underflow), mdbg_cnt returned may not be correct!"; return (unsigned)p->mdbg_cnt; } /* * Return the size of the allocated buffer. The size here is the size of data * that the user would see. This is not the full memdbg buffer size. This * would be the size reported in a leak report. 
*/
size_t MEMDBG_get_size(const void *ptr, const char **err_msg)
{
	MEMDBG_HDR *p = CLIENT_2_HDR(ptr);
	*err_msg = "valid memdbg block";
	/* validate the leading fence post before trusting the header fields */
	if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4))
		*err_msg = "INVALID memdbg memory (possible underflow), mdbg_size returned may not be correct!";
	return p->mdbg_size;
}

/*
 * Return the file and line number of the caller code that allocated this
 * buffer.  These match what a leak report would print for the block.
 */
const char *MEMDBG_get_file(const void *ptr, const char **err_msg)
{
	MEMDBG_HDR *p = CLIENT_2_HDR(ptr);
	*err_msg = "valid memdbg block";
	if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4))
		*err_msg = "INVALID memdbg memory (possible underflow), mdbg_file returned may not be correct!";
	return p->mdbg_file;
}

unsigned MEMDBG_get_line(const void *ptr, const char **err_msg)
{
	MEMDBG_HDR *p = CLIENT_2_HDR(ptr);
	*err_msg = "valid memdbg block";
	if (memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4))
		*err_msg = "INVALID memdbg memory (possible underflow), mdbg_line returned may not be correct!";
	return (unsigned)p->mdbg_line;
}

/*
 * MEMDBG_free
 * Free a memory block, checking a lot of data, which would have been
 * set at allocation time.
 *
 * Classification of the block by its two fence posts:
 *   hdr1==cpMEMFPOST  && hdr2==cpMEMFPOST  -> normal block
 *   hdr1==cpMEMFPOSTt && hdr2==cpMEMFPOST  -> 'tiny' block (only the
 *                                             leading post is retagged)
 *   both == cpMEMFPOSTd                    -> already freed (double free)
 *   anything else                          -> fence-post corruption
 */
void MEMDBG_free(const void *ptr, char *file, int line)
{
	MEMDBG_HDR *p;
	int err=0, i;
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		p = CLIENT_2_HDR(ptr);
		/* is this correctly allocated memory */
		for (i = 0; i < 4; ++i)
			if ( ((char*)(p->mdbg_hdr1->mdbg_fpst))[i] != cpMEMFPOST[i] || ((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOST[i])
				break;
		if (i == 4)
			/* yes, correctly allocated memory */
			mem_size -= p->mdbg_size;
		else {
			/* it could be a 'tiny' allocated block */
			for (i = 0; i < 4; ++i)
				if ( ((char*)(p->mdbg_hdr1->mdbg_fpst))[i] != cpMEMFPOSTt[i] || ((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOST[i])
					break;
			if (i == 4)
				/* yes, and valid tiny block */
				mem_sizet -= p->mdbg_size;
			else {
				/* some error, i.e. bad block */
				err = 1;
				/* distinguish corruption from a double free: a freed
				 * block has both posts stamped with cpMEMFPOSTd */
				for (i = 0; i < 4; ++i) {
					if (((char*)(p->mdbg_hdr1->mdbg_fpst))[i] != cpMEMFPOSTd[i] || ((char*)(p->mdbg_hdr2->mdbg_fpst))[i] != cpMEMFPOSTd[i]) {
						break;
					}
				}
				if (i == 4)
					err = 2; /* double free */
			}
		}
		if (!err) {
			/* unlink from the live list, then stamp both posts as freed */
			MEMDBG_LIST_delete(p);
			for (i = 0; i < 4; ++i) {
				((char*)(p->mdbg_hdr2->mdbg_fpst))[i] = cpMEMFPOSTd[i];
				((char*)(p->mdbg_hdr1->mdbg_fpst))[i] = cpMEMFPOSTd[i];
			}
		}
	}
	/* report outside the critical section; both reporters log and exit */
	if (err) {
		if (err == 2)
			mem_fence_post_errd(p, file, line);
		else
			mem_fence_post_err(p, file, line);
		return;
	}
#ifndef MEMDBG_EXTRA_CHECKS
	free(p);
#else
	/* extra-checks builds keep freed blocks around to catch use-after-free */
	MEMDBG_FREEDLIST_add(p);
#endif
	if ( ((signed long long)mem_size) < 0)
		fprintf(stderr, "MEMDBG_free (end) %s:%d mem:"LLd"\n", file, line, (unsigned long long)mem_size);
}

#ifdef MEMDBG_EXTRA_CHECKS
/* NOTE, there is no LIST_delete() for the freed list.  We only put
 * data onto this list, it is kept for full runtime.  We may want to
 * later add some way for the app to clean it up, but for now, we
 * add it, and keep it all. */
static void MEMDBG_FREEDLIST_add(MEMDBG_HDR *p)
{
	unsigned char *cp;
	size_t i;
#ifdef _OPENMP
#pragma omp critical (memdbg_crit)
#endif
	{
		freed_mem_size += p->mdbg_size;
		++freed_cnt;
		/* push onto the head of the freed list */
		p->mdbg_next = freed_memlist;
		p->mdbg_prev = NULL;
		if (freed_memlist != NULL)
			freed_memlist->mdbg_prev = p;
		freed_memlist = p;
		/* Ok, now 'DEADBEEF' the original data buffer, so any later read
		 * of supposedly-freed memory is recognizable (0xCD filler) */
		cp = (unsigned char*)HDR_2_CLIENT(p);
		for (i = 0; i < p->mdbg_size; ++i)
			*cp++ = 0xCD;
	}
}
#endif

/*
 * These functions allow taking a memory snapshot,
 * calling some code, then validating that memory
 * is the same after the code.  This will help
 * catch memory leaks and other such problems, within
 * formats and such.  Simply get the snapshot,
 * run self tests (or other), when it exits, check
 * the snapshot to make sure nothing leaked.
*/ MEMDBG_HANDLE MEMDBG_getSnapshot(int id) { MEMDBG_HANDLE h; h.id = id; h.mem_size = mem_size; h.alloc_cnt = alloc_cnt; return h; } void MEMDBG_checkSnapshot(MEMDBG_HANDLE h) { /* call the real function, but list do not exit on leak */ MEMDBG_checkSnapshot_possible_exit_on_error(h,0); } /* NOT needed to be thread safe, must be called from single threaded code */ void MEMDBG_checkSnapshot_possible_exit_on_error(MEMDBG_HANDLE h, int exit_on_any_leaks) { /* ok, we do several things. * 1 walk allocation change, showing any memory 'newer' than in the handle (not tiny alloc stuff). * 2 validate allocation chain (and free chain if in extra mode). * if there were any errors in #2, then exit. * if any memory leaks (#1) and exit_on_any_leaks true, we also exit. */ MEMDBG_HDR *p = memlist; int leak = 0; /* first step, walk allocation list, looking for leaks */ while (p) { if (p->mdbg_cnt > h.alloc_cnt) { if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4)) { leak = 1; fprintf(stderr, "Mem leak: "LLu" bytes, alloc_num %d, file %s, line %d\n", (unsigned long long)p->mdbg_size, p->mdbg_cnt, p->mdbg_file, p->mdbg_line); } //else fprintf(stderr, "Mem : "LLu" bytes, alloc_num %d, file %s, line %d\n", (unsigned long long)p->mdbg_size, p->mdbg_cnt, p->mdbg_file, p->mdbg_line); } p = p->mdbg_next; } MemDbg_Validate_msg2(3, "MEMDBG_checkSnapshot", 0); if (leak) { exit(1); } } /* MUST be thread safe */ void MEMDBG_tag_mem_from_alloc_tiny(void *ptr) { MEMDBG_HDR *p; p = CLIENT_2_HDR(ptr); #ifdef _OPENMP #pragma omp critical (memdbg_crit) #endif { if (!memcmp(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOST, 4)) { memcpy(p->mdbg_hdr1->mdbg_fpst, cpMEMFPOSTt, 4); mem_size -= p->mdbg_size; mem_sizet += p->mdbg_size; if (mem_sizet > max_mem_sizet) max_mem_sizet = mem_sizet; } } } static void mem_fence_post_err_fp(void *p, const char *file, int line, char *fp, int line2) { mem_fence_post_err_ne_fp(p, file, line,fp,line2); MemDbg_Display(stderr); exit(1); } static void mem_fence_post_errd_fp(void *p, 
const char *file, int line, char *fp, int line2) { mem_fence_post_errd_ne_fp(p, file, line,fp,line2); MemDbg_Display(stderr); exit(1); } static void mem_fence_post_err_ne_fp(void *p, const char *file, int line, char *fp, int line2) { char buf[120], *cp=buf, *ip; int i; ip = (char*) p; for (i = 0; i < 16; ++i) { if (ip[i] >= ' ' && ip[i] <= '~') *cp++ = ip[i]; else *cp++ = '.'; } *cp++ = ' '; for (i = 0; i < 16; ++i) cp += sprintf(cp, " %02x", (unsigned char)ip[i]); fprintf(stderr, "Memory fence_post error - %p - %s(%d) (%d)\n\tdata: (%s)\n", p, file, line, line2, buf); } static void mem_fence_post_errd_ne_fp(void *p, const char *file, int line, char *fp, int line2) { fprintf(stderr, "Memory fence_postd error, memory double freed - %p - %s(%d) (%d)\n", p, file, line, line2); } #endif /* MEMDBG_ON */
coordination.c
/*
 * Copyright (C) 2017 by Benedict Paten (benedictpaten@gmail.com)
 *
 * Released under the MIT license, see LICENSE.txt
 */

#include "stRPHmm.h"

// OpenMP
#if defined(_OPENMP)
#include <omp.h>
#define CELL_BUFFER_SIZE 1000
#endif

/*
 * Functions to create a set of read partitioning HMMs that include a given input set of reads.
 */

stRPHmm *getNextClosestNonoverlappingHmm(stRPHmm *hmm1, stSortedSet *readHmms) {
    /*
     * Returns the HMM from the set readHmms that does not overlap hmm1
     * but whose start coordinate is closest to
     * the end coordinate of hmm1. If does not exist returns NULL.
     */

    // Iterator in the set starting from hmm1
    assert(stSortedSet_search(readHmms, hmm1) == hmm1);
    stSortedSetIterator *it = stSortedSet_getIteratorFrom(readHmms, hmm1);
    stRPHmm *hmm2 = stSortedSet_getNext(it);
    assert(hmm2 == hmm1);

    // For each hmm in readHmms whose coordinate is >= than hmm1's
    while((hmm2 = stSortedSet_getNext(it)) != NULL) {
        // Compare the hmm coordinates just to check that hmm2 has a coordinate >= to hmm1s
        int i = stRPHmm_cmpFn(hmm1, hmm2);
        assert(i <= 0);

        // If hmm1 and hmm2 are on different references, then hmm2 is the closest non-overlapping
        // hmm to hmm1 in reference space
        i = strcmp(hmm1->referenceName, hmm2->referenceName);
        if(i != 0) {
            break;
        }

        // If hmm2 does not overlap hmm1 it must be the closest non-overlapping hmm to hmm1
        if(hmm1->refStart + hmm1->refLength <= hmm2->refStart) {
            break;
        }
    }

    // Cleanup
    stSortedSet_destructIterator(it);

    // NULL when every following hmm on the same reference overlaps hmm1
    return hmm2;
}

stSortedSet *makeComponent(stRPHmm *hmm, stSet *components, stHash *componentsHash) {
    /*
     * Create a component containing hmm and add the component to components.
     * Also records the hmm -> component mapping in componentsHash.
     */
    stSortedSet *component = stSortedSet_construct3(stRPHmm_cmpFn, NULL);
    stSortedSet_insert(component, hmm);
    stSet_insert(components, component);
    assert(stHash_search(componentsHash, hmm) == NULL);
    stHash_insert(componentsHash, hmm, component);
    return component;
}

stSet *getOverlappingComponents(stList *tilingPath1, stList *tilingPath2) {
    /*
     * Two hmms overlap if their reference coordinate intervals overlaps.
     * The transitive closure of the overlap relation
     * partitions a set of hmms into connected components.
     * This function returns this partition for the hmms in tilingPath1
     * and tilingPath2, each of which is a set of hmms sorted by reference
     * coordinate and which do not overlap in reference
     * coordinates. Each component is a stSortedSet.
     */

    // A map of hmms to components
    stHash *componentsHash = stHash_construct();

    // The set of components
    stSet *components = stSet_construct2((void (*)(void *))stSortedSet_destruct);

    // The "lagging" index of the hmm in tilingPath2 that could possibly overlap hmm1
    int64_t j = 0;

    // For each hmm in tilingPath1, in order
    for(int64_t i=0; i<stList_length(tilingPath1); i++) {
        stRPHmm *hmm1 = stList_get(tilingPath1, i);

        // Start with the component being undefined
        stSortedSet *component = NULL;

        // The "leading" index of the hmm in tilingPath2 that could possibly overlap hmm1
        int64_t k = 0;

        // While there exists an hmm in tilingPath2 that precedes or overlaps with hmm1
        while(j+k<stList_length(tilingPath2)) {
            stRPHmm *hmm2 = stList_get(tilingPath2, j+k); // Note the j+k

            // If hmm1 and hmm2 overlap
            if(stRPHmm_overlapOnReference(hmm1, hmm2)) {
                // The leading index is increased
                k++;

                // If component is still NULL
                if(component == NULL) {
                    // Look for a component for hmm2
                    component = stHash_search(componentsHash, hmm2);

                    // If hmm2 has no component make one
                    if(component == NULL) {
                        component = makeComponent(hmm2, components, componentsHash);
                    }

                    // Add hmm1 to the component
                    assert(stSortedSet_search(component, hmm1) == NULL);
                    assert(stHash_search(componentsHash, hmm1) == NULL);
                    stSortedSet_insert(component, hmm1);
                    stHash_insert(componentsHash, hmm1, component);
                }
                // Otherwise component is defined
                else {
                    // Add hmm2 to the component
                    assert(stSortedSet_search(component, hmm2) == NULL);
                    assert(stHash_search(componentsHash, hmm2) == NULL);
                    // Impossible for hmm2's component to already be defined,
                    // as that implies that two hmms in tilingPath2 each both
                    // overlap two hmms in tilingPath1.
                    stSortedSet_insert(component, hmm2);
                    stHash_insert(componentsHash, hmm2, component);
                }
            }
            // Else hmm1 and hmm2 do not overlap
            else {
                // If hmm1 occurs before hmm2 in the reference ordering
                if(stRPHmm_cmpFn(hmm1, hmm2) < 0) {
                    // If has no component, make a trivial component containing just hmm1
                    // (it doesn't overlap with any other hmm)
                    if(component == NULL) {
                        component = makeComponent(hmm1, components, componentsHash);
                    }

                    // Done with hmm1
                    break;
                }
                // else hmm2 occurs before hmm1 in the reference ordering
                else {
                    // Add hmm2 to a trivial component if it does not overlap an HMM in tiling path1
                    if(stHash_search(componentsHash, hmm2) == NULL) {
                        makeComponent(hmm2, components, componentsHash);
                    }

                    // Increase the lagging index as hmm1 and proceding hmms can not overlap with hmm2
                    j++;
                }
            }
        }

        // hmm1 fell off the end of tilingPath2 without being placed: give it
        // its own trivial component
        if(component == NULL) {
            // assert(stHash_search(componentsHash, hmm1) == NULL);
            makeComponent(hmm1, components, componentsHash);
        }
    }

    // For any remaining hmms in tilingPath2 that have not been placed in a component
    // put them in a component
    while(j < stList_length(tilingPath2)) {
        stRPHmm *hmm2 = stList_get(tilingPath2, j++);
        if(stHash_search(componentsHash, hmm2) == NULL) {
            makeComponent(hmm2, components, componentsHash);
        }
    }

    // Cleanup
    stHash_destruct(componentsHash);

    return components;
}

stList *getTilingPaths(stSortedSet *hmms) {
    /*
     * Takes set of hmms ordered by reference coordinate (see stRPHmm_cmpFn) and returns
     * a list of tiling paths. Each tiling path consisting of maximal sequences of hmms
     * that do not overlap. Destroys sortedSet in the process.
     */
    stList *tilingPaths = stList_construct();
    while(stSortedSet_size(hmms) > 0) {

        // Make an empty tiling path and add to set of tiling paths built so far
        stList *tilingPath = stList_construct();
        stList_append(tilingPaths, tilingPath);

        // Get the hmm with lowest reference coordinate and add to the tiling path
        stRPHmm *hmm = stSortedSet_getFirst(hmms);
        assert(hmm != NULL);
        assert(stSortedSet_search(hmms, hmm) == hmm);
        stList_append(tilingPath, hmm);

        // While it exists, get the next closest non-overlapping hmm
        // and add to the tiling path progressively, removing the chain of hmms from the
        // set of hmms left to tile
        stRPHmm *hmm2;
        while((hmm2 = getNextClosestNonoverlappingHmm(hmm, hmms)) != NULL) {
            stSortedSet_remove(hmms, hmm);
            stList_append(tilingPath, hmm2);
            hmm = hmm2;
            assert(stSortedSet_search(hmms, hmm) == hmm);
        }
        stSortedSet_remove(hmms, hmm);
    }

    // Cleanup the input set
    stSortedSet_destruct(hmms);

    return tilingPaths;
}

stList *getTilingPaths2(stList *profileSeqs, stHash *referenceNamesToReferencePriors, stRPHmmParameters *params) {
    /*
     * Takes a set of profile sequences (stProfileSeq) and returns
     * a list of tiling paths. Each tiling path consisting of maximal sequences of hmms
     * that do not overlap.
     */

    // Create a read partitioning HMM for every sequence and put in ordered set, ordered by reference coordinate
    stSortedSet *readHmms = stSortedSet_construct3(stRPHmm_cmpFn, (void (*)(void *))stRPHmm_destruct2);
    for(int64_t i=0; i<stList_length(profileSeqs); i++) {
        stProfileSeq *pSeq = stList_get(profileSeqs, i);
        stRPHmm *hmm = stRPHmm_construct(pSeq, stHash_search(referenceNamesToReferencePriors, pSeq->referenceName), params);
        stSortedSet_insert(readHmms, hmm);
    }
    assert(stSortedSet_size(readHmms) == stList_length(profileSeqs));

    // Organise HMMs into "tiling paths" consisting of sequences of hmms that do not overlap
    // (getTilingPaths consumes and destroys readHmms)
    return getTilingPaths(readHmms);
}

stRPHmm *fuseTilingPath(stList *tilingPath) {
    /*
     * Fuse together the hmms in the tiling path into one hmm.
     * Destroys the tiling path and cleans it up.
     */
    stRPHmm *rightHmm = stList_pop(tilingPath);

    // While there remain other hmms in the list fuse them together
    while(stList_length(tilingPath) > 0) {
        stRPHmm *leftHmm = stList_pop(tilingPath);
        rightHmm = stRPHmm_fuse(leftHmm, rightHmm);
    }

    // Cleanup
    stList_destruct(tilingPath);

    return rightHmm;
}

stList *mergeTwoTilingPaths(stList *tilingPath1, stList *tilingPath2) {
    /*
     * Takes two lists, tilingPath1 and tilingPath2, each of which is a set of hmms
     * ordered by reference coordinates and
     * non-overlapping in reference coordinates.
     * Merges together the hmms and returns a single tiling path as a result in the
     * same format as the input lists.
     * Destroys the input tilingPaths in the process and cleans them up.
     */

    // Partition of the hmms into overlapping connected components
    stSet *components = getOverlappingComponents(tilingPath1, tilingPath2);

    // Cleanup the input tiling paths
    stList_destruct(tilingPath1);
    stList_destruct(tilingPath2);

    // The output tiling path, which starts out empty
    stList *newTilingPath = stList_construct();

    // Fuse the hmms

    // For each component of overlapping hmms
    stList *componentsList = stSet_getList(components);
    for(int64_t i=0; i<stList_length(componentsList); i++) {
        stSortedSet *component = stList_get(componentsList, i);
        stSet_remove(components, component);

        // Make two sub-tiling paths (there can only be two maximal paths, by definition)
        // (getTilingPaths consumes component)
        stList *tilingPaths = getTilingPaths(component);

        stRPHmm *hmm = NULL;

        if(stList_length(tilingPaths) == 2) {
            stList *subTilingPath1 = stList_get(tilingPaths, 0);
            stList *subTilingPath2 = stList_get(tilingPaths, 1);

            // Fuse the hmms in each sub tiling path
            stRPHmm *hmm1 = fuseTilingPath(subTilingPath1);
            stRPHmm *hmm2 = fuseTilingPath(subTilingPath2);

            // Align
            stRPHmm_alignColumns(hmm1, hmm2);

            // Merge
            hmm = stRPHmm_createCrossProductOfTwoAlignedHmm(hmm1, hmm2);
            stRPHmm_destruct(hmm1, 1);
            stRPHmm_destruct(hmm2, 1);

            // Prune
            stRPHmm_forwardBackward(hmm);
            stRPHmm_prune(hmm);
        }
        else {
            // Case that component is just one hmm that does not
            // overlap anything else
            assert(stList_length(tilingPaths) == 1);
            stList *subTilingPath1 = stList_get(tilingPaths, 0);
            assert(stList_length(subTilingPath1) == 1);

            hmm = stList_pop(subTilingPath1);
            stList_destruct(subTilingPath1);
        }

        // Add to output tiling path
        stList_append(newTilingPath, hmm);

        stList_destruct(tilingPaths);
    }

    //Cleanup
    stList_destruct(componentsList);
    stSet_destruct(components);

    // Sort new tiling path
    stList_sort(newTilingPath, stRPHmm_cmpFn);

    return newTilingPath;
}

stList *mergeTilingPaths(stList *tilingPaths) {
    /*
     * Like mergeTwoTilingPaths(), except instead of just two tiling paths it takes a list.
     * Destroys the tiling path as it goes.
     */

    // If no tiling paths in input warn and return an empty tiling path
    if(stList_length(tilingPaths) == 0) {
        st_logCritical("WARNING: Zero tiling paths to merge\n");
        stList_destruct(tilingPaths);
        return stList_construct();
    }

    // If only one tiling path in the input, the output is just the single input tiling path
    if(stList_length(tilingPaths) == 1) {
        stList *tilingPath = stList_get(tilingPaths, 0);
        stList_destruct(tilingPaths);
        return tilingPath;
    }

    stList *tilingPath1;
    stList *tilingPath2;

    // If there are more than two tiling paths
    // split the problem into two recursively until there are just two remaining
    // tiling paths
    if(stList_length(tilingPaths) > 2) {

        // Recursively turn the first half of the tiling paths into one tiling path
        stList *tilingPaths1 = stList_construct();
        for(int64_t i=0; i<stList_length(tilingPaths)/2; i++) {
            stList_append(tilingPaths1, stList_get(tilingPaths, i));
        }

        // Recursively turn the other half of the tiling paths into the other tiling path
        stList *tilingPaths2 = stList_construct();
        for(int64_t i=stList_length(tilingPaths)/2; i < stList_length(tilingPaths); i++) {
            stList_append(tilingPaths2, stList_get(tilingPaths, i));
        }

        // The two recursive merges are independent, so they may run as
        // parallel OpenMP sections when OpenMP is enabled
#if defined(_OPENMP)
#pragma omp parallel
        {
#pragma omp sections nowait
            {
#pragma omp section
                tilingPath1 = mergeTilingPaths(tilingPaths1);
#pragma omp section
                tilingPath2 = mergeTilingPaths(tilingPaths2);
            }
        }
#else
        tilingPath1 = mergeTilingPaths(tilingPaths1);
        tilingPath2 = mergeTilingPaths(tilingPaths2);
#endif
    }
    // Otherwise the number of tiling paths is two
    else {
        tilingPath1 = stList_get(tilingPaths, 0);
        tilingPath2 = stList_get(tilingPaths, 1);
    }

    // Merge together the two tiling paths and return result
    assert(tilingPath1 != NULL);
    assert(tilingPath2 != NULL);
    stList_destruct(tilingPaths);
    return mergeTwoTilingPaths(tilingPath1, tilingPath2);
}

static void getProfileSeqs(stList *tilingPath, stList *pSeqs) {
    /*
     * Extracts the profile sequence from every (single-read) hmm in
     * tilingPath, appending them to pSeqs. Destroys the hmms and the
     * tiling path in the process.
     */
    while(stList_length(tilingPath) > 0) {
        stRPHmm *hmm = stList_pop(tilingPath);
        assert(stList_length(hmm->profileSeqs) == 1);
        stProfileSeq *pSeq = stList_peek(hmm->profileSeqs);
        stRPHmm_destruct(hmm, 1);
        stList_append(pSeqs, pSeq);
    }
    stList_destruct(tilingPath);
}

stList *filterReadsByCoverageDepth(stList *profileSeqs, stRPHmmParameters *params, stList *filteredProfileSeqs,
        stList *discardedProfileSeqs, stHash *referenceNamesToReferencePriors) {
    /*
     * Takes a set of profile sequences and returns a subset such that maximum coverage depth of the subset is
     * less than or equal to params->maxCoverageDepth. The discarded sequences are placed in the list
     * "discardedProfileSeqs", the retained sequences are placed in filteredProfileSeqs.
     */

    // Create a set of tiling paths
    stList *tilingPaths = getTilingPaths2(profileSeqs, referenceNamesToReferencePriors, params);

    // Eliminate reads until the maximum coverage depth to less than the give threshold
    // (each tiling path contributes at most one read of depth at any point)
    while(stList_length(tilingPaths) > params->maxCoverageDepth) {
        getProfileSeqs(stList_pop(tilingPaths), discardedProfileSeqs);
    }
    while(stList_length(tilingPaths) > 0) {
        getProfileSeqs(stList_pop(tilingPaths), filteredProfileSeqs);
    }

    // Cleanup
    stList_destruct(tilingPaths);

    return filteredProfileSeqs;
}

stList *getRPHmms(stList *profileSeqs, stHash *referenceNamesToReferencePriors, stRPHmmParameters *params) {
    /*
     * Takes a set of profile sequences (stProfileSeq) and returns a list of read partitioning
     * hmms (stRPHmm) ordered and non-overlapping in reference coordinates.
     *
     * referenceNamesToReferencePriors is a map from reference sequence names to corresponding
     * stReferencePriorProbs objects.
     */
    // Create a read partitioning HMM for every sequence and put in ordered set, ordered by reference coordinate
    stList *tilingPaths = getTilingPaths2(profileSeqs, referenceNamesToReferencePriors, params);

    // The number of tiling paths equals the maximum coverage depth; refuse to
    // proceed past the configured/compiled limits
    if(stList_length(tilingPaths) > MAX_READ_PARTITIONING_DEPTH || stList_length(tilingPaths) > params->maxCoverageDepth) {
        st_errAbort("\nCoverage depth: read depth of %" PRIi64 " exceeds hard maximum of %" PRIi64 " with configured maximum of %" PRIi64 "\n",
                stList_length(tilingPaths), MAX_READ_PARTITIONING_DEPTH, params->maxCoverageDepth);
    }

    // Merge together the tiling paths into one merged tiling path, merging the individual hmms when
    // they overlap on the reference
    stList *finalTilingPath = mergeTilingPaths(tilingPaths);
    stList_setDestructor(finalTilingPath, (void (*)(void *))stRPHmm_destruct2);

    return finalTilingPath;
}
pooh.c
#include <stdio.h>
#include <omp.h>

/* Worker stub: each thread passes its id and the shared array here.
 * Intentionally does nothing yet. */
void pooh(int id, double A[])
{
}

int main(void)
{
    double A[1000];

    /* Request a team of four threads for the parallel region below. */
    omp_set_num_threads(4);
#pragma omp parallel
    {
        int tid = omp_get_thread_num();

        printf("%d\n", tid);
        pooh(tid, A);
    }
    printf("All done!\n");
    return 0;
}
attribute.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % AAA TTTTT TTTTT RRRR IIIII BBBB U U TTTTT EEEEE % % A A T T R R I B B U U T E % % AAAAA T T RRRR I BBBB U U T EEE % % A A T T R R I B B U U T E % % A A T T R R IIIII BBBB UUU T EEEEE % % % % % % MagickCore Get / Set Image Attributes % % % % Software Design % % John Cristy % % October 2002 % % % % % % Copyright 1999-2011 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % http://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/attribute.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/geometry.h" #include "magick/histogram.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" #include "magick/threshold.h" #include "magick/transform.h" #include "magick/utility.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t I m a g e B o u n d i n g B o x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageBoundingBox() returns the bounding box of an image canvas. % % The format of the GetImageBoundingBox method is: % % RectangleInfo GetImageBoundingBox(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o bounds: Method GetImageBoundingBox returns the bounding box of an % image canvas. 
% % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport RectangleInfo GetImageBoundingBox(const Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; MagickPixelPacket target[3], zero; RectangleInfo bounds; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); bounds.width=0; bounds.height=0; bounds.x=(ssize_t) image->columns; bounds.y=(ssize_t) image->rows; GetMagickPixelPacket(image,&target[0]); image_view=AcquireCacheView(image); p=GetCacheViewVirtualPixels(image_view,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { image_view=DestroyCacheView(image_view); return(bounds); } SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view), &target[0]); GetMagickPixelPacket(image,&target[1]); p=GetCacheViewVirtualPixels(image_view,(ssize_t) image->columns-1,0,1,1, exception); SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view), &target[1]); GetMagickPixelPacket(image,&target[2]); p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-1,1,1, exception); SetMagickPixelPacket(image,p,GetCacheViewAuthenticIndexQueue(image_view), &target[2]); status=MagickTrue; GetMagickPixelPacket(image,&zero); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { MagickPixelPacket pixel; RectangleInfo bounding_box; register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif bounding_box=bounds; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) { 
status=MagickFalse; continue; } indexes=GetCacheViewVirtualIndexQueue(image_view); pixel=zero; for (x=0; x < (ssize_t) image->columns; x++) { SetMagickPixelPacket(image,p,indexes+x,&pixel); if ((x < bounding_box.x) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.x=x; if ((x > (ssize_t) bounding_box.width) && (IsMagickColorSimilar(&pixel,&target[1]) == MagickFalse)) bounding_box.width=(size_t) x; if ((y < bounding_box.y) && (IsMagickColorSimilar(&pixel,&target[0]) == MagickFalse)) bounding_box.y=y; if ((y > (ssize_t) bounding_box.height) && (IsMagickColorSimilar(&pixel,&target[2]) == MagickFalse)) bounding_box.height=(size_t) y; p++; } #if defined(MAGICKCORE_OPENMP_SUPPORT) # pragma omp critical (MagickCore_GetImageBoundingBox) #endif { if (bounding_box.x < bounds.x) bounds.x=bounding_box.x; if (bounding_box.y < bounds.y) bounds.y=bounding_box.y; if (bounding_box.width > bounds.width) bounds.width=bounding_box.width; if (bounding_box.height > bounds.height) bounds.height=bounding_box.height; } } image_view=DestroyCacheView(image_view); if ((bounds.width == 0) || (bounds.height == 0)) (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); else { bounds.width-=(bounds.x-1); bounds.height-=(bounds.y-1); } return(bounds); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageChannelDepth() returns the depth of a particular image channel. % % The format of the GetImageChannelDepth method is: % % size_t GetImageDepth(const Image *image,ExceptionInfo *exception) % size_t GetImageChannelDepth(const Image *image, % const ChannelType channel,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. 
% % o exception: return any errors or warnings in this structure. % */ MagickExport size_t GetImageDepth(const Image *image,ExceptionInfo *exception) { return(GetImageChannelDepth(image,CompositeChannels,exception)); } MagickExport size_t GetImageChannelDepth(const Image *image, const ChannelType channel,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t id; size_t *current_depth, depth, number_threads; ssize_t y; /* Compute image depth. */ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); number_threads=GetOpenMPMaximumThreads(); current_depth=(size_t *) AcquireQuantumMemory(number_threads, sizeof(*current_depth)); if (current_depth == (size_t *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); status=MagickTrue; for (id=0; id < (ssize_t) number_threads; id++) current_depth[id]=1; if ((image->storage_class == PseudoClass) && (image->matte == MagickFalse)) { register const PixelPacket *restrict p; register ssize_t i; p=image->colormap; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < (ssize_t) image->colors; i++) { const int id = GetOpenMPThreadId(); if (status == MagickFalse) continue; while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickStatusType status; QuantumAny range; status=0; range=GetQuantumRange(current_depth[id]); if ((channel & RedChannel) != 0) status|=GetRedPixelComponent(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetRedPixelComponent(p), range),range); if ((channel & GreenChannel) != 0) status|=GetGreenPixelComponent(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetGreenPixelComponent(p), range),range); if ((channel & BlueChannel) != 0) status|=GetBluePixelComponent(p) != ScaleAnyToQuantum(ScaleQuantumToAny(GetBluePixelComponent(p), range),range); if (status == 0) break; 
current_depth[id]++; } p++; } depth=current_depth[0]; for (id=1; id < (ssize_t) number_threads; id++) if (depth < current_depth[id]) depth=current_depth[id]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { const int id = GetOpenMPThreadId(); register const IndexPacket *restrict indexes; register const PixelPacket *restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) continue; indexes=GetCacheViewVirtualIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { while (current_depth[id] < MAGICKCORE_QUANTUM_DEPTH) { MagickStatusType status; QuantumAny range; status=0; range=GetQuantumRange(current_depth[id]); if ((channel & RedChannel) != 0) status|=GetRedPixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetRedPixelComponent(p),range),range); if ((channel & GreenChannel) != 0) status|=GetGreenPixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetGreenPixelComponent(p),range),range); if ((channel & BlueChannel) != 0) status|=GetBluePixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetBluePixelComponent(p),range),range); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) status|=GetOpacityPixelComponent(p) != ScaleAnyToQuantum( ScaleQuantumToAny(GetOpacityPixelComponent(p),range),range); if (((channel & IndexChannel) != 0) && (image->colorspace == CMYKColorspace)) status|=GetIndexPixelComponent(indexes+x) != ScaleAnyToQuantum(ScaleQuantumToAny(GetIndexPixelComponent(indexes+ x),range),range); if (status == 0) break; current_depth[id]++; } p++; } if (current_depth[id] == MAGICKCORE_QUANTUM_DEPTH) status=MagickFalse; } image_view=DestroyCacheView(image_view); 
depth=current_depth[0]; for (id=1; id < (ssize_t) number_threads; id++) if (depth < current_depth[id]) depth=current_depth[id]; current_depth=(size_t *) RelinquishMagickMemory(current_depth); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e Q u a n t u m D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageQuantumDepth() returns the depth of the image rounded to a legal % quantum depth: 8, 16, or 32. % % The format of the GetImageQuantumDepth method is: % % size_t GetImageQuantumDepth(const Image *image, % const MagickBooleanType constrain) % % A description of each parameter follows: % % o image: the image. % % o constrain: A value other than MagickFalse, constrains the depth to % a maximum of MAGICKCORE_QUANTUM_DEPTH. % */ static inline double MagickMin(const double x,const double y) { if (x < y) return(x); return(y); } MagickExport size_t GetImageQuantumDepth(const Image *image, const MagickBooleanType constrain) { size_t depth; depth=image->depth; if (depth <= 8) depth=8; else if (depth <= 16) depth=16; else if (depth <= 32) depth=32; else if (depth <= 64) depth=64; if (constrain != MagickFalse) depth=(size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH); return(depth); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T y p e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageType() returns the potential type of image: % % Bilevel Grayscale GrayscaleMatte % Palette PaletteMatte TrueColor % TrueColorMatte ColorSeparation ColorSeparationMatte % % To ensure the image type matches its potential, use SetImageType(): % % (void) SetImageType(image,GetImageType(image)); % % The format of the GetImageType method is: % % ImageType GetImageType(const Image *image,ExceptionInfo *exception) % % A 
description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport ImageType GetImageType(const Image *image,ExceptionInfo *exception) { assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->colorspace == CMYKColorspace) { if (image->matte == MagickFalse) return(ColorSeparationType); return(ColorSeparationMatteType); } if (IsMonochromeImage(image,exception) != MagickFalse) return(BilevelType); if (IsGrayImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(GrayscaleMatteType); return(GrayscaleType); } if (IsPaletteImage(image,exception) != MagickFalse) { if (image->matte != MagickFalse) return(PaletteMatteType); return(PaletteType); } if (image->matte != MagickFalse) return(TrueColorMatteType); return(TrueColorType); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s G r a y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsGrayImage() returns MagickTrue if all the pixels in the image have the % same red, green, and blue intensities. % % The format of the IsGrayImage method is: % % MagickBooleanType IsGrayImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType IsGrayImage(const Image *image,
  ExceptionInfo *exception)
{
  CacheView
    *image_view;

  ImageType
    type;

  register const PixelPacket
    *p;

  register ssize_t
    x;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Fast path: trust a previously cached gray/bilevel classification.
  */
  if ((image->type == BilevelType) || (image->type == GrayscaleType) ||
      (image->type == GrayscaleMatteType))
    return(MagickTrue);
  if (image->colorspace == CMYKColorspace)
    return(MagickFalse);
  /*
    Assume the strictest type (bilevel) and relax it while scanning: a gray
    but non-monochrome pixel downgrades the type to grayscale; any non-gray
    pixel aborts the scan with UndefinedType.
  */
  type=BilevelType;
  image_view=AcquireCacheView(image);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      break;  /* pixel fetch failed; exits with whatever type we have so far */
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsGrayPixel(p) == MagickFalse)
        {
          type=UndefinedType;
          break;
        }
      if ((type == BilevelType) && (IsMonochromePixel(p) == MagickFalse))
        type=GrayscaleType;
      p++;
    }
    if (type == UndefinedType)
      break;
  }
  image_view=DestroyCacheView(image_view);
  if (type == UndefinedType)
    return(MagickFalse);
  /*
    Cache the classification on the image itself (const is deliberately cast
    away; only the advisory type field is updated).
  */
  ((Image *) image)->type=type;
  if ((type == GrayscaleType) && (image->matte != MagickFalse))
    ((Image *) image)->type=GrayscaleMatteType;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     I s M o n o c h r o m e I m a g e                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsMonochromeImage() returns MagickTrue if all the pixels in the image have
%  the same red, green, and blue intensities and the intensity is either
%  0 or QuantumRange.
%
%  The format of the IsMonochromeImage method is:
%
%      MagickBooleanType IsMonochromeImage(const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType IsMonochromeImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; ImageType type; register ssize_t x; register const PixelPacket *p; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->type == BilevelType) return(MagickTrue); if (image->colorspace == CMYKColorspace) return(MagickFalse); type=BilevelType; image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (IsMonochromePixel(p) == MagickFalse) { type=UndefinedType; break; } p++; } if (type == UndefinedType) break; } image_view=DestroyCacheView(image_view); if (type == UndefinedType) return(MagickFalse); ((Image *) image)->type=type; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I s O p a q u e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IsOpaqueImage() returns MagickTrue if none of the pixels in the image have % an opacity value other than opaque (0). % % The format of the IsOpaqueImage method is: % % MagickBooleanType IsOpaqueImage(const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType IsOpaqueImage(const Image *image, ExceptionInfo *exception) { CacheView *image_view; register const PixelPacket *p; register ssize_t x; ssize_t y; /* Determine if image is opaque. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->matte == MagickFalse) return(MagickTrue); image_view=AcquireCacheView(image); for (y=0; y < (ssize_t) image->rows; y++) { p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const PixelPacket *) NULL) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetOpacityPixelComponent(p) != OpaqueOpacity) break; p++; } if (x < (ssize_t) image->columns) break; } image_view=DestroyCacheView(image_view); return(y < (ssize_t) image->rows ? MagickFalse : MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S e t I m a g e C h a n n e l D e p t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SetImageChannelDepth() sets the depth of the image. % % The format of the SetImageChannelDepth method is: % % MagickBooleanType SetImageDepth(Image *image,const size_t depth) % MagickBooleanType SetImageChannelDepth(Image *image, % const ChannelType channel,const size_t depth) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o depth: the image depth. 
% */ MagickExport MagickBooleanType SetImageDepth(Image *image, const size_t depth) { return(SetImageChannelDepth(image,CompositeChannels,depth)); } MagickExport MagickBooleanType SetImageChannelDepth(Image *image, const ChannelType channel,const size_t depth) { CacheView *image_view; ExceptionInfo *exception; MagickBooleanType status; QuantumAny range; ssize_t y; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickSignature); if (GetImageDepth(image,&image->exception) <= (size_t) MagickMin((double) depth,(double) MAGICKCORE_QUANTUM_DEPTH)) { image->depth=depth; return(MagickTrue); } /* Scale pixels to desired depth. */ status=MagickTrue; range=GetQuantumRange(depth); exception=(&image->exception); image_view=AcquireCacheView(image); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (y=0; y < (ssize_t) image->rows; y++) { register IndexPacket *restrict indexes; register ssize_t x; register PixelPacket *restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (PixelPacket *) NULL) { status=MagickFalse; continue; } indexes=GetCacheViewAuthenticIndexQueue(image_view); for (x=0; x < (ssize_t) image->columns; x++) { if ((channel & RedChannel) != 0) SetRedPixelComponent(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetRedPixelComponent(q),range),range)); if ((channel & GreenChannel) != 0) SetGreenPixelComponent(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetGreenPixelComponent(q),range),range)); if ((channel & BlueChannel) != 0) SetBluePixelComponent(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetBluePixelComponent(q),range),range)); if (((channel & OpacityChannel) != 0) && (image->matte != MagickFalse)) SetOpacityPixelComponent(q,ScaleAnyToQuantum(ScaleQuantumToAny( GetOpacityPixelComponent(q),range),range)); if (((channel & IndexChannel) != 0) && 
(image->colorspace == CMYKColorspace)) SetIndexPixelComponent(indexes+x,ScaleAnyToQuantum(ScaleQuantumToAny( GetIndexPixelComponent(indexes+x),range),range)); q++; } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) { status=MagickFalse; continue; } } image_view=DestroyCacheView(image_view); if (image->storage_class == PseudoClass) { register ssize_t i; register PixelPacket *restrict p; p=image->colormap; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(dynamic,4) shared(status) #endif for (i=0; i < (ssize_t) image->colors; i++) { if ((channel & RedChannel) != 0) p->red=ScaleAnyToQuantum(ScaleQuantumToAny(p->red,range),range); if ((channel & GreenChannel) != 0) p->green=ScaleAnyToQuantum(ScaleQuantumToAny(p->green,range),range); if ((channel & BlueChannel) != 0) p->blue=ScaleAnyToQuantum(ScaleQuantumToAny(p->blue,range),range); if ((channel & OpacityChannel) != 0) p->opacity=ScaleAnyToQuantum(ScaleQuantumToAny(p->opacity,range), range); p++; } } image->depth=depth; return(status); }
pragmatest.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/*
** Sum the integers 0..N-1 in parallel with an OpenMP reduction.
**
** Usage: ./pragmatest N
**
** Returns 0 on success, EXIT_FAILURE when N is not supplied.
*/
int main(int argc, char *argv[])
{
  /* Bug fix: the original read argv[1] unconditionally, which is undefined
     behavior (typically a segfault) when the program is run with no args. */
  if (argc != 2) {
    fprintf(stderr, "usage: %s N\n", argv[0]);
    return EXIT_FAILURE;
  }

  int N = atoi(argv[1]);
  float sum = 0;

  /* Each thread accumulates a private partial sum; OpenMP combines them. */
  #pragma omp parallel for reduction(+:sum)
  for (int n = 0; n < N; n++) {
    sum = sum + n;
  }

  printf("sum = %f \n", sum);
  return 0;
}
RefGen21.h
/****************************************************************/ /* Parallel Combinatorial BLAS Library (for Graph Computations) */ /* version 1.4 -------------------------------------------------*/ /* date: 1/17/2014 ---------------------------------------------*/ /* authors: Aydin Buluc (abuluc@lbl.gov), Adam Lugowski --------*/ /****************************************************************/ /* Copyright (c) 2010-2014, The Regents of the University of California Permission is hereby granted, free of charge, to any person obtaining a std::copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, std::copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ /** * Deterministic vertex scrambling functions from V2.1 of the reference implementation **/ #ifndef _REF_GEN_2_1_H_ #define _REF_GEN_2_1_H_ #ifdef _STDINT_H #undef _STDINT_H #endif #ifdef _GCC_STDINT_H // for cray #undef _GCC_STDINT_H // original stdint does #include_next<"/opt/gcc/4.5.2/snos/lib/gcc/x86_64-suse-linux/4.5.2/include/stdint-gcc.h"> #endif #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #include <stdint.h> #include <inttypes.h> #include <errno.h> #include<vector> #include <limits> #include "SpDefs.h" #include "StackEntry.h" #include "promote.h" #include "Isect.h" #include "HeapEntry.h" #include "SpImpl.h" #include "graph500-1.2/generator/graph_generator.h" #include "graph500-1.2/generator/utils.h" /* Initiator settings: for faster random number generation, the initiator * probabilities are defined as fractions (a = INITIATOR_A_NUMERATOR / * INITIATOR_DENOMINATOR, b = c = INITIATOR_BC_NUMERATOR / * INITIATOR_DENOMINATOR, d = 1 - a - b - c. */ #define INITIATOR_A_NUMERATOR 5700 #define INITIATOR_BC_NUMERATOR 1900 #define INITIATOR_DENOMINATOR 10000 /* If this macro is defined to a non-zero value, use SPK_NOISE_LEVEL / * INITIATOR_DENOMINATOR as the noise parameter to use in introducing noise * into the graph parameters. The approach used is from "A Hitchhiker's Guide * to Choosing Parameters of Stochastic Kronecker Graphs" by C. Seshadhri, Ali * Pinar, and Tamara G. Kolda (http://arxiv.org/abs/1102.5046v1), except that * the adjustment here is chosen based on the current level being processed * rather than being chosen randomly. 
*/ #define SPK_NOISE_LEVEL 0 /* #define SPK_NOISE_LEVEL 1000 -- in INITIATOR_DENOMINATOR units */ namespace combblas { class RefGen21 { public: /* Spread the two 64-bit numbers into five nonzero values in the correct range (2 parameter version) */ static void make_mrg_seed_short(uint64_t userseed, uint_fast32_t* seed) { seed[0] = (userseed & 0x3FFFFFFF) + 1; seed[1] = ((userseed >> 30) & 0x3FFFFFFF) + 1; seed[2] = (userseed & 0x3FFFFFFF) + 1; seed[3] = ((userseed >> 30) & 0x3FFFFFFF) + 1; seed[4] = ((userseed >> 60) << 4) + (userseed >> 60) + 1; } static int generate_4way_bernoulli(mrg_state* st, int level, int nlevels) { /* Generator a pseudorandom number in the range [0, INITIATOR_DENOMINATOR) without modulo bias. */ static const uint32_t limit = (UINT32_C(0xFFFFFFFF) % INITIATOR_DENOMINATOR); uint32_t val = mrg_get_uint_orig(st); if (/* Unlikely */ val < limit) { do { val = mrg_get_uint_orig(st); } while (val < limit); } #if SPK_NOISE_LEVEL == 0 int spk_noise_factor = 0; #else int spk_noise_factor = 2 * SPK_NOISE_LEVEL * level / nlevels - SPK_NOISE_LEVEL; #endif int adjusted_bc_numerator = INITIATOR_BC_NUMERATOR + spk_noise_factor; val %= INITIATOR_DENOMINATOR; if ((signed)val < adjusted_bc_numerator) return 1; val -= adjusted_bc_numerator; if ((signed)val < adjusted_bc_numerator) return 2; val -= adjusted_bc_numerator; #if SPK_NOISE_LEVEL == 0 if (val < INITIATOR_A_NUMERATOR) return 0; #else if (val < INITIATOR_A_NUMERATOR * (INITIATOR_DENOMINATOR - 2 * INITIATOR_BC_NUMERATOR) / (INITIATOR_DENOMINATOR - 2 * adjusted_bc_numerator)) return 0; #endif return 3; } /* Reverse bits in a number; this should be optimized for performance * (including using bit- or byte-reverse intrinsics if your platform has them). 
* */ static inline uint64_t bitreverse(uint64_t x) { #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3) #define USE_GCC_BYTESWAP /* __builtin_bswap* are in 4.3 but not 4.2 */ #endif #ifdef FAST_64BIT_ARITHMETIC /* 64-bit code */ #ifdef USE_GCC_BYTESWAP x = __builtin_bswap64(x); #else x = (x >> 32) | (x << 32); x = ((x >> 16) & UINT64_C(0x0000FFFF0000FFFF)) | ((x & UINT64_C(0x0000FFFF0000FFFF)) << 16); x = ((x >> 8) & UINT64_C(0x00FF00FF00FF00FF)) | ((x & UINT64_C(0x00FF00FF00FF00FF)) << 8); #endif x = ((x >> 4) & UINT64_C(0x0F0F0F0F0F0F0F0F)) | ((x & UINT64_C(0x0F0F0F0F0F0F0F0F)) << 4); x = ((x >> 2) & UINT64_C(0x3333333333333333)) | ((x & UINT64_C(0x3333333333333333)) << 2); x = ((x >> 1) & UINT64_C(0x5555555555555555)) | ((x & UINT64_C(0x5555555555555555)) << 1); return x; #else /* 32-bit code */ uint32_t h = (uint32_t)(x >> 32); uint32_t l = (uint32_t)(x & UINT32_MAX); #ifdef USE_GCC_BYTESWAP h = __builtin_bswap32(h); l = __builtin_bswap32(l); #else h = (h >> 16) | (h << 16); l = (l >> 16) | (l << 16); h = ((h >> 8) & UINT32_C(0x00FF00FF)) | ((h & UINT32_C(0x00FF00FF)) << 8); l = ((l >> 8) & UINT32_C(0x00FF00FF)) | ((l & UINT32_C(0x00FF00FF)) << 8); #endif h = ((h >> 4) & UINT32_C(0x0F0F0F0F)) | ((h & UINT32_C(0x0F0F0F0F)) << 4); l = ((l >> 4) & UINT32_C(0x0F0F0F0F)) | ((l & UINT32_C(0x0F0F0F0F)) << 4); h = ((h >> 2) & UINT32_C(0x33333333)) | ((h & UINT32_C(0x33333333)) << 2); l = ((l >> 2) & UINT32_C(0x33333333)) | ((l & UINT32_C(0x33333333)) << 2); h = ((h >> 1) & UINT32_C(0x55555555)) | ((h & UINT32_C(0x55555555)) << 1); l = ((l >> 1) & UINT32_C(0x55555555)) | ((l & UINT32_C(0x55555555)) << 1); return ((uint64_t)l << 32) | h; /* Swap halves */ #endif } /* Apply a permutation to scramble vertex numbers; a randomly generated * permutation is not used because applying it at scale is too expensive. 
 */
  /* Scramble a vertex number into the range [0, 2^lgN).  Deterministic given
   * (val0, val1), so every rank computes the same permutation. */
  static inline int64_t scramble(int64_t v0, int lgN, uint64_t val0, uint64_t val1) {
    uint64_t v = (uint64_t)v0;
    /* Mix in the seed material, multiply by a forced-odd constant (odd
       multipliers are invertible mod 2^64), and keep the top lgN bits via
       bit reversal; done twice for better mixing. */
    v += val0 + val1;
    v *= (val0 | UINT64_C(0x4519840211493211));
    v = (RefGen21::bitreverse(v) >> (64 - lgN));
    assert ((v >> lgN) == 0);  /* must stay inside the vertex id range */
    v *= (val1 | UINT64_C(0x3050852102C843A5));
    v = (RefGen21::bitreverse(v) >> (64 - lgN));
    assert ((v >> lgN) == 0);
    return (int64_t)v;
  }

  /* Make a single graph edge using a pre-set MRG state.
   * Kronecker-style recursion: each pass halves nverts and picks one of the
   * four quadrants via generate_4way_bernoulli(); endpoints are scrambled at
   * the end so vertex ids look random while remaining deterministic. */
  static void make_one_edge(int64_t nverts, int level, int lgN, mrg_state* st,
                            packed_edge* result, uint64_t val0, uint64_t val1) {
    int64_t base_src = 0, base_tgt = 0;
    while (nverts > 1) {
      int square = generate_4way_bernoulli(st, level, lgN);
      int src_offset = square / 2;
      int tgt_offset = square % 2;
      assert (base_src <= base_tgt);
      if (base_src == base_tgt) {
        /* Clip-and-flip for undirected graph */
        if (src_offset > tgt_offset) {
          int temp = src_offset;
          src_offset = tgt_offset;
          tgt_offset = temp;
        }
      }
      nverts /= 2;
      ++level;
      base_src += nverts * src_offset;
      base_tgt += nverts * tgt_offset;
    }
    write_edge(result, scramble(base_src, lgN, val0, val1),
               scramble(base_tgt, lgN, val0, val1));
  }

  /* Derive the two 64-bit scramble constants val0/val1 from the MRG seed.
   * Returns the *unskipped* base state so callers can later do per-edge
   * mrg_skip()s from the same starting point. */
  static inline mrg_state MakeScrambleValues(uint64_t & val0, uint64_t & val1,
                                             const uint_fast32_t seed[]) {
    mrg_state state;
    mrg_seed(&state, seed);
    mrg_state new_state = state;
    /* Jump to a stream far away from the one used for edge generation. */
    mrg_skip(&new_state, 50, 7, 0);
    val0 = mrg_get_uint_orig(&new_state);
    val0 *= UINT64_C(0xFFFFFFFF);
    val0 += mrg_get_uint_orig(&new_state);
    val1 = mrg_get_uint_orig(&new_state);
    val1 *= UINT64_C(0xFFFFFFFF);
    val1 += mrg_get_uint_orig(&new_state);
    return state;
  }

  /* Generate a range of edges (from start_edge to end_edge of the total graph),
   * writing into elements [0, end_edge - start_edge) of the edges array. This
   * code is parallel on OpenMP, it must be used with separately-implemented SPMD parallelism for MPI.
*/ static void generate_kronecker_range( const uint_fast32_t seed[5] /* All values in [0, 2^31 - 1), not all zero */, int logN /* In base 2 */, int64_t start_edge, int64_t end_edge, packed_edge* edges) { int64_t nverts = (int64_t)1 << logN; uint64_t val0, val1; /* Values for scrambling */ mrg_state state = MakeScrambleValues(val0, val1, seed); #ifdef _OPENMP #pragma omp parallel for #endif for (int64_t ei = start_edge; ei < end_edge; ++ei) { mrg_state new_state = state; mrg_skip(&new_state, 0, ei, 0); make_one_edge(nverts, 0, logN, &new_state, edges + (ei - start_edge), val0, val1); } } static inline void compute_edge_range(int rank, int size, int64_t M, int64_t* start_idx, int64_t* end_idx) { int64_t rankc = (int64_t)(rank); int64_t sizec = (int64_t)(size); *start_idx = rankc * (M / sizec) + (rankc < (M % sizec) ? rankc : (M % sizec)); *end_idx = (rankc + 1) * (M / sizec) + (rankc + 1 < (M % sizec) ? rankc + 1 : (M % sizec)); } static inline void make_graph(int log_numverts, int64_t M, int64_t* nedges_ptr, packed_edge** result_ptr, MPI_Comm & world) { int rank, size; #ifdef DETERMINISTIC uint64_t userseed1 = 0; #else uint64_t userseed1 = (uint64_t) init_random(); #endif /* Spread the two 64-bit numbers into five nonzero values in the correct range. 
*/ uint_fast32_t seed[5]; make_mrg_seed(userseed1, userseed1, seed); MPI_Comm_rank(world, &rank); MPI_Comm_size(world, &size); int64_t start_idx, end_idx; compute_edge_range(rank, size, M, &start_idx, &end_idx); int64_t nedges = end_idx - start_idx; packed_edge* local_edges = new packed_edge[nedges]; double start = MPI_Wtime(); generate_kronecker_range(seed, log_numverts, start_idx, end_idx, local_edges); double gen_time = MPI_Wtime() - start; *result_ptr = local_edges; *nedges_ptr = nedges; if (rank == 0) { fprintf(stdout, "graph_generation: %f s\n", gen_time); } } static inline long init_random () { long seed = -1; if (getenv ("SEED")) { errno = 0; seed = strtol (getenv ("SEED"), NULL, 10); if (errno) seed = -1; } if (seed < 0) seed = 0xDECAFBAD; return seed; } }; } #endif
jac_solv_simd.c
/* ** PROGRAM: jacobi Solver ** ** PURPOSE: This program will explore use of a jacobi iterative ** method to solve a system of linear equations (Ax= b). ** ** Here is the basic idea behind the method. Rewrite ** the matrix A as a Lower Triangular (L), upper triangular ** (U) and diagonal matrix (D) ** ** Ax = (L + D + U)x = b ** ** Carry out the multiplication and rearrange: ** ** Dx = b - (L+U)x --> x = (b-(L+U)x)/D ** ** We can do this iteratively ** ** x_new = (b-(L+U)x_old)/D ** ** USAGE: Run without arguments to use default SIZE. ** ** ./jac_solv ** ** Run with a single argument for the order of the A ** matrix ... for example ** ** ./jac_solv 2500 ** ** HISTORY: Written by Tim Mattson, Oct 2015 */ #include<omp.h> #include <stdlib.h> #include<math.h> #include "mm_utils.h" //a library of basic matrix utilities functions //and some key constants used in this program //(such as TYPE) #define TOLERANCE 0.001 #define DEF_SIZE 1000 #define MAX_ITERS 5000 #define LARGE 1000000.0 //#define DEBUG 1 // output a small subset of intermediate values //#define VERBOSE 1 int main(int argc, char **argv) { int Ndim; // A[Ndim][Ndim] int i,j, iters; double start_time, elapsed_time; TYPE conv, tmp, err, chksum; TYPE *A, *b, *x1, *x2, *xnew, xo, xn, *xold, *xtmp; // set matrix dimensions and allocate memory for matrices if(argc ==2){ Ndim = atoi(argv[1]); } else{ Ndim = DEF_SIZE; } printf(" ndim = %d\n",Ndim); A = (TYPE *) malloc(Ndim*Ndim*sizeof(TYPE)); b = (TYPE *) malloc(Ndim*sizeof(TYPE)); x1 = (TYPE *) malloc(Ndim*sizeof(TYPE)); x2 = (TYPE *) malloc(Ndim*sizeof(TYPE)); if (!A || !b || !x1 || !x2) { printf("\n memory allocation error\n"); exit(-1); } // generate our diagonally dominant matrix, A init_diag_dom_near_identity_matrix(Ndim, A); #ifdef VERBOSE mm_print(Ndim, Ndim, A); #endif // // Initialize x and just give b some non-zero random values // for(i=0; i<Ndim; i++){ x1[i] = (TYPE)0.0; x2[i] = (TYPE)0.0; b[i] = (TYPE)(rand()%51)/100.0; } start_time = omp_get_wtime(); // 
// jacobi iterative solver // conv = LARGE; iters = 0; xnew = x1; xold = x2; while((conv > TOLERANCE) && (iters<MAX_ITERS)) { iters++; xtmp = xnew; // don't copy arrays. xnew = xold; // just swap pointers. xold = xtmp; for (i=0; i<Ndim; i++){ xn = (TYPE) 0.0; #pragma omp simd reduction(+:xn) for (j=0; j<Ndim;j++){ xn+= A[i*Ndim + j]*xold[j]*(i!=j); } xnew[i] = (b[i]-xn)/A[i*Ndim+i]; } // // test convergence // conv = (TYPE) 0.0; #pragma omp simd reduction(+:conv) for (i=0; i<Ndim; i++){ tmp = xnew[i]-xold[i]; conv += tmp*tmp; } conv = sqrt((double)conv); #ifdef DEBUG printf(" conv = %f \n",(float)conv); #endif } elapsed_time = omp_get_wtime() - start_time; printf(" Convergence = %g with %d iterations and %f seconds\n", (float)conv, iters, (float)elapsed_time); // // test answer by multiplying my computed value of x by // the input A matrix and comparing the result with the // input b vector. // err = (TYPE) 0.0; chksum = (TYPE) 0.0; for(i=0;i<Ndim;i++){ xo = (TYPE) 0.0; #pragma omp simd reduction(+:xo) for(j=0; j<Ndim; j++) xo += A[i*Ndim+j]*xnew[j]; tmp = xo - b[i]; #ifdef DEBUG printf(" i=%d, diff = %f, computed b = %f, input b= %f \n", i, (float)tmp, (float)xold[i], (float)b[i]); #endif chksum += xnew[i]; err += tmp*tmp; } err = sqrt((double)err); printf("jacobi solver: err = %f, solution checksum = %f \n", (float)sqrt(err), (float)chksum); free(A); free(b); free(x1); free(x2); }
utils.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2015 by Contributors * \file utils.h * \brief Basic utilility functions. */ #ifndef MXNET_COMMON_UTILS_H_ #define MXNET_COMMON_UTILS_H_ #include <dmlc/logging.h> #include <dmlc/omp.h> #include <nnvm/graph.h> #include <mxnet/engine.h> #include <mxnet/ndarray.h> #include <mxnet/op_attr_types.h> #include <mxnet/graph_attr_types.h> #include <nnvm/graph_attr_types.h> #include <memory> #include <vector> #include <type_traits> #include <utility> #include <random> #include <string> #include <thread> #include <algorithm> #include <functional> #include <limits> #include "../operator/mxnet_op.h" #if MXNET_USE_MKLDNN == 1 #include "../operator/nn/mkldnn/mkldnn_base-inl.h" #endif namespace mxnet { namespace common { /*! * \brief IndPtr should be non-negative, in non-decreasing order, start with 0 * and end with value equal with size of indices. */ struct csr_indptr_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* indptr, const nnvm::dim_t end, const nnvm::dim_t idx_size) { if (indptr[i+1] < 0 || indptr[i+1] < indptr[i] || (i == 0 && indptr[i] != 0) || (i == end - 1 && indptr[end] != idx_size)) *out = kCSRIndPtrErr; } }; /*! 
* \brief Indices should be non-negative, less than the number of columns * and in ascending order per row. */ struct csr_idx_check { template<typename DType, typename IType, typename RType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const RType* indptr, const nnvm::dim_t ncols) { for (RType j = indptr[i]; j < indptr[i+1]; j++) { if (idx[j] >= ncols || idx[j] < 0 || (j < indptr[i+1] - 1 && idx[j] >= idx[j+1])) { *out = kCSRIdxErr; break; } } } }; /*! * \brief Indices of RSPNDArray should be non-negative, * less than the size of first dimension and in ascending order */ struct rsp_idx_check { template<typename DType, typename IType> MSHADOW_XINLINE static void Map(int i, DType* out, const IType* idx, const nnvm::dim_t end, const nnvm::dim_t nrows) { if ((i < end && idx[i+1] <= idx[i]) || idx[i] < 0 || idx[i] >= nrows) *out = kRSPIdxErr; } }; template<typename xpu> void CheckFormatWrapper(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check); /*! * \brief Check the validity of CSRNDArray. * \param rctx Execution context. * \param input Input NDArray of CSRStorage. * \param err_cpu Error number on cpu. * \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. 
*/ template<typename xpu> void CheckFormatCSRImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kCSRStorage) << "CheckFormatCSRImpl is for CSRNDArray"; const TShape shape = input.shape(); const TShape idx_shape = input.aux_shape(csr::kIdx); const TShape indptr_shape = input.aux_shape(csr::kIndPtr); const TShape storage_shape = input.storage_shape(); if ((shape.ndim() != 2) || (idx_shape.ndim() != 1 || indptr_shape.ndim() != 1 || storage_shape.ndim() != 1) || (indptr_shape[0] != shape[0] + 1) || (idx_shape[0] != storage_shape[0])) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kCSRShapeErr; }); return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIndPtr), RType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(csr::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<csr_indptr_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), indptr_shape[0] - 1, idx_shape[0]); // no need to check indices if indices are empty if (idx_shape[0] != 0) { Kernel<csr_idx_check, xpu>::Launch(s, indptr_shape[0] - 1, val_xpu.dptr<DType>(), input.aux_data(csr::kIdx).dptr<IType>(), input.aux_data(csr::kIndPtr).dptr<RType>(), shape[1]); } mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); }); } } /*! * \brief Check the validity of RowSparseNDArray. * \param rctx Execution context. * \param input Input NDArray of RowSparseStorage. * \param err_cpu Error number on cpu. 
* \param full_check If true, rigorous check, O(N) operations, * otherwise basic check, O(1) operations. */ template<typename xpu> void CheckFormatRSPImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { using namespace op::mxnet_op; CHECK_EQ(input.storage_type(), kRowSparseStorage) << "CheckFormatRSPImpl is for RSPNDArray"; const TShape idx_shape = input.aux_shape(rowsparse::kIdx); if (idx_shape[0] != input.storage_shape()[0]) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { DType* err = err_cpu.dptr<DType>(); *err = kRSPShapeErr; }); return; } if (idx_shape[0] == 0) { return; } if (full_check) { MSHADOW_TYPE_SWITCH(err_cpu.type_flag_, DType, { MSHADOW_IDX_TYPE_SWITCH(input.aux_type(rowsparse::kIdx), IType, { mshadow::Stream<xpu> *s = rctx.get_stream<xpu>(); NDArray ret_xpu = NDArray(mshadow::Shape1(1), rctx.get_ctx(), false, err_cpu.type_flag_); TBlob val_xpu = ret_xpu.data(); Kernel<set_to_int<kNormalErr>, xpu>::Launch(s, val_xpu.Size(), val_xpu.dptr<DType>()); Kernel<rsp_idx_check, xpu>::Launch(s, idx_shape[0], val_xpu.dptr<DType>(), input.aux_data(rowsparse::kIdx).dptr<IType>(), idx_shape[0] - 1, input.shape()[0]); mshadow::Copy(err_cpu.get<cpu, 1, DType>(), val_xpu.get<xpu, 1, DType>(s), s); }); }); } } template<typename xpu> void CheckFormatImpl(const RunContext &rctx, const NDArray &input, const TBlob &err_cpu, const bool full_check) { int stype = input.storage_type(); if (stype == kCSRStorage) { CheckFormatCSRImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kRowSparseStorage) { CheckFormatRSPImpl<xpu>(rctx, input, err_cpu, full_check); } else if (stype == kDefaultStorage) { // no-op for default storage } else { LOG(FATAL) << "Unknown storage type " << stype; } } /*! \brief Pick rows specified by user input index array from a row sparse ndarray * and save them in the output sparse ndarray. 
*/ template<typename xpu> void SparseRetainOpForwardRspWrapper(mshadow::Stream<xpu> *s, const NDArray& input_nd, const TBlob& idx_data, const OpReqType req, NDArray* output_nd); /* \brief Casts tensor storage type to the new type. */ template<typename xpu> void CastStorageDispatch(const OpContext& ctx, const NDArray& input, const NDArray& output); /*! \brief returns true if all storage types in `vstorage` are the same as target `stype`. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype) { if (!vstorage.empty()) { for (const auto& i : vstorage) { if (i != stype) return false; } return true; } return false; } /*! \brief returns true if all storage types in `vstorage` are the same as target `stype1` * or `stype2'. Sets boolean if both found. * false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const StorageTypeVector& vstorage, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!vstorage.empty()) { uint8_t has = 0; for (const auto i : vstorage) { if (i == stype1) { has |= 1; } else if (i == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as target `stype`. false is returned for empty inputs. */ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() != stype) { return false; } } return true; } return false; } /*! \brief returns true if the storage types of arrays in `ndarrays` * are the same as targets `stype1` or `stype2`. false is returned for empty inputs. 
*/ inline bool ContainsOnlyStorage(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype1, const NDArrayStorageType stype2, bool *has_both) { if (has_both) { *has_both = false; } if (!ndarrays.empty()) { uint8_t has = 0; for (const auto& nd : ndarrays) { const NDArrayStorageType stype = nd.storage_type(); if (stype == stype1) { has |= 1; } else if (stype == stype2) { has |= 2; } else { return false; } } if (has_both) { *has_both = has == 3; } return true; } return false; } /*! \brief returns true if storage type of any array in `ndarrays` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<NDArray>& ndarrays, const NDArrayStorageType stype) { if (!ndarrays.empty()) { for (const auto& nd : ndarrays) { if (nd.storage_type() == stype) { return true; } } } return false; } /*! \brief returns true if any storage type `ndstype` in `ndstypes` * is the same as the target `stype`. false is returned for empty inputs. */ inline bool ContainsStorageType(const std::vector<int>& ndstypes, const NDArrayStorageType stype) { if (!ndstypes.empty()) { for (const auto& ndstype : ndstypes) { if (ndstype == stype) { return true; } } } return false; } /*! \brief get string representation of dispatch_mode */ inline std::string dispatch_mode_string(const DispatchMode x) { switch (x) { case DispatchMode::kFCompute: return "fcompute"; case DispatchMode::kFComputeEx: return "fcompute_ex"; case DispatchMode::kFComputeFallback: return "fcompute_fallback"; case DispatchMode::kVariable: return "variable"; case DispatchMode::kUndefined: return "undefined"; } return "unknown"; } /*! \brief get string representation of storage_type */ inline std::string stype_string(const int x) { switch (x) { case kDefaultStorage: return "default"; case kCSRStorage: return "csr"; case kRowSparseStorage: return "row_sparse"; } return "unknown"; } /*! 
\brief get string representation of device type */ inline std::string dev_type_string(const int dev_type) { switch (dev_type) { case Context::kCPU: return "cpu"; case Context::kGPU: return "gpu"; case Context::kCPUPinned: return "cpu_pinned"; case Context::kCPUShared: return "cpu_shared"; } return "unknown"; } /*! \brief get string representation of the operator stypes */ inline std::string operator_stype_string(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>& in_attrs, const std::vector<int>& out_attrs) { std::ostringstream os; os << "operator = " << attrs.op->name << "\ninput storage types = ["; for (const int attr : in_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "output storage types = ["; for (const int attr : out_attrs) { os << stype_string(attr) << ", "; } os << "]\n" << "params = {"; for (auto kv : attrs.dict) { os << "\"" << kv.first << "\" : " << kv.second << ", "; } os << "}\n" << "context.dev_mask = " << dev_type_string(dev_mask); return os.str(); } /*! \brief get string representation of the operator */ inline std::string operator_string(const nnvm::NodeAttrs& attrs, const OpContext& ctx, const std::vector<NDArray>& inputs, const std::vector<OpReqType>& req, const std::vector<NDArray>& outputs) { std::string result = ""; std::vector<int> in_stypes; std::vector<int> out_stypes; in_stypes.reserve(inputs.size()); out_stypes.reserve(outputs.size()); auto xform = [](const NDArray arr) -> int { return arr.storage_type(); }; std::transform(inputs.begin(), inputs.end(), std::back_inserter(in_stypes), xform); std::transform(outputs.begin(), outputs.end(), std::back_inserter(out_stypes), xform); result += operator_stype_string(attrs, ctx.run_ctx.ctx.dev_mask(), in_stypes, out_stypes); return result; } /*! \brief log message once. Intended for storage fallback warning messages. 
*/ inline void LogOnce(const std::string& message) { typedef dmlc::ThreadLocalStore<std::unordered_set<std::string>> LogStore; auto log_store = LogStore::Get(); if (log_store->find(message) == log_store->end()) { LOG(INFO) << message; log_store->insert(message); } } /*! \brief log storage fallback event */ inline void LogStorageFallback(const nnvm::NodeAttrs& attrs, const int dev_mask, const std::vector<int>* in_attrs, const std::vector<int>* out_attrs) { static bool log = dmlc::GetEnv("MXNET_STORAGE_FALLBACK_LOG_VERBOSE", true); if (!log) return; const std::string op_str = operator_stype_string(attrs, dev_mask, *in_attrs, *out_attrs); std::ostringstream os; const char* warning = "\nThe operator with default storage type will be dispatched " "for execution. You're seeing this warning message because the operator above is unable " "to process the given ndarrays with specified storage types, context and parameter. " "Temporary dense ndarrays are generated in order to execute the operator. " "This does not affect the correctness of the programme. " "You can set environment variable MXNET_STORAGE_FALLBACK_LOG_VERBOSE to " "0 to suppress this warning."; os << "\nStorage type fallback detected:\n" << op_str << warning; LogOnce(os.str()); #if MXNET_USE_MKLDNN == 1 if (!MKLDNNEnvSet()) common::LogOnce("MXNET_MKLDNN_ENABLED flag is off. " "You can re-enable by setting MXNET_MKLDNN_ENABLED=1"); #endif } // heuristic to dermine number of threads per GPU inline int GetNumThreadsPerGPU() { // This is resource efficient option. return dmlc::GetEnv("MXNET_GPU_WORKER_NTHREADS", 2); } // heuristic to get number of matching colors. // this decides how much parallelism we can get in each GPU. inline int GetExecNumMatchColor() { // This is resource efficient option. 
int num_match_color = dmlc::GetEnv("MXNET_EXEC_NUM_TEMP", 1); return std::min(num_match_color, GetNumThreadsPerGPU()); } template<typename T, typename V> V ParallelAccumulate(const T* a, const int n, V start) { V sum = start; #pragma omp parallel for reduction(+:sum) for (int i = 0; i < n; ++i) { sum += a[i]; } return sum; } /*! * \brief * Helper function for ParallelSort. * DO NOT call this function directly. * Use the interface ParallelSort instead. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSortHelper(RandomIt first, size_t len, size_t grainsize, const Compare& comp) { if (len < grainsize) { std::sort(first, first+len, comp); } else { std::thread thr(ParallelSortHelper<RandomIt, Compare>, first, len/2, grainsize, comp); ParallelSortHelper(first+len/2, len - len/2, grainsize, comp); thr.join(); std::inplace_merge(first, first+len/2, first+len, comp); } } /*! * \brief * Sort the elements in the range [first, last) into the ascending order defined by * the comparator comp. * If the length of the range [first, last) is greater than a certain threshold, * the range will be recursively divided into two and assign two threads * to sort each half range. * Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt, typename Compare> void ParallelSort(RandomIt first, RandomIt last, size_t num_threads, Compare comp) { const auto num = std::distance(first, last); size_t grainsize = std::max(num / num_threads + 5, static_cast<size_t>(1024*16)); ParallelSortHelper(first, num, grainsize, comp); } /*! * \brief * Sort the elements in the range [first, last) into ascending order. * The elements are compared using the default < operator. * If the length of the range [first, last) is greater than a certain threshold, * the range will be recursively divided into two and assign two threads * to sort each half range. 
* Ref: https://github.com/dmlc/difacto/blob/master/src/common/parallel_sort.h */ template<typename RandomIt> void ParallelSort(RandomIt first, RandomIt last, size_t num_threads) { ParallelSort(first, last, num_threads, std::less<typename std::iterator_traits<RandomIt>::value_type>()); } /*! * \brief Random Engine */ typedef std::mt19937 RANDOM_ENGINE; /*! * \brief Helper functions. */ namespace helper { /*! * \brief Helper for non-array type `T`. */ template <class T> struct UniqueIf { /*! * \brief Type of `T`. */ using SingleObject = std::unique_ptr<T>; }; /*! * \brief Helper for an array of unknown bound `T`. */ template <class T> struct UniqueIf<T[]> { /*! * \brief Type of `T`. */ using UnknownBound = std::unique_ptr<T[]>; }; /*! * \brief Helper for an array of known bound `T`. */ template <class T, size_t kSize> struct UniqueIf<T[kSize]> { /*! * \brief Type of `T`. */ using KnownBound = void; }; } // namespace helper /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param args List of arguments with which an instance of `T` will be * constructed. * \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs a non-array type `T`. The arguments `args` are passed to the * constructor of `T`. The function does not participate in the overload * resolution if `T` is an array type. */ template <class T, class... Args> typename helper::UniqueIf<T>::SingleObject MakeUnique(Args&&... args) { return std::unique_ptr<T>(new T(std::forward<Args>(args)...)); } /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param n The size of the array to construct. * \return `std``::``unique_ptr` of an instance of type `T`. * * Constructs an array of unknown bound `T`. The function does not participate * in the overload resolution unless `T` is an array of unknown bound. 
*/ template <class T> typename helper::UniqueIf<T>::UnknownBound MakeUnique(size_t n) { using U = typename std::remove_extent<T>::type; return std::unique_ptr<T>(new U[n]{}); } /*! * \brief Constructs an object of type `T` and wraps it in a * `std``::``unique_ptr`. * \param args List of arguments with which an instance of `T` will be * constructed. * * Constructs an arrays of known bound is disallowed. */ template <class T, class... Args> typename helper::UniqueIf<T>::KnownBound MakeUnique(Args&&... args) = delete; template<typename FCompType> FCompType GetFCompute(const nnvm::Op* op, const std::string& name, const Context& ctx) { static auto& fcompute_cpu = nnvm::Op::GetAttr<FCompType>(name + "<cpu>"); static auto& fcompute_gpu = nnvm::Op::GetAttr<FCompType>(name + "<gpu>"); if (ctx.dev_mask() == cpu::kDevMask) { return fcompute_cpu.get(op, nullptr); } else if (ctx.dev_mask() == gpu::kDevMask) { return fcompute_gpu.get(op, nullptr); } else { LOG(FATAL) << "Unknown device mask"; return nullptr; } } /*! * \brief Return the max integer value representable in the type `T` without loss of precision. */ template <typename T> constexpr size_t MaxIntegerValue() { return std::is_integral<T>::value ? std::numeric_limits<T>::max(): size_t(2) << (std::numeric_limits<T>::digits - 1); } template <> constexpr size_t MaxIntegerValue<mshadow::half::half_t>() { return size_t(2) << 10; } MSHADOW_XINLINE int ilog2ul(size_t a) { int k = 1; while (a >>= 1) ++k; return k; } MSHADOW_XINLINE int ilog2ui(unsigned int a) { int k = 1; while (a >>= 1) ++k; return k; } /*! * \brief Return an NDArray of all zeros. */ inline NDArray InitZeros(const NDArrayStorageType stype, const TShape &shape, const Context &ctx, const int dtype) { // NDArray with default storage if (stype == kDefaultStorage) { NDArray ret(shape, ctx, false, dtype); ret = 0; return ret; } // NDArray with non-default storage. Storage allocation is always delayed. return NDArray(stype, shape, ctx, true, dtype); } /*! 
* \brief Helper to add a NDArray of zeros to a std::vector. */ inline void EmplaceBackZeros(const NDArrayStorageType stype, const TShape &shape, const Context &ctx, const int dtype, std::vector<NDArray> *vec) { // NDArray with default storage if (stype == kDefaultStorage) { vec->emplace_back(shape, ctx, false, dtype); vec->back() = 0; } else { // NDArray with non-default storage. Storage allocation is always delayed. vec->emplace_back(stype, shape, ctx, true, dtype); } } } // namespace common } // namespace mxnet #endif // MXNET_COMMON_UTILS_H_
BKTree.h
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.

// Balanced k-means tree (BKT): hierarchical k-means clustering used as the
// coarse search structure of SPTAG. This header defines the k-means worker
// state (KmeansArgs), the clustering routines, and the BKTree container.

#ifndef _SPTAG_COMMON_BKTREE_H_
#define _SPTAG_COMMON_BKTREE_H_

#include <stack>
#include <string>
#include <vector>
#include <shared_mutex>

#include "../VectorIndex.h"

#include "CommonUtils.h"
#include "QueryResultSet.h"
#include "WorkSpace.h"
#include "Dataset.h"
#include "DistanceUtils.h"

namespace SPTAG
{
    namespace COMMON
    {
        // node type for storing BKT
        // childStart/childEnd delimit the node's children inside the flat
        // m_pTreeRoots array; childStart < 0 is used to mark nodes without
        // expanded children (see BuildTrees / SearchTrees).
        struct BKTNode
        {
            SizeType centerid;
            SizeType childStart;
            SizeType childEnd;

            BKTNode(SizeType cid = -1) : centerid(cid), childStart(-1), childEnd(-1) {}
        };

        // Scratch buffers for one k-means run: _K clusters over _D dimensions
        // with _T worker threads (per-thread partial buffers are laid out as
        // _T consecutive blocks of _K entries).
        // NOTE(review): owns raw buffers with a destructor but no copy
        // control — copying a KmeansArgs would double-free; do not copy.
        template <typename T>
        struct KmeansArgs {
            int _K;
            int _DK;            // effective number of clusters (may be < _K when dynamicK is used)
            DimensionType _D;
            int _T;             // number of worker threads
            DistCalcMethod _M;
            T* centers;         // current centers, _K x _D
            T* newTCenters;     // candidate centers for the next iteration, _K x _D
            SizeType* counts;
            float* newCenters;  // per-thread partial sums, _T x _K x _D
            SizeType* newCounts;
            int* label;
            SizeType* clusterIdx;
            float* clusterDist;
            float* weightedCounts;
            float* newWeightedCounts;
            float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length);

            KmeansArgs(int k, DimensionType dim, SizeType datasize, int threadnum, DistCalcMethod distMethod) : _K(k), _DK(k), _D(dim), _T(threadnum), _M(distMethod) {
                centers = (T*)aligned_malloc(sizeof(T) * k * dim, ALIGN);
                newTCenters = (T*)aligned_malloc(sizeof(T) * k * dim, ALIGN);
                counts = new SizeType[k];
                newCenters = new float[threadnum * k * dim];
                newCounts = new SizeType[threadnum * k];
                label = new int[datasize];
                clusterIdx = new SizeType[threadnum * k];
                clusterDist = new float[threadnum * k];
                weightedCounts = new float[k];
                newWeightedCounts = new float[threadnum * k];
                fComputeDistance = COMMON::DistanceCalcSelector<T>(distMethod);
            }

            ~KmeansArgs() {
                aligned_free(centers);
                aligned_free(newTCenters);
                delete[] counts;
                delete[] newCenters;
                delete[] newCounts;
                delete[] label;
                delete[] clusterIdx;
                delete[] clusterDist;
                delete[] weightedCounts;
                delete[] newWeightedCounts;
            }

            inline void ClearCounts() {
                memset(newCounts, 0, sizeof(SizeType) * _T * _K);
                memset(newWeightedCounts, 0, sizeof(float) * _T * _K);
            }

            inline
            void ClearCenters() {
                memset(newCenters, 0, sizeof(float) * _T * _K * _D);
            }

            // Reset every per-thread (cluster, distance) slot to (-1, dist).
            inline void ClearDists(float dist) {
                for (int i = 0; i < _T * _K; i++) {
                    clusterIdx[i] = -1;
                    clusterDist[i] = dist;
                }
            }

            // In-place bucket sort of indices[first, last) by cluster label,
            // consuming newCounts as remaining-slot counters; afterwards each
            // cluster's chosen representative (clusterIdx[k]) is swapped to
            // the end of its bucket.
            void Shuffle(std::vector<SizeType>& indices, SizeType first, SizeType last) {
                SizeType* pos = new SizeType[_K];
                pos[0] = first;
                for (int k = 1; k < _K; k++) pos[k] = pos[k - 1] + newCounts[k - 1];

                for (int k = 0; k < _K; k++) {
                    if (newCounts[k] == 0) continue;
                    SizeType i = pos[k];
                    while (newCounts[k] > 0) {
                        SizeType swapid = pos[label[i]] + newCounts[label[i]] - 1;
                        newCounts[label[i]]--;
                        std::swap(indices[i], indices[swapid]);
                        std::swap(label[i], label[swapid]);
                    }
                    while (indices[i] != clusterIdx[k]) i++;
                    std::swap(indices[i], indices[pos[k] + counts[k] - 1]);
                }
                delete[] pos;
            }
        };

        // Recompute cluster centers from the accumulated per-cluster sums;
        // empty clusters are re-seeded from the representative point of the
        // largest eligible cluster. Returns the total movement (distance
        // between old and new centers summed over clusters).
        template <typename T>
        float RefineCenters(const Dataset<T>& data, KmeansArgs<T>& args)
        {
            // pick the largest cluster whose representative is not already at its center
            int maxcluster = -1;
            SizeType maxCount = 0;
            for (int k = 0; k < args._DK; k++) {
                if (args.counts[k] > maxCount && args.newCounts[k] > 0 && DistanceUtils::ComputeDistance((T*)data[args.clusterIdx[k]], args.centers + k * args._D, args._D, DistCalcMethod::L2) > 1e-6) {
                    maxcluster = k;
                    maxCount = args.counts[k];
                }
            }

            if (maxcluster != -1 && (args.clusterIdx[maxcluster] < 0 || args.clusterIdx[maxcluster] >= data.R()))
                LOG(Helper::LogLevel::LL_Debug, "maxcluster:%d(%d) Error dist:%f\n", maxcluster, args.newCounts[maxcluster], args.clusterDist[maxcluster]);

            float diff = 0;
            for (int k = 0; k < args._DK; k++) {
                T* TCenter = args.newTCenters + k * args._D;
                if (args.counts[k] == 0) {
                    // empty cluster: re-seed from the big cluster's representative,
                    // or keep the old center when no donor exists
                    if (maxcluster != -1) {
                        //int nextid = Utils::rand_int(last, first);
                        //while (args.label[nextid] != maxcluster) nextid = Utils::rand_int(last, first);
                        SizeType nextid = args.clusterIdx[maxcluster];
                        std::memcpy(TCenter, data[nextid], sizeof(T)*args._D);
                    }
                    else {
                        std::memcpy(TCenter, args.centers + k * args._D, sizeof(T)*args._D);
                    }
                }
                else {
                    // mean of the assigned points, optionally re-normalized for cosine
                    float* currCenters = args.newCenters + k * args._D;
                    for (DimensionType j = 0; j < args._D; j++)
                        currCenters[j] /= args.counts[k];

                    if (args._M == DistCalcMethod::Cosine) {
                        COMMON::Utils::Normalize(currCenters, args._D, COMMON::Utils::GetBase<T>());
                    }
                    for (DimensionType j = 0; j < args._D; j++) TCenter[j] = (T)(currCenters[j]);
                }
                diff += args.fComputeDistance(args.centers + k*args._D, TCenter, args._D);
            }
            return diff;
        }

        // Assign indices[first, last) to their closest centers in parallel.
        // `lambda` adds a size penalty (lambda * counts[k]) that discourages
        // oversized clusters. When updateCenters is true, per-thread center
        // sums are accumulated and the farthest point of each cluster is
        // recorded; otherwise the closest point is recorded. Returns the
        // summed assignment distance.
        template <typename T>
        inline float KmeansAssign(const Dataset<T>& data, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args, const bool updateCenters, float lambda) {
            float currDist = 0;
            SizeType subsize = (last - first - 1) / args._T + 1;

#pragma omp parallel for num_threads(args._T) shared(data, indices) reduction(+:currDist)
            for (int tid = 0; tid < args._T; tid++)
            {
                SizeType istart = first + tid * subsize;
                SizeType iend = min(first + (tid + 1) * subsize, last);
                SizeType *inewCounts = args.newCounts + tid * args._K;
                float *inewCenters = args.newCenters + tid * args._K * args._D;
                SizeType * iclusterIdx = args.clusterIdx + tid * args._K;
                float * iclusterDist = args.clusterDist + tid * args._K;
                float idist = 0;
                for (SizeType i = istart; i < iend; i++) {
                    int clusterid = 0;
                    float smallestDist = MaxDist;
                    for (int k = 0; k < args._DK; k++) {
                        float dist = args.fComputeDistance(data[indices[i]], args.centers + k*args._D, args._D) + lambda*args.counts[k];
                        if (dist > -MaxDist && dist < smallestDist) {
                            clusterid = k; smallestDist = dist;
                        }
                    }
                    args.label[i] = clusterid;
                    inewCounts[clusterid]++;
                    idist += smallestDist;
                    if (updateCenters) {
                        // accumulate the point into the per-thread center sum and
                        // track the farthest member as the cluster representative
                        const T* v = (const T*)data[indices[i]];
                        float* center = inewCenters + clusterid*args._D;
                        for (DimensionType j = 0; j < args._D; j++) center[j] += v[j];
                        if (smallestDist > iclusterDist[clusterid]) {
                            iclusterDist[clusterid] = smallestDist;
                            iclusterIdx[clusterid] = indices[i];
                        }
                    }
                    else {
                        // track the closest member as the cluster representative
                        if (smallestDist <= iclusterDist[clusterid]) {
                            iclusterDist[clusterid] = smallestDist;
                            iclusterIdx[clusterid] = indices[i];
                        }
                    }
                }
                currDist += idist;
            }

            // merge per-thread counts into thread 0's block
            for (int i = 1; i < args._T; i++) {
                for (int k = 0; k < args._DK; k++) args.newCounts[k] += args.newCounts[i*args._K + k];
            }

            if (updateCenters) {
                // merge per-thread center sums and keep the farthest representative
                for (int i = 1; i < args._T; i++) {
                    float* currCenter = args.newCenters + i*args._K*args._D;
                    for (size_t j = 0; j < ((size_t)args._DK) * args._D; j++) args.newCenters[j] += currCenter[j];

                    for (int k = 0; k < args._DK; k++) {
                        if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] > args.clusterDist[k]) {
                            args.clusterDist[k] = args.clusterDist[i*args._K + k];
                            args.clusterIdx[k] = args.clusterIdx[i*args._K + k];
                        }
                    }
                }
            }
            else {
                // keep the closest representative across threads
                for (int i = 1; i < args._T; i++) {
                    for (int k = 0; k < args._DK; k++) {
                        if (args.clusterIdx[i*args._K + k] != -1 && args.clusterDist[i*args._K + k] <= args.clusterDist[k]) {
                            args.clusterDist[k] = args.clusterDist[i*args._K + k];
                            args.clusterIdx[k] = args.clusterIdx[i*args._K + k];
                        }
                    }
                }
            }
            return currDist;
        }

        // k-means++-style seeding: try `tryIters` random center sets on a
        // sample of at most `samples` points and keep the best one in
        // newTCenters/counts.
        template <typename T>
        inline void InitCenters(const Dataset<T>& data, std::vector<SizeType>& indices, const SizeType first, const SizeType last,
            KmeansArgs<T>& args, int samples, int tryIters) {
            SizeType batchEnd = min(first + samples, last);
            float currDist, minClusterDist = MaxDist;
            for (int numKmeans = 0; numKmeans < tryIters; numKmeans++) {
                for (int k = 0; k < args._DK; k++) {
                    SizeType randid = COMMON::Utils::rand(last, first);
                    std::memcpy(args.centers + k*args._D, data[indices[randid]], sizeof(T)*args._D);
                }
                args.ClearCounts();
                args.ClearDists(MaxDist);
                currDist = KmeansAssign(data, indices, first, batchEnd, args, false, 0);
                if (currDist < minClusterDist) {
                    minClusterDist = currDist;
                    memcpy(args.newTCenters, args.centers, sizeof(T)*args._K*args._D);
                    memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);
                }
            }
        }

        // Cluster indices[first, last) into up to args._DK clusters. Iterates
        // Lloyd-style assign/refine steps on a sampled batch, then does one
        // full assignment pass and groups the indices by cluster via Shuffle.
        // Returns the number of non-empty clusters.
        template <typename T>
        int KmeansClustering(const Dataset<T>& data, std::vector<SizeType>& indices, const SizeType first, const SizeType last, KmeansArgs<T>& args, int samples = 1000) {
            InitCenters(data, indices, first, last, args, samples, 3);

            SizeType batchEnd = min(first + samples, last);
            float currDiff, currDist, minClusterDist = MaxDist;
            int noImprovement = 0;
            for (int iter = 0; iter < 100; iter++) {
                std::memcpy(args.centers, args.newTCenters, sizeof(T)*args._K*args._D);
                // NOTE(review): std::random_shuffle was removed in C++17 —
                // confirm the build standard, or migrate to std::shuffle.
                std::random_shuffle(indices.begin() + first, indices.begin() + last);

                args.ClearCenters();
                args.ClearCounts();
                args.ClearDists(-MaxDist);
                // the lambda term penalizes large clusters to keep sizes balanced
                currDist = KmeansAssign(data, indices, first, batchEnd, args, true, COMMON::Utils::GetBase<T>() * COMMON::Utils::GetBase<T>() / (100.0f * (batchEnd - first)));
                std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);

                if (currDist < minClusterDist) {
                    noImprovement = 0;
                    minClusterDist = currDist;
                }
                else {
                    noImprovement++;
                }
                currDiff = RefineCenters(data, args);
                if (currDiff < 1e-3 || noImprovement >= 5) break;
            }

            // final full assignment (no size penalty, no center updates)
            args.ClearCounts();
            args.ClearDists(MaxDist);
            currDist = KmeansAssign(data, indices, first, last, args, false, 0);
            std::memcpy(args.counts, args.newCounts, sizeof(SizeType) * args._K);

            int numClusters = 0;
            for (int i = 0; i < args._K; i++) if (args.counts[i] > 0) numClusters++;

            if (numClusters <= 1) {
                return numClusters;
            }
            args.Shuffle(indices, first, last);
            return numClusters;
        }

        // A forest of balanced k-means trees stored as flat node arrays.
        // m_lock guards m_pTreeStart / m_pTreeRoots / m_pSampleCenterMap
        // against concurrent Rebuild.
        class BKTree
        {
        public:
            BKTree(): m_iTreeNumber(1), m_iBKTKmeansK(32), m_iBKTLeafSize(8), m_iSamples(1000), m_lock(new std::shared_timed_mutex) {}

            // copies the parameters only, not the built trees
            BKTree(const BKTree& other): m_iTreeNumber(other.m_iTreeNumber),
                m_iBKTKmeansK(other.m_iBKTKmeansK),
                m_iBKTLeafSize(other.m_iBKTLeafSize),
                m_iSamples(other.m_iSamples),
                m_lock(new std::shared_timed_mutex) {}

            ~BKTree() {}

            inline const BKTNode& operator[](SizeType index) const { return m_pTreeRoots[index]; }
            inline BKTNode& operator[](SizeType index) { return m_pTreeRoots[index]; }

            inline SizeType size() const { return (SizeType)m_pTreeRoots.size(); }

            inline SizeType sizePerTree() const {
                std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
                return (SizeType)m_pTreeRoots.size() - m_pTreeStart.back();
            }

            inline const std::unordered_map<SizeType, SizeType>& GetSampleMap() const { return m_pSampleCenterMap; }

            // Build a fresh single tree from `data` and atomically swap it in.
            template <typename T>
            void Rebuild(const Dataset<T>& data, DistCalcMethod distMethod)
            {
                BKTree newTrees(*this);
                newTrees.BuildTrees<T>(data, distMethod, 1);

                std::unique_lock<std::shared_timed_mutex> lock(*m_lock);
                m_pTreeRoots.swap(newTrees.m_pTreeRoots);
                m_pTreeStart.swap(newTrees.m_pTreeStart);
                m_pSampleCenterMap.swap(newTrees.m_pSampleCenterMap);
            }

            // Build m_iTreeNumber trees over `data` (or the subset given by
            // `indices`), recursively k-means-splitting ranges larger than
            // m_iBKTLeafSize. `reverseIndices`, when given, maps local ids
            // back to caller ids for the stored node centers.
            template <typename T>
            void BuildTrees(const Dataset<T>& data, DistCalcMethod distMethod, int numOfThreads, std::vector<SizeType>* indices = nullptr, std::vector<SizeType>* reverseIndices = nullptr, bool dynamicK = false)
            {
                struct BKTStackItem {
                    SizeType index, first, last;
                    BKTStackItem(SizeType index_, SizeType first_, SizeType last_) : index(index_), first(first_), last(last_) {}
                };
                std::stack<BKTStackItem> ss;

                std::vector<SizeType> localindices;
                if (indices == nullptr) {
                    localindices.resize(data.R());
                    for (SizeType i = 0; i < localindices.size(); i++) localindices[i] = i;
                }
                else {
                    localindices.assign(indices->begin(), indices->end());
                }
                KmeansArgs<T> args(m_iBKTKmeansK, data.C(), (SizeType)localindices.size(), numOfThreads, distMethod);

                m_pSampleCenterMap.clear();
                for (char i = 0; i < m_iTreeNumber; i++)
                {
                    // NOTE(review): std::random_shuffle was removed in C++17 — confirm build standard.
                    std::random_shuffle(localindices.begin(), localindices.end());

                    m_pTreeStart.push_back((SizeType)m_pTreeRoots.size());
                    m_pTreeRoots.emplace_back((SizeType)localindices.size());
                    LOG(Helper::LogLevel::LL_Info, "Start to build BKTree %d\n", i + 1);

                    ss.push(BKTStackItem(m_pTreeStart[i], 0, (SizeType)localindices.size()));
                    while (!ss.empty()) {
                        BKTStackItem item = ss.top(); ss.pop();
                        SizeType newBKTid = (SizeType)m_pTreeRoots.size();
                        m_pTreeRoots[item.index].childStart = newBKTid;
                        if (item.last - item.first <= m_iBKTLeafSize) {
                            // small range: emit one leaf node per point
                            for (SizeType j = item.first; j < item.last; j++) {
                                SizeType cid = (reverseIndices == nullptr)? localindices[j]: reverseIndices->at(localindices[j]);
                                m_pTreeRoots.emplace_back(cid);
                            }
                        }
                        else { // clustering the data into BKTKmeansK clusters
                            if (dynamicK) {
                                args._DK = std::min<int>((item.last - item.first) / m_iBKTLeafSize + 1, m_iBKTKmeansK);
                                args._DK = std::max<int>(args._DK, 2);
                            }

                            int numClusters = KmeansClustering(data, localindices, item.first, item.last, args, m_iSamples);
                            if (numClusters <= 1) {
                                // degenerate split: collapse the range into this node,
                                // marking it via a negated childStart
                                SizeType end = min(item.last + 1, (SizeType)localindices.size());
                                std::sort(localindices.begin() + item.first, localindices.begin() + end);
                                m_pTreeRoots[item.index].centerid = (reverseIndices == nullptr) ? localindices[item.first] : reverseIndices->at(localindices[item.first]);
                                m_pTreeRoots[item.index].childStart = -m_pTreeRoots[item.index].childStart;
                                for (SizeType j = item.first + 1; j < end; j++) {
                                    SizeType cid = (reverseIndices == nullptr) ? localindices[j] : reverseIndices->at(localindices[j]);
                                    m_pTreeRoots.emplace_back(cid);
                                    m_pSampleCenterMap[cid] = m_pTreeRoots[item.index].centerid;
                                }
                                m_pSampleCenterMap[-1 - m_pTreeRoots[item.index].centerid] = item.index;
                            }
                            else {
                                // one child per non-empty cluster; the cluster's
                                // representative sits at the end of its bucket
                                for (int k = 0; k < m_iBKTKmeansK; k++) {
                                    if (args.counts[k] == 0) continue;
                                    SizeType cid = (reverseIndices == nullptr) ? localindices[item.first + args.counts[k] - 1] : reverseIndices->at(localindices[item.first + args.counts[k] - 1]);
                                    m_pTreeRoots.emplace_back(cid);
                                    if (args.counts[k] > 1) ss.push(BKTStackItem(newBKTid++, item.first, item.first + args.counts[k] - 1));
                                    item.first += args.counts[k];
                                }
                            }
                        }
                        m_pTreeRoots[item.index].childEnd = (SizeType)m_pTreeRoots.size();
                    }
                    m_pTreeRoots.emplace_back(-1);  // sentinel terminating this tree
                    LOG(Helper::LogLevel::LL_Info, "%d BKTree built, %zu %zu\n", i + 1, m_pTreeRoots.size() - m_pTreeStart[i], localindices.size());
                }
            }

            // serialized size in bytes, matching the SaveTrees layout below
            inline std::uint64_t BufferSize() const
            {
                return sizeof(int) + sizeof(SizeType) * m_iTreeNumber +
                    sizeof(SizeType) + sizeof(BKTNode) * m_pTreeRoots.size();
            }

            ErrorCode SaveTrees(std::shared_ptr<Helper::DiskPriorityIO> p_out) const
            {
                std::shared_lock<std::shared_timed_mutex> lock(*m_lock);
                IOBINARY(p_out, WriteBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
                IOBINARY(p_out, WriteBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());
                SizeType treeNodeSize = (SizeType)m_pTreeRoots.size();
                IOBINARY(p_out, WriteBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
                IOBINARY(p_out, WriteBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data());
                LOG(Helper::LogLevel::LL_Info, "Save BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
                return ErrorCode::Success;
            }

            ErrorCode SaveTrees(std::string sTreeFileName) const
            {
                LOG(Helper::LogLevel::LL_Info, "Save BKT to %s\n", sTreeFileName.c_str());
                auto ptr = f_createIO();
                if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::out)) return ErrorCode::FailedCreateFile;
                return SaveTrees(ptr);
            }

            // Load from an in-memory image in the SaveTrees layout.
            // NOTE(review): performs no bounds checking on the image — the
            // caller must guarantee the buffer is large and well formed.
            ErrorCode LoadTrees(char* pBKTMemFile)
            {
                m_iTreeNumber = *((int*)pBKTMemFile);
                pBKTMemFile += sizeof(int);
                m_pTreeStart.resize(m_iTreeNumber);
                memcpy(m_pTreeStart.data(), pBKTMemFile, sizeof(SizeType) * m_iTreeNumber);
                pBKTMemFile += sizeof(SizeType)*m_iTreeNumber;

                SizeType treeNodeSize = *((SizeType*)pBKTMemFile);
                pBKTMemFile += sizeof(SizeType);
                m_pTreeRoots.resize(treeNodeSize);
                memcpy(m_pTreeRoots.data(), pBKTMemFile, sizeof(BKTNode) * treeNodeSize);

                // older images may lack the trailing sentinel; append it
                if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1);
                LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
                return ErrorCode::Success;
            }

            ErrorCode LoadTrees(std::shared_ptr<Helper::DiskPriorityIO> p_input)
            {
                IOBINARY(p_input, ReadBinary, sizeof(m_iTreeNumber), (char*)&m_iTreeNumber);
                m_pTreeStart.resize(m_iTreeNumber);
                IOBINARY(p_input, ReadBinary, sizeof(SizeType) * m_iTreeNumber, (char*)m_pTreeStart.data());

                SizeType treeNodeSize;
                IOBINARY(p_input, ReadBinary, sizeof(treeNodeSize), (char*)&treeNodeSize);
                m_pTreeRoots.resize(treeNodeSize);
                IOBINARY(p_input, ReadBinary, sizeof(BKTNode) * treeNodeSize, (char*)m_pTreeRoots.data());

                // older images may lack the trailing sentinel; append it
                if (m_pTreeRoots.size() > 0 && m_pTreeRoots.back().centerid != -1) m_pTreeRoots.emplace_back(-1);
                LOG(Helper::LogLevel::LL_Info, "Load BKT (%d,%d) Finish!\n", m_iTreeNumber, treeNodeSize);
                return ErrorCode::Success;
            }

            ErrorCode LoadTrees(std::string sTreeFileName)
            {
                LOG(Helper::LogLevel::LL_Info, "Load BKT From %s\n", sTreeFileName.c_str());
                auto ptr = f_createIO();
                if (ptr == nullptr || !ptr->Initialize(sTreeFileName.c_str(), std::ios::binary | std::ios::in)) return ErrorCode::FailedOpenFile;
                return LoadTrees(ptr);
            }

            // Seed the search priority queue with each tree's root (or the
            // root's children when expanded).
            template <typename T>
            void InitSearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space) const
            {
                for (char i = 0; i < m_iTreeNumber; i++) {
                    const BKTNode& node = m_pTreeRoots[m_pTreeStart[i]];
                    if (node.childStart < 0) {
                        p_space.m_SPTQueue.insert(COMMON::HeapCell(m_pTreeStart[i], fComputeDistance(p_query.GetTarget(), data[node.centerid], data.C())));
                    }
                    else {
                        for (SizeType begin = node.childStart; begin < node.childEnd; begin++) {
                            SizeType index = m_pTreeRoots[begin].centerid;
                            p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, fComputeDistance(p_query.GetTarget(), data[index], data.C())));
                        }
                    }
                }
            }

            // Best-first traversal of the tree queue: leaves feed the
            // neighborhood-graph queue until p_limits leaves are checked;
            // internal nodes push their children, ranked by center distance.
            template <typename T>
            void SearchTrees(const Dataset<T>& data, float(*fComputeDistance)(const T* pX, const T* pY, DimensionType length), const COMMON::QueryResultSet<T> &p_query, COMMON::WorkSpace &p_space, const int p_limits) const
            {
                while (!p_space.m_SPTQueue.empty())
                {
                    COMMON::HeapCell bcell = p_space.m_SPTQueue.pop();
                    const BKTNode& tnode = m_pTreeRoots[bcell.node];
                    if (tnode.childStart < 0) {
                        if (!p_space.CheckAndSet(tnode.centerid)) {
                            p_space.m_iNumberOfCheckedLeaves++;
                            p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
                        }
                        if (p_space.m_iNumberOfCheckedLeaves >= p_limits) break;
                    }
                    else {
                        if (!p_space.CheckAndSet(tnode.centerid)) {
                            p_space.m_NGQueue.insert(COMMON::HeapCell(tnode.centerid, bcell.distance));
                        }
                        for (SizeType begin = tnode.childStart; begin < tnode.childEnd; begin++) {
                            SizeType index = m_pTreeRoots[begin].centerid;
                            p_space.m_SPTQueue.insert(COMMON::HeapCell(begin, fComputeDistance(p_query.GetTarget(), data[index], data.C())));
                        }
                    }
                }
            }

        private:
            std::vector<SizeType> m_pTreeStart;
            std::vector<BKTNode> m_pTreeRoots;
            std::unordered_map<SizeType, SizeType> m_pSampleCenterMap;

        public:
            std::unique_ptr<std::shared_timed_mutex> m_lock;
            int m_iTreeNumber, m_iBKTKmeansK, m_iBKTLeafSize, m_iSamples;
        };
    }
}
#endif
paraloopend.h
/** \file elbeem/intern/paraloopend.h * \ingroup elbeem */ // same as grid loop_end + barrier } // i int i=0; //dummy ADVANCE_POINTERS(2*gridLoopBound); } // j # if COMPRESSGRIDS==1 # if PARALLEL==1 //frintf(stderr," (id=%d k=%d) ",id,k); #pragma omp barrier # endif // PARALLEL==1 # else // COMPRESSGRIDS==1 int i=0; //dummy ADVANCE_POINTERS(mLevel[lev].lSizex*2); # endif // COMPRESSGRIDS==1 } // all cell loop k,j,i #pragma omp critical { if(doReduce) { // synchronize global vars for(size_t j=0; j<calcListFull.size() ; j++) mListFull.push_back( calcListFull[j] ); for(size_t j=0; j<calcListEmpty.size(); j++) mListEmpty.push_back( calcListEmpty[j] ); for(size_t j=0; j<calcListParts.size(); j++) mpParticles->addFullParticle( calcListParts[j] ); if(calcMaxVlen>mMaxVlen) { mMxvx = calcMxvx; mMxvy = calcMxvy; mMxvz = calcMxvz; mMaxVlen = calcMaxVlen; } if(0) {debMsgStd("OMP_CRIT",DM_MSG, "reduce id"<<id<<" curr: "<<mMaxVlen<<"|"<<mMxvx<<","<<mMxvy<<","<<mMxvz<< " calc[ "<<calcMaxVlen<<"|"<<calcMxvx<<","<<calcMxvy<<","<<calcMxvz<<"] " ,4 ); } } } // critical } /* main_region */ //?lobOutstrForce = true;
GB_unaryop__abs_int64_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): comments only added here; the generated code is untouched.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__abs_int64_uint8
// op(A') function:  GB_tran__abs_int64_uint8

// C type:   int64_t
// A type:   uint8_t
// cast:     int64_t cij = (int64_t) aij
// unaryop:  cij = GB_IABS (aij)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    int64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: GB_IABS is a no-op for uint8_t input, kept for uniformity
// with the signed variants of this generated family.
#define GB_OP(z, x) \
    z = GB_IABS (x) ;

// casting
#define GB_CASTING(z, aij) \
    int64_t z = (int64_t) aij ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                                   \
    /* aij = Ax [pA] */             \
    GB_GETA (aij, Ax, pA) ;         \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ;           \
    GB_OP (GB_CX (pC), z) ;         \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_INT64 || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__abs_int64_uint8
(
    int64_t *Cx,       // Cx and Ax may be aliased
    uint8_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // element-wise, embarrassingly parallel over the anz entries
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__abs_int64_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is textually included; the GB_* macros above
    // specialize it for this type/operator combination
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
colormap.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % CCCC OOO L OOO RRRR M M AAA PPPP % % C O O L O O R R MM MM A A P P % % C O O L O O RRRR M M M AAAAA PPPP % % C O O L O O R R M M A A P % % CCCC OOO LLLLL OOO R R M M A A P % % % % % % MagickCore Colormap Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2020 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % We use linked-lists because splay-trees do not currently support duplicate % key / value pairs (.e.g X11 green compliance and SVG green compliance). % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/cache.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/client.h" #include "MagickCore/configure.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/resource_.h" #include "MagickCore/semaphore.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/token.h" #include "MagickCore/utility.h" #include "MagickCore/xml-tree.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImageColormap() allocates an image colormap and initializes % it to a linear gray colorspace. If the image already has a colormap, % it is replaced. AcquireImageColormap() returns MagickTrue if successful, % otherwise MagickFalse if there is not enough memory. % % The format of the AcquireImageColormap method is: % % MagickBooleanType AcquireImageColormap(Image *image,const size_t colors, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o colors: the number of colors in the image colormap. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType AcquireImageColormap(Image *image, const size_t colors,ExceptionInfo *exception) { register ssize_t i; /* Allocate image colormap. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); image->colors=MagickMax(colors,1); if (image->colormap == (PixelInfo *) NULL) image->colormap=(PixelInfo *) AcquireQuantumMemory(image->colors+1, sizeof(*image->colormap)); else image->colormap=(PixelInfo *) ResizeQuantumMemory(image->colormap, image->colors+1,sizeof(*image->colormap)); if (image->colormap == (PixelInfo *) NULL) { image->colors=0; image->storage_class=DirectClass; ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } for (i=0; i < (ssize_t) image->colors; i++) { double pixel; GetPixelInfo(image,image->colormap+i); pixel=(double) (i*(QuantumRange/MagickMax(colors-1,1))); image->colormap[i].red=pixel; image->colormap[i].green=pixel; image->colormap[i].blue=pixel; image->colormap[i].alpha=(MagickRealType) OpaqueAlpha; image->colormap[i].alpha_trait=BlendPixelTrait; } return(SetImageStorageClass(image,PseudoClass,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C y c l e C o l o r m a p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CycleColormap() displaces an image's colormap by a given number of % positions. If you cycle the colormap a number of times you can produce % a psychodelic effect. % % WARNING: this assumes an images colormap is in a well know and defined % order. Currently Imagemagick has no way of setting that order. % % The format of the CycleColormapImage method is: % % MagickBooleanType CycleColormapImage(Image *image,const ssize_t displace, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o displace: displace the colormap this amount. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType CycleColormapImage(Image *image, const ssize_t displace,ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == DirectClass) (void) SetImageType(image,PaletteType,exception); status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; ssize_t index; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { index=(ssize_t) (GetPixelIndex(image,q)+displace) % image->colors; if (index < 0) index+=(ssize_t) image->colors; SetPixelIndex(image,(Quantum) index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + S o r t C o l o r m a p B y I n t e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SortColormapByIntensity() sorts the colormap of a PseudoClass image by % decreasing color intensity. % % The format of the SortColormapByIntensity method is: % % MagickBooleanType SortColormapByIntensity(Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: A pointer to an Image structure. 
% % o exception: return any errors or warnings in this structure. % */ #if defined(__cplusplus) || defined(c_plusplus) extern "C" { #endif static int IntensityCompare(const void *x,const void *y) { const PixelInfo *color_1, *color_2; int intensity; color_1=(const PixelInfo *) x; color_2=(const PixelInfo *) y; intensity=(int) GetPixelInfoIntensity((const Image *) NULL,color_2)-(int) GetPixelInfoIntensity((const Image *) NULL,color_1); return(intensity); } #if defined(__cplusplus) || defined(c_plusplus) } #endif MagickExport MagickBooleanType SortColormapByIntensity(Image *image, ExceptionInfo *exception) { CacheView *image_view; MagickBooleanType status; register ssize_t i; ssize_t y; unsigned short *pixels; assert(image != (Image *) NULL); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); assert(image->signature == MagickCoreSignature); if (image->storage_class != PseudoClass) return(MagickTrue); /* Allocate memory for pixel indexes. */ pixels=(unsigned short *) AcquireQuantumMemory((size_t) image->colors, sizeof(*pixels)); if (pixels == (unsigned short *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); /* Assign index values to colormap entries. */ for (i=0; i < (ssize_t) image->colors; i++) image->colormap[i].alpha=(double) i; /* Sort image colormap by decreasing color popularity. */ qsort((void *) image->colormap,(size_t) image->colors, sizeof(*image->colormap),IntensityCompare); /* Update image colormap indexes to sorted colormap order. 
*/ for (i=0; i < (ssize_t) image->colors; i++) pixels[(ssize_t) image->colormap[i].alpha]=(unsigned short) i; status=MagickTrue; image_view=AcquireAuthenticCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { Quantum index; register ssize_t x; register Quantum *magick_restrict q; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; break; } for (x=0; x < (ssize_t) image->columns; x++) { index=(Quantum) pixels[(ssize_t) GetPixelIndex(image,q)]; SetPixelIndex(image,index,q); SetPixelViaPixelInfo(image,image->colormap+(ssize_t) index,q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (status == MagickFalse) break; } image_view=DestroyCacheView(image_view); pixels=(unsigned short *) RelinquishMagickMemory(pixels); return(status); }
enhance.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % EEEEE N N H H AAA N N CCCC EEEEE % % E NN N H H A A NN N C E % % EEE N N N HHHHH AAAAA N N N C EEE % % E N NN H H A A N NN C E % % EEEEE N N H H A A N N CCCC EEEEE % % % % % % MagickCore Image Enhancement Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/accelerate-private.h" #include "MagickCore/artifact.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/channel.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite-private.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/gem-private.h" #include "MagickCore/geometry.h" #include "MagickCore/histogram.h" #include "MagickCore/image.h" #include "MagickCore/image-private.h" #include "MagickCore/memory_.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/option.h" #include "MagickCore/pixel.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/resample.h" #include "MagickCore/resample-private.h" #include "MagickCore/resource_.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoGammaImage() extract the 'mean' from the image and adjust the image % to try make set its gamma appropriatally. 
%
%  The format of the AutoGammaImage method is:
%
%      MagickBooleanType AutoGammaImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoGammaImage(Image *image,
  ExceptionInfo *exception)
{
  double
    gamma,
    log_mean,
    mean,
    sans;

  MagickStatusType
    status;

  register ssize_t
    i;

  /* Target: pick gamma so the image mean maps to mid-gray (0.5). */
  log_mean=log(0.5);
  if (image->channel_mask == DefaultChannels)
    {
      /*
        Apply gamma correction equally across all given channels.
      */
      (void) GetImageMean(image,&mean,&sans,exception);
      /* NOTE(review): mean == 0 yields log(0) and a non-finite gamma —
         presumably harmless downstream, but confirm LevelImage's handling. */
      gamma=log(mean*QuantumScale)/log_mean;
      return(LevelImage(image,0.0,(double) QuantumRange,gamma,exception));
    }
  /*
    Auto-gamma each channel separately: temporarily narrow the channel mask
    to one channel, level it, then restore the mask.
  */
  status=MagickTrue;
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    ChannelType
      channel_mask;

    PixelChannel channel=GetPixelChannelChannel(image,i);
    PixelTrait traits=GetPixelChannelTraits(image,channel);
    if ((traits & UpdatePixelTrait) == 0)
      continue;
    channel_mask=SetImageChannelMask(image,(ChannelType) (1 << i));
    status=GetImageMean(image,&mean,&sans,exception);
    gamma=log(mean*QuantumScale)/log_mean;
    status&=LevelImage(image,0.0,(double) QuantumRange,gamma,exception);
    (void) SetImageChannelMask(image,channel_mask);
    /* Any channel failure aborts the remaining channels. */
    if (status == MagickFalse)
      break;
  }
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o L e v e l I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoLevelImage() adjusts the levels of a particular image channel by
%  scaling the minimum and maximum values to the full quantum range.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType AutoLevelImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-level
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType AutoLevelImage(Image *image,
  ExceptionInfo *exception)
{
  /* Full min/max stretch with neutral (1.0) gamma. */
  return(MinMaxStretchImage(image,0.0,0.0,1.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   B r i g h t n e s s C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BrightnessContrastImage() changes the brightness and/or contrast of an
%  image.  It converts the brightness and contrast parameters into slope and
%  intercept and calls a polynomial function to apply to the image.
%
%  The format of the BrightnessContrastImage method is:
%
%      MagickBooleanType BrightnessContrastImage(Image *image,
%        const double brightness,const double contrast,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o brightness: the brightness percent (-100 .. 100).
%
%    o contrast: the contrast percent (-100 .. 100).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BrightnessContrastImage(Image *image,
  const double brightness,const double contrast,ExceptionInfo *exception)
{
/* NOTE(review): tag string is misspelled ("Contast") but is runtime-visible
   progress-monitor text; left unchanged to avoid altering behavior. */
#define BrightnessContastImageTag  "BrightnessContast/Image"

  double
    alpha,
    coefficients[2],
    intercept,
    slope;

  MagickBooleanType
    status;

  /*
    Compute slope and intercept.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  alpha=contrast;
  /* Map contrast percent to a slope via tan(); contrast 0 -> slope 1. */
  slope=tan((double) (MagickPI*(alpha/100.0+1.0)/4.0));
  if (slope < 0.0)
    slope=0.0;
  intercept=brightness/100.0+((100-brightness)/200.0)*(1.0-slope);
  coefficients[0]=slope;
  coefficients[1]=intercept;
  /* Apply y = slope*x + intercept to every updatable channel. */
  status=FunctionImage(image,PolynomialFunction,2,coefficients,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l u t I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClutImage() replaces each color value in the given image, by using it as an
%  index to lookup a replacement color value in a Color Look UP Table in the
%  form of an image.  The values are extracted along a diagonal of the CLUT
%  image so either a horizontal or vertical gradient image can be used.
%
%  Typically this is used to either re-color a gray-scale image according to a
%  color gradient in the CLUT image, or to perform a freeform histogram
%  (level) adjustment according to the (typically gray-scale) gradient in the
%  CLUT image.
%
%  When the 'channel' mask includes the matte/alpha transparency channel but
%  one image has no such channel it is assumed that that image is a simple
%  gray-scale image that will effect the alpha channel values, either for
%  gray-scale coloring (with transparent or semi-transparent colors), or
%  a histogram adjustment of existing alpha channel values.   If both images
%  have matte channels, direct and normal indexing is applied, which is rarely
%  used.
%
%  The format of the ClutImage method is:
%
%      MagickBooleanType ClutImage(Image *image,Image *clut_image,
%        const PixelInterpolateMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o clut_image: the color lookup table image for replacement color values.
%
%    o method: the pixel interpolation method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClutImage(Image *image,const Image *clut_image,
  const PixelInterpolateMethod method,ExceptionInfo *exception)
{
#define ClutImageTag  "Clut/Image"

  CacheView
    *clut_view,
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    *clut_map;

  register ssize_t
    i;

  ssize_t
    adjust,
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(clut_image != (Image *) NULL);
  assert(clut_image->signature == MagickCoreSignature);
  if( SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* A color CLUT applied to a gray image requires a color target space. */
  if( (IsGrayColorspace(image->colorspace) != MagickFalse) &&
      (IsGrayColorspace(clut_image->colorspace) == MagickFalse))
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  clut_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*clut_map));
  if (clut_map == (PixelInfo *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Clut image.
  */
  status=MagickTrue;
  progress=0;
  /* Integer interpolation samples cell centers; others sample up to the
     last row/column, hence the off-by-one adjustment. */
  adjust=(ssize_t) (clut_image->interpolate == IntegerInterpolatePixel ?
    0 : 1);
  clut_view=AcquireVirtualCacheView(clut_image,exception);
  /*
    Precompute the lookup table by sampling the CLUT image along its
    diagonal, one entry per quantum map slot.
  */
  for (i=0; i <= (ssize_t) MaxMap; i++)
  {
    GetPixelInfo(clut_image,clut_map+i);
    (void) InterpolatePixelInfo(clut_image,clut_view,method,
      (double) i*(clut_image->columns-adjust)/MaxMap,(double) i*
      (clut_image->rows-adjust)/MaxMap,clut_map+i,exception);
  }
  clut_view=DestroyCacheView(clut_view);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      PixelTrait
        traits;

      /* Masked pixels are left untouched. */
      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      GetPixelInfoPixel(image,q,&pixel);
      /* Each updatable channel indexes the precomputed table by its own
         current value. */
      traits=GetPixelChannelTraits(image,RedPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.red=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.red))].red;
      traits=GetPixelChannelTraits(image,GreenPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.green=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.green))].green;
      traits=GetPixelChannelTraits(image,BluePixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.blue=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.blue))].blue;
      traits=GetPixelChannelTraits(image,BlackPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.black=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.black))].black;
      traits=GetPixelChannelTraits(image,AlphaPixelChannel);
      if ((traits & UpdatePixelTrait) != 0)
        pixel.alpha=clut_map[ScaleQuantumToMap(ClampToQuantum(
          pixel.alpha))].alpha;
      SetPixelViaPixelInfo(image,&pixel,q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClutImage)
#endif
        proceed=SetImageProgress(image,ClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  clut_map=(PixelInfo *) RelinquishMagickMemory(clut_map);
  /* If the CLUT carries alpha and alpha is updatable, make sure the target
     image's alpha channel is active. */
  if ((clut_image->alpha_trait != UndefinedPixelTrait) &&
      ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0))
    (void) SetImageAlphaChannel(image,ActivateAlphaChannel,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C o l o r D e c i s i o n L i s t I m a g e                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ColorDecisionListImage() accepts a lightweight Color Correction Collection
%  (CCC) file which solely contains one or more color corrections and applies
%  the correction to the image.  Here is a sample CCC file:
%
%    <ColorCorrectionCollection xmlns="urn:ASC:CDL:v1.2">
%          <ColorCorrection id="cc03345">
%                <SOPNode>
%                     <Slope> 0.9 1.2 0.5 </Slope>
%                     <Offset> 0.4 -0.5 0.6 </Offset>
%                     <Power> 1.0 0.8 1.5 </Power>
%                </SOPNode>
%                <SATNode>
%                     <Saturation> 0.85 </Saturation>
%                </SATNode>
%          </ColorCorrection>
%    </ColorCorrectionCollection>
%
%  which includes the slop, offset, and power for each of the RGB channels
%  as well as the saturation.
%
%  The format of the ColorDecisionListImage method is:
%
%      MagickBooleanType ColorDecisionListImage(Image *image,
%        const char *color_correction_collection,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o color_correction_collection: the color correction collection in XML.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType ColorDecisionListImage(Image *image, const char *color_correction_collection,ExceptionInfo *exception) { #define ColorDecisionListCorrectImageTag "ColorDecisionList/Image" typedef struct _Correction { double slope, offset, power; } Correction; typedef struct _ColorCorrection { Correction red, green, blue; double saturation; } ColorCorrection; CacheView *image_view; char token[MagickPathExtent]; ColorCorrection color_correction; const char *content, *p; MagickBooleanType status; MagickOffsetType progress; PixelInfo *cdl_map; register ssize_t i; ssize_t y; XMLTreeInfo *cc, *ccc, *sat, *sop; /* Allocate and initialize cdl maps. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (color_correction_collection == (const char *) NULL) return(MagickFalse); ccc=NewXMLTree((const char *) color_correction_collection,exception); if (ccc == (XMLTreeInfo *) NULL) return(MagickFalse); cc=GetXMLTreeChild(ccc,"ColorCorrection"); if (cc == (XMLTreeInfo *) NULL) { ccc=DestroyXMLTree(ccc); return(MagickFalse); } color_correction.red.slope=1.0; color_correction.red.offset=0.0; color_correction.red.power=1.0; color_correction.green.slope=1.0; color_correction.green.offset=0.0; color_correction.green.power=1.0; color_correction.blue.slope=1.0; color_correction.blue.offset=0.0; color_correction.blue.power=1.0; color_correction.saturation=0.0; sop=GetXMLTreeChild(cc,"SOPNode"); if (sop != (XMLTreeInfo *) NULL) { XMLTreeInfo *offset, *power, *slope; slope=GetXMLTreeChild(sop,"Slope"); if (slope != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(slope); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.slope=StringToDouble(token,(char **) NULL); 
break; } case 1: { color_correction.green.slope=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.slope=StringToDouble(token, (char **) NULL); break; } } } } offset=GetXMLTreeChild(sop,"Offset"); if (offset != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(offset); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.offset=StringToDouble(token, (char **) NULL); break; } case 1: { color_correction.green.offset=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.offset=StringToDouble(token, (char **) NULL); break; } } } } power=GetXMLTreeChild(sop,"Power"); if (power != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(power); p=(const char *) content; for (i=0; (*p != '\0') && (i < 3); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); switch (i) { case 0: { color_correction.red.power=StringToDouble(token,(char **) NULL); break; } case 1: { color_correction.green.power=StringToDouble(token, (char **) NULL); break; } case 2: { color_correction.blue.power=StringToDouble(token, (char **) NULL); break; } } } } } sat=GetXMLTreeChild(cc,"SATNode"); if (sat != (XMLTreeInfo *) NULL) { XMLTreeInfo *saturation; saturation=GetXMLTreeChild(sat,"Saturation"); if (saturation != (XMLTreeInfo *) NULL) { content=GetXMLTreeContent(saturation); p=(const char *) content; GetNextToken(p,&p,MagickPathExtent,token); color_correction.saturation=StringToDouble(token,(char **) NULL); } } ccc=DestroyXMLTree(ccc); if (image->debug != MagickFalse) { (void) LogMagickEvent(TransformEvent,GetMagickModule(), " Color Correction Collection:"); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.slope: %g",color_correction.red.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " 
color_correction.red.offset: %g",color_correction.red.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.red.power: %g",color_correction.red.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.slope: %g",color_correction.green.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.offset: %g",color_correction.green.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.green.power: %g",color_correction.green.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.slope: %g",color_correction.blue.slope); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.offset: %g",color_correction.blue.offset); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.blue.power: %g",color_correction.blue.power); (void) LogMagickEvent(TransformEvent,GetMagickModule(), " color_correction.saturation: %g",color_correction.saturation); } cdl_map=(PixelInfo *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*cdl_map)); if (cdl_map == (PixelInfo *) NULL) ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); for (i=0; i <= (ssize_t) MaxMap; i++) { cdl_map[i].red=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.red.slope*i/MaxMap+ color_correction.red.offset,color_correction.red.power)))); cdl_map[i].green=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.green.slope*i/MaxMap+ color_correction.green.offset,color_correction.green.power)))); cdl_map[i].blue=(double) ScaleMapToQuantum((double) (MaxMap*(pow(color_correction.blue.slope*i/MaxMap+ color_correction.blue.offset,color_correction.blue.power)))); } if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Apply transfer function to colormap. 
*/ double luma; luma=0.21267f*image->colormap[i].red+0.71526*image->colormap[i].green+ 0.07217f*image->colormap[i].blue; image->colormap[i].red=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].red))].red-luma; image->colormap[i].green=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].green))].green-luma; image->colormap[i].blue=luma+color_correction.saturation*cdl_map[ ScaleQuantumToMap(ClampToQuantum(image->colormap[i].blue))].blue-luma; } /* Apply transfer function to image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { double luma; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { luma=0.21267f*GetPixelRed(image,q)+0.71526*GetPixelGreen(image,q)+ 0.07217f*GetPixelBlue(image,q); SetPixelRed(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelRed(image,q))].red-luma)),q); SetPixelGreen(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelGreen(image,q))].green-luma)),q); SetPixelBlue(image,ClampToQuantum(luma+color_correction.saturation* (cdl_map[ScaleQuantumToMap(GetPixelBlue(image,q))].blue-luma)),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ColorDecisionListImageChannel) #endif 
          /* Tail of ColorDecisionListImage(): report progress, tear down. */
          proceed=SetImageProgress(image,ColorDecisionListCorrectImageTag,
            progress++,image->rows);
          if (proceed == MagickFalse)
            status=MagickFalse;
        }
    }
  image_view=DestroyCacheView(image_view);
  cdl_map=(PixelInfo *) RelinquishMagickMemory(cdl_map);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastImage() enhances the intensity differences between the lighter and
%  darker elements of the image.  Set sharpen to MagickTrue to increase the
%  image contrast, otherwise the contrast is reduced.
%
%  The format of the ContrastImage method is:
%
%      MagickBooleanType ContrastImage(Image *image,
%        const MagickBooleanType sharpen,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: increase (MagickTrue) or decrease image contrast.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Adjust the contrast of a single RGB triplet in place.  sign is +1 to
  sharpen or -1 to dull.  The adjustment is applied to the brightness
  component in HSB space (a sine-shaped push toward/away from mid-gray),
  so hue and saturation are preserved.
*/
static void Contrast(const int sign,double *red,double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Enhance contrast: dark colors become darker, light colors become lighter.
  */
  assert(red != (double *) NULL);
  assert(green != (double *) NULL);
  assert(blue != (double *) NULL);
  hue=0.0;
  saturation=0.0;
  brightness=0.0;
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  /* Sine push: zero at brightness 0, 0.5, and 1, strongest in between. */
  brightness+=0.5*sign*(0.5*(sin((double) (MagickPI*(brightness-0.5)))+1.0)-
    brightness);
  if (brightness > 1.0)
    brightness=1.0;
  else
    if (brightness < 0.0)
      brightness=0.0;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

MagickExport MagickBooleanType ContrastImage(Image *image,
  const MagickBooleanType sharpen,ExceptionInfo *exception)
{
#define ContrastImageTag  "Contrast/Image"

  CacheView
    *image_view;

  int
    sign;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  /* Prefer the OpenCL-accelerated path when available. */
  if (AccelerateContrastImage(image,sharpen,exception) != MagickFalse)
    return(MagickTrue);
#endif
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  sign=sharpen != MagickFalse ? 1 : -1;
  if (image->storage_class == PseudoClass)
    {
      /*
        Contrast enhance colormap.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        double
          blue,
          green,
          red;

        red=(double) image->colormap[i].red;
        green=(double) image->colormap[i].green;
        blue=(double) image->colormap[i].blue;
        Contrast(sign,&red,&green,&blue);
        image->colormap[i].red=(MagickRealType) red;
        image->colormap[i].green=(MagickRealType) green;
        image->colormap[i].blue=(MagickRealType) blue;
      }
    }
  /*
    Contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      blue,
      green,
      red;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      red=(double) GetPixelRed(image,q);
      green=(double) GetPixelGreen(image,q);
      blue=(double) GetPixelBlue(image,q);
      Contrast(sign,&red,&green,&blue);
      SetPixelRed(image,ClampToQuantum(red),q);
      SetPixelGreen(image,ClampToQuantum(green),q);
      SetPixelBlue(image,ClampToQuantum(blue),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastImage)
#endif
        proceed=SetImageProgress(image,ContrastImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C o n t r a s t S t r e t c h I m a g e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ContrastStretchImage() is a simple image enhancement technique that
%  attempts to improve the contrast in an image by 'stretching' the range of
%  intensity values it contains to span a desired range of values.  It differs
%  from the more sophisticated histogram equalization in that it can only
%  apply a linear scaling function to the image pixel values.  As a result the
%  'enhancement' is less harsh.
%
%  The format of the ContrastStretchImage method is:
%
%      MagickBooleanType ContrastStretchImage(Image *image,
%        const double black_point,const double white_point,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: number of darkest pixels to clip to black (a pixel
%      count in the range 0 to columns*rows).
%
%    o white_point: number of lightest pixels to clip to white (a pixel
%      count in the range 0 to columns*rows).
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ContrastStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define MaxRange(color)  ((double) ScaleQuantumToMap((Quantum) (color)))
#define ContrastStretchImageTag  "ContrastStretch/Image"

  CacheView
    *image_view;

  double
    *black,      /* per-channel black level (histogram bin index) */
    *histogram,  /* (MaxMap+1) x channels histogram */
    *stretch_map,/* per-channel linear remapping table */
    *white;      /* per-channel white level (histogram bin index) */

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate histogram and stretch map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageGray(image,exception) != MagickFalse)
    (void) SetImageColorspace(image,GRAYColorspace,exception);
  black=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*black));
  white=(double *) AcquireQuantumMemory(GetPixelChannels(image),sizeof(*white));
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)*
    sizeof(*histogram));
  stretch_map=(double *) AcquireQuantumMemory(MaxMap+1UL,
    GetPixelChannels(image)*sizeof(*stretch_map));
  if ((black == (double *) NULL) || (white == (double *) NULL) ||
      (histogram == (double *) NULL) || (stretch_map == (double *) NULL))
    {
      /* Release whatever was allocated before throwing. */
      if (stretch_map != (double *) NULL)
        stretch_map=(double *) RelinquishMagickMemory(stretch_map);
      if (histogram != (double *) NULL)
        histogram=(double *) RelinquishMagickMemory(histogram);
      if (white != (double *) NULL)
        white=(double *) RelinquishMagickMemory(white);
      if (black != (double *) NULL)
        black=(double *) RelinquishMagickMemory(black);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Form histogram.
  */
  status=MagickTrue;
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      pixel=GetPixelIntensity(image,p);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        /*
          With the default channel mask all channels share one intensity
          histogram; otherwise each channel is binned independently.
        */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) p[i];
        histogram[GetPixelChannels(image)*ScaleQuantumToMap(
          ClampToQuantum(pixel))+i]++;
      }
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black/white levels.
  */
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    double
      intensity;

    register ssize_t
      j;

    black[i]=0.0;
    white[i]=MaxRange(QuantumRange);
    /* Walk up from the dark end until black_point pixels are consumed. */
    intensity=0.0;
    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > black_point)
        break;
    }
    black[i]=(double) j;
    /* Walk down from the light end for the white level. */
    intensity=0.0;
    for (j=(ssize_t) MaxMap; j != 0; j--)
    {
      intensity+=histogram[GetPixelChannels(image)*j+i];
      if (intensity > ((double) image->columns*image->rows-white_point))
        break;
    }
    white[i]=(double) j;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the histogram to create the stretched image mapping.
  */
  (void) ResetMagickMemory(stretch_map,0,(MaxMap+1)*GetPixelChannels(image)*
    sizeof(*stretch_map));
  for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
  {
    register ssize_t
      j;

    for (j=0; j <= (ssize_t) MaxMap; j++)
    {
      double
        gamma;

      /* PerceptibleReciprocal() guards against black[i] == white[i]. */
      gamma=PerceptibleReciprocal(white[i]-black[i]);
      if (j < (ssize_t) black[i])
        stretch_map[GetPixelChannels(image)*j+i]=0.0;
      else
        if (j > (ssize_t) white[i])
          stretch_map[GetPixelChannels(image)*j+i]=(double) QuantumRange;
        else
          stretch_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum(
            (double) (MaxMap*gamma*(j-black[i])));
    }
  }
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        j;

      /*
        Stretch-contrast colormap.
      */
      for (j=0; j < (ssize_t) image->colors; j++)
      {
        if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,RedPixelChannel);
            image->colormap[j].red=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+i];
          }
        if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,GreenPixelChannel);
            image->colormap[j].green=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+i];
          }
        if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,BluePixelChannel);
            image->colormap[j].blue=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+i];
          }
        if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
          {
            i=GetPixelChannelOffset(image,AlphaPixelChannel);
            image->colormap[j].alpha=stretch_map[GetPixelChannels(image)*
              ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+i];
          }
      }
    }
  /*
    Stretch-contrast image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=ClampToQuantum(stretch_map[GetPixelChannels(image)*
          ScaleQuantumToMap(q[j])+j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ContrastStretchImage)
#endif
        proceed=SetImageProgress(image,ContrastStretchImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  stretch_map=(double *) RelinquishMagickMemory(stretch_map);
  white=(double *) RelinquishMagickMemory(white);
  black=(double *) RelinquishMagickMemory(black);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E n h a n c e I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EnhanceImage() applies a digital filter that improves the quality of a
%  noisy image.
%
%  The format of the EnhanceImage method is:
%
%      Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *EnhanceImage(const Image *image,ExceptionInfo *exception)
{
#define EnhanceImageTag  "Enhance/Image"
/*
  Fold the pixel at r into the weighted aggregate, but only when it is
  "close enough" (color distance below 0.069) to the center pixel; this makes
  the 5x5 blur edge-preserving.  Advances r one pixel.  NOTE: relies on the
  surrounding locals mean, distance, distance_squared, aggregate,
  total_weight, pixel, and r.
*/
#define EnhancePixel(weight) \
  mean=QuantumScale*((double) GetPixelRed(image,r)+pixel.red)/2.0; \
  distance=QuantumScale*((double) GetPixelRed(image,r)-pixel.red); \
  distance_squared=(4.0+mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelGreen(image,r)+pixel.green)/2.0; \
  distance=QuantumScale*((double) GetPixelGreen(image,r)-pixel.green); \
  distance_squared+=(7.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlue(image,r)+pixel.blue)/2.0; \
  distance=QuantumScale*((double) GetPixelBlue(image,r)-pixel.blue); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelBlack(image,r)+pixel.black)/2.0; \
  distance=QuantumScale*((double) GetPixelBlack(image,r)-pixel.black); \
  distance_squared+=(5.0-mean)*distance*distance; \
  mean=QuantumScale*((double) GetPixelAlpha(image,r)+pixel.alpha)/2.0; \
  distance=QuantumScale*((double) GetPixelAlpha(image,r)-pixel.alpha); \
  distance_squared+=(5.0-mean)*distance*distance; \
  if (distance_squared < 0.069) \
    { \
      aggregate.red+=(weight)*GetPixelRed(image,r); \
      aggregate.green+=(weight)*GetPixelGreen(image,r); \
      aggregate.blue+=(weight)*GetPixelBlue(image,r); \
      aggregate.black+=(weight)*GetPixelBlack(image,r); \
      aggregate.alpha+=(weight)*GetPixelAlpha(image,r); \
      total_weight+=(weight); \
    } \
  r+=GetPixelChannels(image);

  CacheView
    *enhance_view,
    *image_view;

  Image
    *enhance_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Initialize enhanced image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  enhance_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    exception);
  if (enhance_image == (Image *) NULL)
    return((Image *) NULL);
  if (SetImageStorageClass(enhance_image,DirectClass,exception) == MagickFalse)
    {
      enhance_image=DestroyImage(enhance_image);
      return((Image *) NULL);
    }
  /*
    Enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  enhance_view=AcquireAuthenticCacheView(enhance_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,enhance_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    PixelInfo
      pixel;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    ssize_t
      center;

    if (status == MagickFalse)
      continue;
    /* Read a 5-row window (2 rows of virtual padding on each side). */
    p=GetCacheViewVirtualPixels(image_view,-2,y-2,image->columns+4,5,exception);
    q=QueueCacheViewAuthenticPixels(enhance_view,0,y,enhance_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Offset of the window's center pixel within the 5-row window buffer. */
    center=(ssize_t) GetPixelChannels(image)*(2*(image->columns+4)+2);
    GetPixelInfo(image,&pixel);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        distance,
        distance_squared,
        mean,
        total_weight;

      PixelInfo
        aggregate;

      register const Quantum
        *magick_restrict r;

      if (GetPixelWriteMask(image,p) == 0)
        {
          /* Note: SetPixelBackgoundColor is the (misspelled) API name. */
          SetPixelBackgoundColor(enhance_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(enhance_image);
          continue;
        }
      GetPixelInfo(image,&aggregate);
      total_weight=0.0;
      GetPixelInfoPixel(image,p+center,&pixel);
      /*
        Accumulate a 5x5 edge-preserving weighted average.  The center pixel
        always matches itself (distance 0), so total_weight is never 0 below.
      */
      r=p;
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      r=p+GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+2*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(10.0); EnhancePixel(40.0); EnhancePixel(80.0);
        EnhancePixel(40.0); EnhancePixel(10.0);
      r=p+3*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(8.0); EnhancePixel(20.0); EnhancePixel(40.0);
        EnhancePixel(20.0); EnhancePixel(8.0);
      r=p+4*GetPixelChannels(image)*(image->columns+4);
      EnhancePixel(5.0); EnhancePixel(8.0); EnhancePixel(10.0);
        EnhancePixel(8.0); EnhancePixel(5.0);
      /* Rounded weighted average. */
      pixel.red=((aggregate.red+total_weight/2.0)/total_weight);
      pixel.green=((aggregate.green+total_weight/2.0)/total_weight);
      pixel.blue=((aggregate.blue+total_weight/2.0)/total_weight);
      pixel.black=((aggregate.black+total_weight/2.0)/total_weight);
      pixel.alpha=((aggregate.alpha+total_weight/2.0)/total_weight);
      SetPixelViaPixelInfo(image,&pixel,q);
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(enhance_image);
    }
    if (SyncCacheViewAuthenticPixels(enhance_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_EnhanceImage)
#endif
        proceed=SetImageProgress(image,EnhanceImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  enhance_view=DestroyCacheView(enhance_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    enhance_image=DestroyImage(enhance_image);
  return(enhance_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     E q u a l i z e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  EqualizeImage() applies a histogram equalization to the image.
% % The format of the EqualizeImage method is: % % MagickBooleanType EqualizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType EqualizeImage(Image *image, ExceptionInfo *exception) { #define EqualizeImageTag "Equalize/Image" CacheView *image_view; double black[CompositePixelChannel+1], *equalize_map, *histogram, *map, white[CompositePixelChannel+1]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize histogram arrays. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateEqualizeImage(image,exception) != MagickFalse) return(MagickTrue); #endif if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); equalize_map=(double *) AcquireQuantumMemory(MaxMap+1UL, GetPixelChannels(image)*sizeof(*equalize_map)); histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*histogram)); map=(double *) AcquireQuantumMemory(MaxMap+1UL,GetPixelChannels(image)* sizeof(*map)); if ((equalize_map == (double *) NULL) || (histogram == (double *) NULL) || (map == (double *) NULL)) { if (map != (double *) NULL) map=(double *) RelinquishMagickMemory(map); if (histogram != (double *) NULL) histogram=(double *) RelinquishMagickMemory(histogram); if (equalize_map != (double *) NULL) equalize_map=(double *) RelinquishMagickMemory(equalize_map); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Form histogram. 
*/ status=MagickTrue; (void) ResetMagickMemory(histogram,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*histogram)); image_view=AcquireVirtualCacheView(image,exception); for (y=0; y < (ssize_t) image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); if (p == (const Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; intensity=p[i]; if ((image->channel_mask & SyncChannels) != 0) intensity=GetPixelIntensity(image,p); histogram[GetPixelChannels(image)*ScaleQuantumToMap(intensity)+i]++; } p+=GetPixelChannels(image); } } image_view=DestroyCacheView(image_view); /* Integrate the histogram to get the equalization map. */ for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { double intensity; register ssize_t j; intensity=0.0; for (j=0; j <= (ssize_t) MaxMap; j++) { intensity+=histogram[GetPixelChannels(image)*j+i]; map[GetPixelChannels(image)*j+i]=intensity; } } (void) ResetMagickMemory(equalize_map,0,(MaxMap+1)*GetPixelChannels(image)* sizeof(*equalize_map)); (void) ResetMagickMemory(black,0,sizeof(*black)); (void) ResetMagickMemory(white,0,sizeof(*white)); for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { register ssize_t j; black[i]=map[i]; white[i]=map[GetPixelChannels(image)*MaxMap+i]; if (black[i] != white[i]) for (j=0; j <= (ssize_t) MaxMap; j++) equalize_map[GetPixelChannels(image)*j+i]=(double) ScaleMapToQuantum((double) ((MaxMap*(map[ GetPixelChannels(image)*j+i]-black[i]))/(white[i]-black[i]))); } histogram=(double *) RelinquishMagickMemory(histogram); map=(double *) RelinquishMagickMemory(map); if (image->storage_class == PseudoClass) { register ssize_t j; /* Equalize colormap. 
*/ for (j=0; j < (ssize_t) image->colors; j++) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image,RedPixelChannel); if (black[channel] != white[channel]) image->colormap[j].red=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].red))+ channel]; } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image, GreenPixelChannel); if (black[channel] != white[channel]) image->colormap[j].green=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].green))+ channel]; } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image,BluePixelChannel); if (black[channel] != white[channel]) image->colormap[j].blue=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].blue))+ channel]; } if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) { PixelChannel channel=GetPixelChannelChannel(image, AlphaPixelChannel); if (black[channel] != white[channel]) image->colormap[j].alpha=equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(ClampToQuantum(image->colormap[j].alpha))+ channel]; } } } /* Equalize image. 
*/ progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if (((traits & UpdatePixelTrait) == 0) || (black[j] == white[j])) continue; q[j]=ClampToQuantum(equalize_map[GetPixelChannels(image)* ScaleQuantumToMap(q[j])+j]); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_EqualizeImage) #endif proceed=SetImageProgress(image,EqualizeImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); equalize_map=(double *) RelinquishMagickMemory(equalize_map); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G a m m a I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GammaImage() gamma-corrects a particular image channel. The same % image viewed on different devices will have perceptual differences in the % way the image's intensities are represented on the screen. 
  Specify
%  individual gamma levels for the red, green, and blue channels, or adjust
%  all three with the gamma parameter.  Values typically range from 0.8 to 2.3.
%
%  You can also reduce the influence of a particular channel with a gamma
%  value of 0.
%
%  The format of the GammaImage method is:
%
%      MagickBooleanType GammaImage(Image *image,const double gamma,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o gamma: the image gamma.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  pow() that passes negative values through unchanged (pow() of a negative
  base with a fractional exponent is undefined); used on HDRI samples.
*/
static inline double gamma_pow(const double value,const double gamma)
{
  return(value < 0.0 ? value : pow(value,gamma));
}

MagickExport MagickBooleanType GammaImage(Image *image,const double gamma,
  ExceptionInfo *exception)
{
#define GammaCorrectImageTag  "GammaCorrect/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  Quantum
    *gamma_map;  /* precomputed lookup table, used on non-HDRI builds */

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize gamma maps.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (gamma == 1.0)
    return(MagickTrue);
  gamma_map=(Quantum *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*gamma_map));
  if (gamma_map == (Quantum *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  (void) ResetMagickMemory(gamma_map,0,(MaxMap+1)*sizeof(*gamma_map));
  if (gamma != 0.0)
    for (i=0; i <= (ssize_t) MaxMap; i++)
      gamma_map[i]=ScaleMapToQuantum((double) (MaxMap*pow((double) i/
        MaxMap,1.0/gamma)));
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Gamma-correct colormap.
      */
#if !defined(MAGICKCORE_HDRI_SUPPORT)
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].red))];
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].green))];
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].blue))];
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) gamma_map[ScaleQuantumToMap(
          ClampToQuantum(image->colormap[i].alpha))];
#else
      /* HDRI: compute directly instead of using the quantized lookup. */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].red,1.0/gamma);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].green,1.0/gamma);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].blue,1.0/gamma);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=QuantumRange*gamma_pow(QuantumScale*
          image->colormap[i].alpha,1.0/gamma);
#endif
    }
  /*
    Gamma-correct image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
#if !defined(MAGICKCORE_HDRI_SUPPORT)
        q[j]=gamma_map[ScaleQuantumToMap(q[j])];
#else
        q[j]=QuantumRange*gamma_pow(QuantumScale*q[j],1.0/gamma);
#endif
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GammaImage)
#endif
        proceed=SetImageProgress(image,GammaCorrectImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  gamma_map=(Quantum *) RelinquishMagickMemory(gamma_map);
  /* Track the cumulative gamma applied to the image. */
  if (image->gamma != 0.0)
    image->gamma*=gamma;
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G r a y s c a l e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GrayscaleImage() converts the image to grayscale.
%
%  The format of the GrayscaleImage method is:
%
%      MagickBooleanType GrayscaleImage(Image *image,
%        const PixelIntensityMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o method: the pixel intensity method.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType GrayscaleImage(Image *image,
  const PixelIntensityMethod method,ExceptionInfo *exception)
{
#define GrayscaleImageTag  "Grayscale/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* Expand the colormap so pixels can be written directly. */
      if (SyncImage(image,exception) == MagickFalse)
        return(MagickFalse);
      if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
        return(MagickFalse);
    }
#if defined(MAGICKCORE_OPENCL_SUPPORT)
  if (AccelerateGrayscaleImage(image,method,exception) != MagickFalse)
    {
      image->intensity=method;
      image->type=GrayscaleType;
      return(SetImageColorspace(image,GRAYColorspace,exception));
    }
#endif
  /*
    Grayscale image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      MagickRealType
        blue,
        green,
        red,
        intensity;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      red=(MagickRealType) GetPixelRed(image,q);
      green=(MagickRealType) GetPixelGreen(image,q);
      blue=(MagickRealType) GetPixelBlue(image,q);
      intensity=0.0;
      switch (method)
      {
        case AveragePixelIntensityMethod:
        {
          intensity=(red+green+blue)/3.0;
          break;
        }
        case BrightnessPixelIntensityMethod:
        {
          intensity=MagickMax(MagickMax(red,green),blue);
          break;
        }
        case LightnessPixelIntensityMethod:
        {
          intensity=(MagickMin(MagickMin(red,green),blue)+
            MagickMax(MagickMax(red,green),blue))/2.0;
          break;
        }
        case MSPixelIntensityMethod:
        {
          /* Mean of squares. */
          intensity=(MagickRealType) (((double) red*red+green*green+
            blue*blue)/3.0);
          break;
        }
        case Rec601LumaPixelIntensityMethod:
        {
          /* Luma is defined on gamma-encoded (sRGB-like) samples. */
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec601LuminancePixelIntensityMethod:
        {
          /* Luminance is defined on linear-light samples. */
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.298839*red+0.586811*green+0.114350*blue;
          break;
        }
        case Rec709LumaPixelIntensityMethod:
        default:
        {
          if (image->colorspace == RGBColorspace)
            {
              red=EncodePixelGamma(red);
              green=EncodePixelGamma(green);
              blue=EncodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case Rec709LuminancePixelIntensityMethod:
        {
          if (image->colorspace == sRGBColorspace)
            {
              red=DecodePixelGamma(red);
              green=DecodePixelGamma(green);
              blue=DecodePixelGamma(blue);
            }
          intensity=0.212656*red+0.715158*green+0.072186*blue;
          break;
        }
        case RMSPixelIntensityMethod:
        {
          /* Root mean square. */
          intensity=(MagickRealType) (sqrt((double) red*red+green*green+
            blue*blue)/sqrt(3.0));
          break;
        }
      }
      SetPixelGray(image,ClampToQuantum(intensity),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GrayscaleImage)
#endif
        proceed=SetImageProgress(image,GrayscaleImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  image->intensity=method;
  image->type=GrayscaleType;
  return(SetImageColorspace(image,GRAYColorspace,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     H a l d C l u t I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HaldClutImage() applies a Hald color lookup table to the image.  A Hald
%  color lookup table is a 3-dimensional color cube mapped to 2 dimensions.
%  Create it with the HALD coder.  You can apply any color transformation to
%  the Hald image and then use this method to apply the transform to the
%  image.
%
%  The format of the HaldClutImage method is:
%
%      MagickBooleanType HaldClutImage(Image *image,Image *hald_image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image, which is replaced by indexed CLUT values
%
%    o hald_image: the color lookup table image for replacement color values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType HaldClutImage(Image *image,
  const Image *hald_image,ExceptionInfo *exception)
{
#define HaldClutImageTag  "Clut/Image"

  typedef struct _HaldInfo
  {
    double
      x,
      y,
      z;
  } HaldInfo;

  CacheView
    *hald_view,
    *image_view;

  double
    width;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    zero;

  size_t
    cube_size,
    length,
    level;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(hald_image != (Image *) NULL);
  assert(hald_image->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (image->alpha_trait == UndefinedPixelTrait)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel,exception);
  /*
    Hald clut image.
  */
  status=MagickTrue;
  progress=0;
  length=(size_t) MagickMin((MagickRealType) hald_image->columns,
    (MagickRealType) hald_image->rows);
  /*
    Derive the Hald level from the CLUT dimensions: the smallest level whose
    cube (level^3) covers the shorter image side, then squared to give the
    edge length of the color cube encoded in the 2-D image.
  */
  for (level=2; (level*level*level) < length; level++) ;
  level*=level;
  cube_size=level*level;
  width=(double) hald_image->columns;
  GetPixelInfo(hald_image,&zero);
  hald_view=AcquireVirtualCacheView(hald_image,exception);
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        offset;

      HaldInfo
        point;

      PixelInfo
        pixel,
        pixel1,
        pixel2,
        pixel3,
        pixel4;

      /*
        Map the pixel's RGB into fractional coordinates of the Hald cube,
        then tri-linearly interpolate between the two bracketing cube slices.
      */
      point.x=QuantumScale*(level-1.0)*GetPixelRed(image,q);
      point.y=QuantumScale*(level-1.0)*GetPixelGreen(image,q);
      point.z=QuantumScale*(level-1.0)*GetPixelBlue(image,q);
      offset=point.x+level*floor(point.y)+cube_size*floor(point.z);
      point.x-=floor(point.x);
      point.y-=floor(point.y);
      point.z-=floor(point.z);
      pixel1=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      pixel2=zero;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel3=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel3);
      offset+=cube_size;
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset,width),floor(offset/width),&pixel1,exception);
      (void) InterpolatePixelInfo(hald_image,hald_view,hald_image->interpolate,
        fmod(offset+level,width),floor((offset+level)/width),&pixel2,exception);
      pixel4=zero;
      CompositePixelInfoAreaBlend(&pixel1,pixel1.alpha,&pixel2,pixel2.alpha,
        point.y,&pixel4);
      pixel=zero;
      CompositePixelInfoAreaBlend(&pixel3,pixel3.alpha,&pixel4,pixel4.alpha,
        point.z,&pixel);
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        SetPixelRed(image,ClampToQuantum(pixel.red),q);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        SetPixelGreen(image,ClampToQuantum(pixel.green),q);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        SetPixelBlue(image,ClampToQuantum(pixel.blue),q);
      if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) &&
          (image->colorspace == CMYKColorspace))
        SetPixelBlack(image,ClampToQuantum(pixel.black),q);
      if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) &&
          (image->alpha_trait != UndefinedPixelTrait))
        SetPixelAlpha(image,ClampToQuantum(pixel.alpha),q);
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_HaldClutImage)
#endif
        proceed=SetImageProgress(image,HaldClutImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  hald_view=DestroyCacheView(hald_view);
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     L e v e l I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImage() adjusts the levels of a particular image channel by
%  scaling the colors falling between specified white and black points to
%  the full available quantum range.
%
%  The parameters provided represent the black, and white points.  The black
%  point specifies the darkest color in the image.  Colors darker than the
%  black point are set to zero.  White point specifies the lightest color in
%  the image.  Colors brighter than the white point are set to the maximum
%  quantum value.
%
%  If a '!' flag is given, map black and white colors to the given levels
%  rather than mapping those levels to black and white.  See
%  LevelizeImage() below.
%
%  Gamma specifies a gamma correction to apply to the image.
%
%  The format of the LevelImage method is:
%
%      MagickBooleanType LevelImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o exception: return any errors or warnings in this structure.
% */ static inline double LevelPixel(const double black_point, const double white_point,const double gamma,const double pixel) { double level_pixel, scale; if (fabs(white_point-black_point) < MagickEpsilon) return(pixel); scale=1.0/(white_point-black_point); level_pixel=QuantumRange*gamma_pow(scale*((double) pixel-black_point), 1.0/gamma); return(level_pixel); } MagickExport MagickBooleanType LevelImage(Image *image,const double black_point, const double white_point,const double gamma,ExceptionInfo *exception) { #define LevelImageTag "Level/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; /* Allocate and initialize levels map. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Level colormap. */ if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].red)); if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].green)); if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].blue)); if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].alpha=(double) ClampToQuantum(LevelPixel(black_point, white_point,gamma,image->colormap[i].alpha)); } /* Level image. 
*/ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=ClampToQuantum(LevelPixel(black_point,white_point,gamma, (double) q[j])); } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_LevelImage) #endif proceed=SetImageProgress(image,LevelImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); (void) ClampImage(image,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelizeImage() applies the reversed LevelImage() operation to just % the specific channels specified. It compresses the full range of color % values, so that they lie between the given black and white points. Gamma is % applied before the values are mapped. 
%
%  LevelizeImage() can be called with by using a +level command line
%  API option, or using a '!' on a -level or LevelImage() geometry string.
%
%  It can be used to de-contrast a greyscale image to the exact levels
%  specified.  Or by using specific levels for each channel of an image you
%  can convert a gray-scale image to any linear color gradient, according to
%  those levels.
%
%  The format of the LevelizeImage method is:
%
%      MagickBooleanType LevelizeImage(Image *image,const double black_point,
%        const double white_point,const double gamma,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o black_point: The level to map zero (black) to.
%
%    o white_point: The level to map QuantumRange (white) to.
%
%    o gamma: adjust gamma by this factor before mapping values.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType LevelizeImage(Image *image,
  const double black_point,const double white_point,const double gamma,
  ExceptionInfo *exception)
{
#define LevelizeImageTag  "Levelize/Image"
/*
  Inverse of LevelPixel(): gamma-correct the normalized value, then compress
  it into the [black_point,white_point] range.
*/
#define LevelizeValue(x) ClampToQuantum(((MagickRealType) gamma_pow((double) \
  (QuantumScale*(x)),gamma))*(white_point-black_point)+black_point)

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Allocate and initialize levels map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    for (i=0; i < (ssize_t) image->colors; i++)
    {
      /*
        Level colormap.
      */
      if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].red=(double) LevelizeValue(image->colormap[i].red);
      if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].green=(double) LevelizeValue(
          image->colormap[i].green);
      if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].blue=(double) LevelizeValue(image->colormap[i].blue);
      if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
        image->colormap[i].alpha=(double) LevelizeValue(
          image->colormap[i].alpha);
    }
  /*
    Level image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        j;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      /*
        Levelize every channel whose traits request an update.
      */
      for (j=0; j < (ssize_t) GetPixelChannels(image); j++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,j);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[j]=LevelizeValue(q[j]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_LevelizeImage)
#endif
        proceed=SetImageProgress(image,LevelizeImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L e v e l I m a g e C o l o r s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LevelImageColors() maps the given color to "black" and "white" values, % linearly spreading out the colors, and level values on a channel by channel % bases, as per LevelImage(). The given colors allows you to specify % different level ranges for each of the color channels separately. % % If the boolean 'invert' is set true the image values will modifyed in the % reverse direction. That is any existing "black" and "white" colors in the % image will become the color values given, with all other values compressed % appropriatally. This effectivally maps a greyscale gradient into the given % color gradient. % % The format of the LevelImageColors method is: % % MagickBooleanType LevelImageColors(Image *image, % const PixelInfo *black_color,const PixelInfo *white_color, % const MagickBooleanType invert,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType LevelImageColors(Image *image, const PixelInfo *black_color,const PixelInfo *white_color, const MagickBooleanType invert,ExceptionInfo *exception) { ChannelType channel_mask; MagickStatusType status; /* Allocate and initialize levels map. 
*/ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((IsGrayColorspace(image->colorspace) != MagickFalse) && ((IsGrayColorspace(black_color->colorspace) == MagickFalse) || (IsGrayColorspace(white_color->colorspace) == MagickFalse))) (void) SetImageColorspace(image,sRGBColorspace,exception); status=MagickTrue; if (invert == MagickFalse) { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelImage(image,black_color->red,white_color->red,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } else { if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,RedChannel); status&=LevelizeImage(image,black_color->red,white_color->red,1.0, exception); (void) 
SetImageChannelMask(image,channel_mask); } if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,GreenChannel); status&=LevelizeImage(image,black_color->green,white_color->green,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) { channel_mask=SetImageChannelMask(image,BlueChannel); status&=LevelizeImage(image,black_color->blue,white_color->blue,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelBlackTraits(image) & UpdatePixelTrait) != 0) && (image->colorspace == CMYKColorspace)) { channel_mask=SetImageChannelMask(image,BlackChannel); status&=LevelizeImage(image,black_color->black,white_color->black,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } if (((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0) && (image->alpha_trait != UndefinedPixelTrait)) { channel_mask=SetImageChannelMask(image,AlphaChannel); status&=LevelizeImage(image,black_color->alpha,white_color->alpha,1.0, exception); (void) SetImageChannelMask(image,channel_mask); } } return(status != 0 ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i n e a r S t r e t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LinearStretchImage() discards any pixels below the black point and above % the white point and levels the remaining pixels. % % The format of the LinearStretchImage method is: % % MagickBooleanType LinearStretchImage(Image *image, % const double black_point,const double white_point, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o black_point: the black point. % % o white_point: the white point. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport MagickBooleanType LinearStretchImage(Image *image,
  const double black_point,const double white_point,ExceptionInfo *exception)
{
#define LinearStretchImageTag  "LinearStretch/Image"

  CacheView
    *image_view;

  double
    *histogram,
    intensity;

  MagickBooleanType
    status;

  ssize_t
    black,
    white,
    y;

  /*
    Allocate histogram and linear map.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  histogram=(double *) AcquireQuantumMemory(MaxMap+1UL,sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  /*
    Form histogram.
  */
  (void) ResetMagickMemory(histogram,0,(MaxMap+1)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      intensity=GetPixelIntensity(image,p);
      histogram[ScaleQuantumToMap(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Find the histogram boundaries by locating the black and white point levels.
  */
  intensity=0.0;
  for (black=0; black < (ssize_t) MaxMap; black++)
  {
    intensity+=histogram[black];
    if (intensity >= black_point)
      break;
  }
  intensity=0.0;
  for (white=(ssize_t) MaxMap; white != 0; white--)
  {
    intensity+=histogram[white];
    if (intensity >= white_point)
      break;
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /*
    Stretch the remaining pixel range with a unity-gamma level operation.
  */
  status=LevelImage(image,(double) ScaleMapToQuantum((MagickRealType) black),
    (double) ScaleMapToQuantum((MagickRealType) white),1.0,exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     M o d u l a t e I m a g e                                               %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModulateImage() lets you control the brightness, saturation, and hue
%  of an image.  Modulate represents the brightness, saturation, and hue
%  as one parameter (e.g. 90,150,100).  If the image colorspace is HSL, the
%  modulation is lightness, saturation, and hue.  For HWB, use blackness,
%  whiteness, and hue.  And for HCL, use chrome, luma, and hue.
%
%  The format of the ModulateImage method is:
%
%      MagickBooleanType ModulateImage(Image *image,const char *modulate,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o modulate: Define the percent change in brightness, saturation, and hue.
%
%    o exception: return any errors or warnings in this structure.
%
*/

static inline void ModulateHCL(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCL(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHCLp(const double percent_hue,
  const double percent_chroma,const double percent_luma,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToHCLp(*red,*green,*blue,&hue,&chroma,&luma);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  chroma*=0.01*percent_chroma;
  luma*=0.01*percent_luma;
  ConvertHCLpToRGB(hue,chroma,luma,red,green,blue);
}

static inline void ModulateHSB(const double percent_hue,
  const double percent_saturation,const double percent_brightness,double *red,
  double *green,double *blue)
{
  double
    brightness,
    hue,
    saturation;

  /*
    Increase or decrease color brightness, saturation, or hue.
  */
  ConvertRGBToHSB(*red,*green,*blue,&hue,&saturation,&brightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  brightness*=0.01*percent_brightness;
  ConvertHSBToRGB(hue,saturation,brightness,red,green,blue);
}

static inline void ModulateHSI(const double percent_hue,
  const double percent_saturation,const double percent_intensity,double *red,
  double *green,double *blue)
{
  double
    intensity,
    hue,
    saturation;

  /*
    Increase or decrease color intensity, saturation, or hue.
  */
  ConvertRGBToHSI(*red,*green,*blue,&hue,&saturation,&intensity);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  intensity*=0.01*percent_intensity;
  ConvertHSIToRGB(hue,saturation,intensity,red,green,blue);
}

static inline void ModulateHSL(const double percent_hue,
  const double percent_saturation,const double percent_lightness,double *red,
  double *green,double *blue)
{
  double
    hue,
    lightness,
    saturation;

  /*
    Increase or decrease color lightness, saturation, or hue.
  */
  ConvertRGBToHSL(*red,*green,*blue,&hue,&saturation,&lightness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  lightness*=0.01*percent_lightness;
  ConvertHSLToRGB(hue,saturation,lightness,red,green,blue);
}

static inline void ModulateHSV(const double percent_hue,
  const double percent_saturation,const double percent_value,double *red,
  double *green,double *blue)
{
  double
    hue,
    saturation,
    value;

  /*
    Increase or decrease color value, saturation, or hue.
  */
  ConvertRGBToHSV(*red,*green,*blue,&hue,&saturation,&value);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  saturation*=0.01*percent_saturation;
  value*=0.01*percent_value;
  ConvertHSVToRGB(hue,saturation,value,red,green,blue);
}

static inline void ModulateHWB(const double percent_hue,
  const double percent_whiteness,const double percent_blackness,double *red,
  double *green,double *blue)
{
  double
    blackness,
    hue,
    whiteness;

  /*
    Increase or decrease color blackness, whiteness, or hue.
  */
  ConvertRGBToHWB(*red,*green,*blue,&hue,&whiteness,&blackness);
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  blackness*=0.01*percent_blackness;
  whiteness*=0.01*percent_whiteness;
  ConvertHWBToRGB(hue,whiteness,blackness,red,green,blue);
}

static inline void ModulateLCHab(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
  */
  ConvertRGBToLCHab(*red,*green,*blue,&luma,&chroma,&hue);
  luma*=0.01*percent_luma;
  chroma*=0.01*percent_chroma;
  hue+=fmod((percent_hue-100.0),200.0)/200.0;
  ConvertLCHabToRGB(luma,chroma,hue,red,green,blue);
}

static inline void ModulateLCHuv(const double percent_luma,
  const double percent_chroma,const double percent_hue,double *red,
  double *green,double *blue)
{
  double
    hue,
    luma,
    chroma;

  /*
    Increase or decrease color luma, chroma, or hue.
*/ ConvertRGBToLCHuv(*red,*green,*blue,&luma,&chroma,&hue); luma*=0.01*percent_luma; chroma*=0.01*percent_chroma; hue+=fmod((percent_hue-100.0),200.0)/200.0; ConvertLCHuvToRGB(luma,chroma,hue,red,green,blue); } MagickExport MagickBooleanType ModulateImage(Image *image,const char *modulate, ExceptionInfo *exception) { #define ModulateImageTag "Modulate/Image" CacheView *image_view; ColorspaceType colorspace; const char *artifact; double percent_brightness, percent_hue, percent_saturation; GeometryInfo geometry_info; MagickBooleanType status; MagickOffsetType progress; MagickStatusType flags; register ssize_t i; ssize_t y; /* Initialize modulate table. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (modulate == (char *) NULL) return(MagickFalse); if (IssRGBCompatibleColorspace(image->colorspace) == MagickFalse) (void) SetImageColorspace(image,sRGBColorspace,exception); flags=ParseGeometry(modulate,&geometry_info); percent_brightness=geometry_info.rho; percent_saturation=geometry_info.sigma; if ((flags & SigmaValue) == 0) percent_saturation=100.0; percent_hue=geometry_info.xi; if ((flags & XiValue) == 0) percent_hue=100.0; colorspace=UndefinedColorspace; artifact=GetImageArtifact(image,"modulate:colorspace"); if (artifact != (const char *) NULL) colorspace=(ColorspaceType) ParseCommandOption(MagickColorspaceOptions, MagickFalse,artifact); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { double blue, green, red; /* Modulate image colormap. 
*/ red=(double) image->colormap[i].red; green=(double) image->colormap[i].green; blue=(double) image->colormap[i].blue; switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSIColorspace: { ModulateHSI(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHColorspace: case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } image->colormap[i].red=red; image->colormap[i].green=green; image->colormap[i].blue=blue; } /* Modulate image. 
*/ #if defined(MAGICKCORE_OPENCL_SUPPORT) if (AccelerateModulateImage(image,percent_brightness,percent_hue, percent_saturation,colorspace,exception) != MagickFalse) return(MagickTrue); #endif status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { double blue, green, red; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } red=(double) GetPixelRed(image,q); green=(double) GetPixelGreen(image,q); blue=(double) GetPixelBlue(image,q); switch (colorspace) { case HCLColorspace: { ModulateHCL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HCLpColorspace: { ModulateHCLp(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSBColorspace: { ModulateHSB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSLColorspace: default: { ModulateHSL(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HSVColorspace: { ModulateHSV(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case HWBColorspace: { ModulateHWB(percent_hue,percent_saturation,percent_brightness, &red,&green,&blue); break; } case LCHabColorspace: { ModulateLCHab(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } case LCHColorspace: case LCHuvColorspace: { ModulateLCHuv(percent_brightness,percent_saturation,percent_hue, &red,&green,&blue); break; } } SetPixelRed(image,ClampToQuantum(red),q); 
SetPixelGreen(image,ClampToQuantum(green),q); SetPixelBlue(image,ClampToQuantum(blue),q); q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ModulateImage) #endif proceed=SetImageProgress(image,ModulateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N e g a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % NegateImage() negates the colors in the reference image. The grayscale % option means that only grayscale values within the image are negated. % % The format of the NegateImage method is: % % MagickBooleanType NegateImage(Image *image, % const MagickBooleanType grayscale,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o grayscale: If MagickTrue, only negate grayscale pixels within the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType NegateImage(Image *image, const MagickBooleanType grayscale,ExceptionInfo *exception) { #define NegateImageTag "Negate/Image" CacheView *image_view; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->storage_class == PseudoClass) for (i=0; i < (ssize_t) image->colors; i++) { /* Negate colormap. 
*/ if( grayscale != MagickFalse ) if ((image->colormap[i].red != image->colormap[i].green) || (image->colormap[i].green != image->colormap[i].blue)) continue; if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].red=QuantumRange-image->colormap[i].red; if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].green=QuantumRange-image->colormap[i].green; if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0) image->colormap[i].blue=QuantumRange-image->colormap[i].blue; } /* Negate image. */ status=MagickTrue; progress=0; image_view=AcquireAuthenticCacheView(image,exception); if( grayscale != MagickFalse ) { for (y=0; y < (ssize_t) image->rows; y++) { MagickBooleanType sync; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1, exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if ((GetPixelWriteMask(image,q) == 0) || IsPixelGray(image,q) != MagickFalse) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } sync=SyncCacheViewAuthenticPixels(image_view,exception); if (sync == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImage) #endif proceed=SetImageProgress(image,NegateImageTag,progress++, image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(MagickTrue); } /* Negate image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t j; if (GetPixelWriteMask(image,q) == 0) { q+=GetPixelChannels(image); continue; } for (j=0; j < (ssize_t) GetPixelChannels(image); j++) { PixelChannel channel=GetPixelChannelChannel(image,j); PixelTrait traits=GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; q[j]=QuantumRange-q[j]; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_NegateImage) #endif proceed=SetImageProgress(image,NegateImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % N o r m a l i z e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % The NormalizeImage() method enhances the contrast of a color image by % mapping the darkest 2 percent of all pixel to black and the brightest % 1 percent to white. % % The format of the NormalizeImage method is: % % MagickBooleanType NormalizeImage(Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o exception: return any errors or warnings in this structure. 
% */
MagickExport MagickBooleanType NormalizeImage(Image *image,
  ExceptionInfo *exception)
{
  double
    black_point,
    white_point;

  /*
    Saturate the darkest 0.15% of pixels to black and the brightest 0.05% to
    white; ContrastStretchImage() linearly stretches what lies in between.
  */
  black_point=(double) image->columns*image->rows*0.0015;
  white_point=(double) image->columns*image->rows*0.9995;
  return(ContrastStretchImage(image,black_point,white_point,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     S i g m o i d a l C o n t r a s t I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SigmoidalContrastImage() adjusts the contrast of an image with a non-linear
%  sigmoidal contrast algorithm.  Increase the contrast of the image using a
%  sigmoidal transfer function without saturating highlights or shadows.
%  Contrast indicates how much to increase the contrast (0 is none; 3 is
%  typical; 20 is pushing it); mid-point indicates where midtones fall in the
%  resultant image (0 is white; 50% is middle-gray; 100% is black).  Set
%  sharpen to MagickTrue to increase the image contrast otherwise the contrast
%  is reduced.
%
%  The format of the SigmoidalContrastImage method is:
%
%      MagickBooleanType SigmoidalContrastImage(Image *image,
%        const MagickBooleanType sharpen,const double contrast,
%        const double midpoint,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o sharpen: Increase or decrease image contrast.
%
%    o contrast: strength of the contrast, the larger the number the more
%      'threshold-like' it becomes.
%
%    o midpoint: midpoint of the function as a color value 0 to QuantumRange.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  ImageMagick 6 has a version of this function which uses LUTs.
*/

/*
  Sigmoidal function Sigmoidal with inflexion point moved to b and "slope
  constant" set to a.

  The first version, based on the hyperbolic tangent tanh, when combined with
  the scaling step, is an exact arithmetic clone of the sigmoid function
  based on the logistic curve.  The equivalence is based on the identity

    1/(1+exp(-t)) = (1+tanh(t/2))/2

  (http://de.wikipedia.org/wiki/Sigmoidfunktion) and the fact that the scaled
  sigmoidal derivation is invariant under affine transformations of the
  ordinate.

  The tanh version is almost certainly more accurate and cheaper.  The 0.5
  factor in the argument is to clone the legacy ImageMagick behavior.  The
  reason for making the define depend on atanh even though it only uses tanh
  has to do with the construction of the inverse of the scaled sigmoidal.
*/
#if defined(MAGICKCORE_HAVE_ATANH)
#define Sigmoidal(a,b,x) ( tanh((0.5*(a))*((x)-(b))) )
#else
#define Sigmoidal(a,b,x) ( 1.0/(1.0+exp((a)*((b)-(x)))) )
#endif
/*
  Scaled sigmoidal function:

    ( Sigmoidal(a,b,x) - Sigmoidal(a,b,0) ) /
    ( Sigmoidal(a,b,1) - Sigmoidal(a,b,0) )

  See http://osdir.com/ml/video.image-magick.devel/2005-04/msg00006.html and
  http://www.cs.dartmouth.edu/farid/downloads/tutorials/fip.pdf.  The limit
  of ScaledSigmoidal as a->0 is the identity, but a=0 gives a division by
  zero.  This is fixed below by exiting immediately when contrast is small,
  leaving the image (or colormap) unmodified.  This appears to be safe
  because the series expansion of the logistic sigmoidal function around x=b
  is 1/2-a*(b-x)/4+... so that the key denominator s(1)-s(0) is about a/4
  (a/2 with tanh).
*/
#define ScaledSigmoidal(a,b,x) ( \
  (Sigmoidal((a),(b),(x))-Sigmoidal((a),(b),0.0)) / \
  (Sigmoidal((a),(b),1.0)-Sigmoidal((a),(b),0.0)) )
/*
  Inverse of ScaledSigmoidal, used for +sigmoidal-contrast.  Because b
  may be 0 or 1, the argument of the hyperbolic tangent (resp. logistic
  sigmoidal) may be outside of the interval (-1,1) (resp. (0,1)), even
  when creating a LUT from in gamut values, hence the branching.  In
  addition, HDRI may have out of gamut values.
  InverseScaledSigmoidal is not a two-sided inverse of ScaledSigmoidal:
  It is only a right inverse.  This is unavoidable.
*/
static inline double InverseScaledSigmoidal(const double a,const double b,
  const double x)
{
  const double sig0=Sigmoidal(a,b,0.0);
  const double sig1=Sigmoidal(a,b,1.0);
  const double argument=(sig1-sig0)*x+sig0;
  /*
    Clamp the argument into the open domain of atanh (resp. log) before
    inverting; see the comment block above for why out-of-domain values can
    occur.  Note the #if splits the initializer and the return statement as
    a pair.
  */
  const double clamped=
    (
#if defined(MAGICKCORE_HAVE_ATANH)
      argument < -1+MagickEpsilon
      ?
      -1+MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b+(2.0/a)*atanh(clamped));
#else
      argument < MagickEpsilon
      ?
      MagickEpsilon
      :
      ( argument > 1-MagickEpsilon ? 1-MagickEpsilon : argument )
    );
  return(b-log(1.0/clamped-1.0)/a);
#endif
}

MagickExport MagickBooleanType SigmoidalContrastImage(Image *image,
  const MagickBooleanType sharpen,const double contrast,const double midpoint,
  ExceptionInfo *exception)
{
#define SigmoidalContrastImageTag "SigmoidalContrast/Image"
/*
  Convenience macros: forward and inverse scaled-sigmoidal transfer of a
  quantum, clamped to the quantum range.
*/
#define ScaledSig(x) ( ClampToQuantum(QuantumRange* \
  ScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )
#define InverseScaledSig(x) ( ClampToQuantum(QuantumRange* \
  InverseScaledSigmoidal(contrast,QuantumScale*midpoint,QuantumScale*(x))) )

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Convenience macros.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Side effect: may clamp values unless contrast<MagickEpsilon, in which
    case nothing is done (avoids the a=0 division noted above).
  */
  if (contrast < MagickEpsilon)
    return(MagickTrue);
  /*
    Sigmoidal-contrast enhance colormap.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      if( sharpen != MagickFalse )
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) ScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) ScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) ScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) ScaledSig(
              image->colormap[i].alpha);
        }
      else
        for (i=0; i < (ssize_t) image->colors; i++)
        {
          if ((GetPixelRedTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].red=(MagickRealType) InverseScaledSig(
              image->colormap[i].red);
          if ((GetPixelGreenTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].green=(MagickRealType) InverseScaledSig(
              image->colormap[i].green);
          if ((GetPixelBlueTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].blue=(MagickRealType) InverseScaledSig(
              image->colormap[i].blue);
          if ((GetPixelAlphaTraits(image) & UpdatePixelTrait) != 0)
            image->colormap[i].alpha=(MagickRealType) InverseScaledSig(
              image->colormap[i].alpha);
        }
    }
  /*
    Sigmoidal-contrast enhance image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      if (GetPixelWriteMask(image,q) == 0)
        {
          q+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if( sharpen != MagickFalse )
          q[i]=ScaledSig(q[i]);
        else
          q[i]=InverseScaledSig(q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_SigmoidalContrastImage)
#endif
        proceed=SetImageProgress(image,SigmoidalContrastImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
mxnet_op.h
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /*! * Copyright (c) 2017 by Contributors * \file mxnet_op.h * \brief * \author Junyuan Xie */ #ifndef MXNET_OPERATOR_MXNET_OP_H_ #define MXNET_OPERATOR_MXNET_OP_H_ #include <dmlc/omp.h> #include <mxnet/base.h> #include <mxnet/engine.h> #include <mxnet/op_attr_types.h> #include <algorithm> #include "./operator_tune.h" #include "../engine/openmp.h" #ifdef __CUDACC__ #include "../common/cuda_utils.h" #endif // __CUDACC__ namespace mxnet { namespace op { namespace mxnet_op { using namespace mshadow; #ifdef __CUDA_ARCH__ __constant__ const float PI = 3.14159265358979323846; #else const float PI = 3.14159265358979323846; using std::isnan; #endif template<typename xpu> int get_num_threads(const int N); #ifdef __CUDACC__ #define CUDA_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; \ i < (n); \ i += blockDim.x * gridDim.x) inline cudaDeviceProp cuda_get_device_prop() { int device; CUDA_CALL(cudaGetDevice(&device)); cudaDeviceProp deviceProp; CUDA_CALL(cudaGetDeviceProperties(&deviceProp, device)); return deviceProp; } /*! 
* \brief Get the number of blocks for cuda kernel given N */ inline int cuda_get_num_blocks(const int N) { using namespace mshadow::cuda; return std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum); } template<> inline int get_num_threads<gpu>(const int N) { using namespace mshadow::cuda; return kBaseThreadNum * cuda_get_num_blocks(N); } #endif // __CUDACC__ template<> inline int get_num_threads<cpu>(const int N) { return engine::OpenMP::Get()->GetRecommendedOMPThreadCount(); } /*! \brief operator request type switch */ #define MXNET_ASSIGN_REQ_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } /*! \brief operator request type switch */ #define MXNET_REQ_TYPE_SWITCH(req, ReqType, ...) \ switch (req) { \ case kNullOp: \ { \ const OpReqType ReqType = kNullOp; \ {__VA_ARGS__} \ } \ break; \ case kWriteInplace: \ case kWriteTo: \ { \ const OpReqType ReqType = kWriteTo; \ {__VA_ARGS__} \ } \ break; \ case kAddTo: \ { \ const OpReqType ReqType = kAddTo; \ {__VA_ARGS__} \ } \ break; \ default: \ break; \ } #define MXNET_NDIM_SWITCH(NDim, ndim, ...) \ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NDIM_SWITCH_EX(NDim, ndim, ...) 
\ if (NDim == 0) { \ } else if (NDim == 1) { \ const int ndim = 1; \ {__VA_ARGS__} \ } else if (NDim == 2) { \ const int ndim = 2; \ {__VA_ARGS__} \ } else if (NDim == 3) { \ const int ndim = 3; \ {__VA_ARGS__} \ } else if (NDim == 4) { \ const int ndim = 4; \ {__VA_ARGS__} \ } else if (NDim == 5) { \ const int ndim = 5; \ {__VA_ARGS__} \ } else if (NDim == 6) { \ const int ndim = 6; \ {__VA_ARGS__} \ } else if (NDim == 7) { \ const int ndim = 7; \ {__VA_ARGS__} \ } else if (NDim == 8) { \ const int ndim = 8; \ {__VA_ARGS__} \ } else if (NDim == 9) { \ const int ndim = 9; \ {__VA_ARGS__} \ } else if (NDim == 10) { \ const int ndim = 10; \ {__VA_ARGS__} \ } else { \ LOG(FATAL) << "ndim=" << NDim << "too large "; \ } #define MXNET_NO_INT8_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ case mshadow::kBfloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt8: \ LOG(FATAL) << "This operation does not " \ "support int8 or uint8"; \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_NO_FLOAT16_TYPE_SWITCH(type, DType, ...) 
\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ LOG(FATAL) << "This operation does not " \ "support float16"; \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } template <typename T> struct AccType { using type = T; }; template <> struct AccType<mshadow::half::half_t> { using type = float; }; #define MXNET_REAL_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int8_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int32_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int32"; \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ LOG(FATAL) << "This operation only support " \ "floating point types, not int64"; \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ LOG(FATAL) << 
"This operation only support " \ "floating point types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_ACC_TYPE_SWITCH(type, DType, AType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ typedef double AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ typedef float AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ typedef uint32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ typedef int32_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ typedef bool DType; \ typedef int64_t AType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt8: \ { \ typedef int8_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case 
mshadow::kBool: \ { \ typedef bool DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_INT32_INT64_TYPE_SWITCH(type, DType, ...)\ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float32"; \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float64"; \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ LOG(FATAL) << "This operation only support " \ "integer types, not float16"; \ } \ break; \ case mshadow::kUint8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not uint8"; \ } \ break; \ case mshadow::kInt8: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not int8"; \ } \ break; \ case mshadow::kInt32: \ { \ typedef int32_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kInt64: \ { \ typedef int64_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kBool: \ { \ LOG(FATAL) << "This operation only support " \ "integer types, not bool"; \ } \ break; \ default: \ LOG(FATAL) << "Unknown type enum " << type; \ } #define MXNET_LOAD_TYPE_SWITCH(type, DType, ...) \ switch (type) { \ case mshadow::kFloat32: \ { \ typedef float DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat64: \ { \ typedef double DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kFloat16: \ { \ typedef mshadow::half::half_t DType; \ {__VA_ARGS__} \ } \ break; \ case mshadow::kUint8: \ { \ typedef uint8_t DType; \ {__VA_ARGS__} \ } \ break; \ default: \ LOG(FATAL) << "Invalid loading enum type " << type; \ } /*! 
* \brief assign the val to out according * to request in Kernel::Launch * \param out the data to be assigned * \param req the assignment request * \param val the value to be assigned to out * \tparam OType output type * \tparam VType value type */ #define KERNEL_ASSIGN(out, req, val) \ { \ switch (req) { \ case kNullOp: \ break; \ case kWriteTo: \ case kWriteInplace: \ (out) = (val); \ break; \ case kAddTo: \ (out) += (val); \ break; \ default: \ break; \ } \ } #define MXNET_ADD_ALL_TYPES \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) #define MXNET_ADD_ALL_TYPES_WITH_BOOL \ .add_enum("float32", mshadow::kFloat32) \ .add_enum("float64", mshadow::kFloat64) \ .add_enum("float16", mshadow::kFloat16) \ .add_enum("bfloat16", mshadow::kBfloat16) \ .add_enum("uint8", mshadow::kUint8) \ .add_enum("int8", mshadow::kInt8) \ .add_enum("int32", mshadow::kInt32) \ .add_enum("int64", mshadow::kInt64) \ .add_enum("bool", mshadow::kBool) /* \brief Compute flattened index given coordinates and shape. 
*/
template<int ndim>
MSHADOW_XINLINE index_t ravel(const Shape<ndim>& coord, const Shape<ndim>& shape) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    // (shape[i] > coord[i]) is 0/1: a coordinate on an axis of size <=
    // coord[i] contributes 0 — presumably to support broadcast (size-1)
    // axes; confirm against callers.
    ret = ret * shape[i] + (shape[i] > coord[i]) * coord[i];
  }
  return ret;
}

/* Compute coordinates from flattened index given shape (row-major,
   least-significant axis last). */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> unravel(const index_t idx, const Shape<ndim>& shape) {
  Shape<ndim> ret;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret[i] = j - tmp*shape[i];  // j % shape[i] without a second division
    j = tmp;
  }
  return ret;
}

/* Compute dot product of two vector: flattened offset of coord under the
   given strides. */
template<int ndim>
MSHADOW_XINLINE index_t dot(const Shape<ndim>& coord, const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (int i = 0; i < ndim; ++i) {
    ret += coord[i] * stride[i];
  }
  return ret;
}

/* Combining unravel and dot: unravel idx over shape and immediately apply
   stride, without materializing the coordinate vector. */
template<int ndim>
MSHADOW_XINLINE index_t unravel_dot(const index_t idx, const Shape<ndim>& shape,
  const Shape<ndim>& stride) {
  index_t ret = 0;
  #pragma unroll
  for (index_t i = ndim-1, j = idx; i >=0; --i) {
    auto tmp = j / shape[i];
    ret += (j - tmp*shape[i])*stride[i];
    j = tmp;
  }
  return ret;
}

/* Calculate stride of each dim from shape.  Axes of extent 1 get stride 0
   so that ravel/dot over them broadcasts instead of advancing. */
template<int ndim>
MSHADOW_XINLINE Shape<ndim> calc_stride(const Shape<ndim>& shape) {
  Shape<ndim> stride;
  index_t cumprod = 1;
  #pragma unroll
  for (int i = ndim - 1; i >= 0; --i) {
    stride[i] = (shape[i] > 1) ? cumprod : 0;
    cumprod *= shape[i];
  }
  return stride;
}

/* Increment coordinates; returns false once the iteration wraps past the
   end of the outermost axis. */
template<int ndim>
MSHADOW_XINLINE bool inc(Shape<ndim>* coord, const Shape<ndim>& shape) {
  ++(*coord)[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
  }
  return (*coord)[0] < shape[0];
}

/* Increment coordinates and modify index, keeping idx consistent with the
   coordinate under the given stride. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx, const Shape<ndim>& stride) {
  ++(*coord)[ndim-1];
  *idx += stride[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx = *idx + stride[i-1] - shape[i] * stride[i];
  }
}

/* Increment coordinates and modify index, maintaining two indices over two
   independent stride layouts in lockstep. */
template<int ndim>
MSHADOW_XINLINE void inc(Shape<ndim>* coord, const Shape<ndim>& shape,
                         index_t* idx1, const Shape<ndim>& stride1,
                         index_t* idx2, const Shape<ndim>& stride2) {
  ++(*coord)[ndim-1];
  *idx1 += stride1[ndim-1];
  *idx2 += stride2[ndim-1];
  #pragma unroll
  for (int i = ndim - 1; i > 0 && (*coord)[i] >= shape[i]; --i) {
    (*coord)[i] -= shape[i];
    ++(*coord)[i-1];
    *idx1 = *idx1 + stride1[i-1] - shape[i] * stride1[i];
    *idx2 = *idx2 + stride2[i-1] - shape[i] * stride2[i];
  }
}

/*!
 * \brief Simple copy data from one blob to another
 *        (casts element-wise when the dtypes differ)
 * \param to Destination blob
 * \param from Source blob
 */
template <typename xpu>
MSHADOW_CINLINE void copy(mshadow::Stream<xpu> *s, const TBlob& to,
                          const TBlob& from) {
  CHECK_EQ(from.Size(), to.Size());
  CHECK_EQ(from.dev_mask(), to.dev_mask());
  MSHADOW_TYPE_SWITCH_WITH_BOOL(to.type_flag_, DType, {
    if (to.type_flag_ == from.type_flag_) {
      mshadow::Copy(to.FlatTo1D<xpu, DType>(s), from.FlatTo1D<xpu, DType>(s), s);
    } else {
      MSHADOW_TYPE_SWITCH_WITH_BOOL(from.type_flag_, SrcDType, {
        to.FlatTo1D<xpu, DType>(s) =
          mshadow::expr::tcast<DType>(from.FlatTo1D<xpu, SrcDType>(s));
      })
    }
  })
}

/*!
\brief Binary op backward gradient OP wrapper */
template<typename GRAD_OP>
struct backward_grad {
  /* \brief Backward calc with grad
   * \param a - output grad
   * \param args... - data to grad calculation op (what this is -- input, output, etc. -- varies)
   * \return input grad
   */
  template<typename DType, typename ...Args>
  MSHADOW_XINLINE static DType Map(DType a, Args... args) {
    // Chain rule: incoming gradient times the local gradient computed by GRAD_OP.
    return DType(a * GRAD_OP::Map(args...));
  }
};

/*! \brief Unary op wrapper that allows the input and output tensors to have
 * different element types; the input is converted to the output type before
 * the op is applied.
 */
template<typename OP, int req>
struct mixed_type_unary_op {
  typedef OP Operation;
  /*! \brief input is one tensor */
  template<typename OType, typename IType>
  MSHADOW_XINLINE static void Map(index_t i, OType *out, const IType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(OType(in[i])));
  }
};

/*! \brief Binary op backward gradient OP wrapper (tuned) */
template<typename GRAD_OP>
struct backward_grad_tuned : public backward_grad<GRAD_OP>, public tunable {
  using backward_grad<GRAD_OP>::Map;
};

/*! \brief Select assignment operation based upon the req value
 * Also useful for mapping mshadow Compute (F<OP>) to Kernel<OP>::Launch
 */
template<typename OP, int req>
struct op_with_req {
  typedef OP Operation;

  /*! \brief input is one tensor */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is tensor and two scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType *in,
                                  const DType value_1, const DType value_2) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value_1, value_2));
  }

  /*! \brief No inputs (ie fill to constant value) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    KERNEL_ASSIGN(out[i], req, OP::Map());
  }

  /*! \brief input is single scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(value));
  }

  /*! \brief inputs are two tensors and a scalar value */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], value));
  }

  /*! \brief inputs are three tensors (ie backward grad with binary grad function) */
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out,
                                  const DType *input_1, const DType *input_2, const DType *input_3) {
    KERNEL_ASSIGN(out[i], req, OP::Map(input_1[i], input_2[i], input_3[i]));
  }

  // The overloads below are SFINAE-gated on the input element type so that a
  // mixed-precision output (bool / half_t / float / double) can be selected
  // without ambiguity against the same-type overloads above.

  /*! \brief input is a tensor and the output is a boolean tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i]));
  }

  /*! \brief inputs are two tensors with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is tensor and two scalar value with a boolean output tensor */
  template<typename DType,
           typename std::enable_if<!std::is_same<DType, bool>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }

  /*! \brief input is two tensors with different type and with a boolean output tensor */
  template<typename LType, typename RType,
           typename std::enable_if<!std::is_same<LType, RType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, bool *out, const LType *lhs, const RType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a half_t output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out,
                                  const DType *lhs, const mshadow::half::half_t *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are two tensors with a double output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_same<DType, float>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief inputs are a tensor and a half_t scalar with a half_t output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, mshadow::half::half_t *out,
                                  const DType *lhs, const mshadow::half::half_t value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief inputs are a tensor and a float scalar with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const float value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief inputs are a tensor and a double scalar with a double output tensor */
  template<typename DType,
           typename std::enable_if<std::is_same<DType, mshadow::half::half_t>::value ||
                                   std::is_same<DType, float>::value ||
                                   std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, double *out, const DType *lhs, const double value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], value));
  }

  /*! \brief inputs are two integral tensors with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *lhs, const DType *rhs) {
    KERNEL_ASSIGN(out[i], req, OP::Map(lhs[i], rhs[i]));
  }

  /*! \brief input is a tensor and a scalar value with a float output tensor */
  template<typename DType,
           typename std::enable_if<std::is_integral<DType>::value, int>::type = 0>
  MSHADOW_XINLINE static void Map(index_t i, float *out, const DType *in, const DType value) {
    KERNEL_ASSIGN(out[i], req, OP::Map(in[i], value));
  }
};

template<typename OP, typename xpu>
struct Kernel;

/*!
 * \brief CPU Kernel launcher
 * \tparam OP Operator to launch
 */
template<typename OP>
struct Kernel<OP, cpu> {
  /*!
   * \brief Launch a generic CPU kernel.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool Launch(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // Fewer than 2 recommended threads: skip the OpenMP region entirely.
    if (omp_threads < 2) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch a generic CPU kernel with dynamic schedule. This is recommended
   * for irregular workloads such as spmv.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename ...Args>
  inline static bool LaunchDynamic(mshadow::Stream<cpu> *, const int64_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount(false);
    if (omp_threads < 2) {
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      // schedule(dynamic): iterations are handed out at run time, which load-
      // balances irregular per-iteration work at the cost of scheduling overhead.
      #pragma omp parallel for num_threads(omp_threads) schedule(dynamic)
      for (int64_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (int64_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
    return true;
  }

  /*!
   * \brief Launch CPU kernel which has OMP tuning data available.
   * When using this for a new kernel op, add declaration and tuning objects to
   * operator_tune.cc
   * \tparam PRIMITIVE_OP The primitive operation to use for tuning
   * \tparam DType Data type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param dest Destination pointer (used to infer DType)
   * \param args Varargs to eventually pass to the OP::Map() function
   */
  template<typename PRIMITIVE_OP, typename DType, typename ...Args>
  static void LaunchTuned(mshadow::Stream<cpu> *, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    // tuned_op<>::UseOMP decides, from measured tuning data, whether the OMP
    // fork/join overhead is worth it for this op, type, and problem size.
    if (omp_threads < 2 || !tuned_op<PRIMITIVE_OP, DType>::UseOMP(
      N, static_cast<size_t>(omp_threads))) {
      for (size_t i = 0; i < N; ++i) {
        OP::Map(i, args...);
      }
    } else {
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); ++i) {
        OP::Map(i, args...);
      }
    }
#else
    for (size_t i = 0; i < N; ++i) {
      OP::Map(i, args...);
    }
#endif
  }

  /*!
   * \brief Launch custom-tuned kernel where each thread is set to
   * operate on a contiguous partition
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the UseOMP() and OP::Map() functions
   */
  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<cpu> *s, const size_t N, Args... args) {
#ifdef _OPENMP
    const int omp_threads = engine::OpenMP::Get()->GetRecommendedOMPThreadCount();
    if (omp_threads < 2) {
      OP::Map(0, N, args...);
    } else {
      // Each OP::Map call receives (start, length) for one contiguous chunk;
      // the last chunk is clipped so start+length never exceeds N.
      const auto length = (N + omp_threads - 1) / omp_threads;
      #pragma omp parallel for num_threads(omp_threads)
      for (index_t i = 0; i < static_cast<index_t>(N); i += length) {
        OP::Map(i, i + length > N ? N - i : length, args...);
      }
    }
#else
    OP::Map(0, N, args...);
#endif
  }

  /*!
   * \brief Launch a tunable OP with implicitly-supplied data type
   * \tparam DType Data type
   * \tparam T OP type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, T>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<T, DType>(s, N, dest, args...);
    return true;
  }

  /*!
   * \brief Launch a tunable OP wrapper with explicitly-supplied data type (ie op_with_req)
   * \tparam DType Data type
   * \tparam T Wrapper type
   * \tparam Args Varargs type to eventually pass to the OP::Map() function
   * \param s Stream (usually null for CPU)
   * \param N Number of iterations
   * \param args Varargs to eventually pass to the OP::Map() function
   * \return Always true
   */
  template<typename DType, typename T = OP, typename ...Args>
  static MSHADOW_CINLINE
  typename std::enable_if<std::is_base_of<tunable, typename T::Operation>::value, bool>::type
  Launch(mshadow::Stream<cpu> *s, const size_t N, DType *dest, Args... args) {
    LaunchTuned<typename T::Operation, DType>(s, N, dest, args...);
    return true;
  }
};

#ifdef __CUDACC__
// Grid-stride loop: each thread starts at its global index and strides by the
// total thread count, so any N is covered regardless of launch geometry.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, args...);
  }
}

// Same as above, but calls the (start, length) form of Map with length 1.
template<typename OP, typename ...Args>
__global__ void mxnet_generic_kernel_ex(int N, Args... args) {
  for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < N; i += blockDim.x * gridDim.x) {
    OP::Map(i, 1, args...);
  }
}

template<typename OP>
struct Kernel<OP, gpu> {
  /*! \brief Launch GPU kernel */
  template<typename ...Args>
  inline static void Launch(mshadow::Stream<gpu> *s, int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    // One thread per element, capped at the maximum grid dimension.
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel);
  }

  template<typename ...Args>
  inline static void LaunchEx(mshadow::Stream<gpu> *s, const int N, Args... args) {
    if (0 == N) return;
    using namespace mshadow::cuda;
    int ngrid = std::min(kMaxGridNum, (N + kBaseThreadNum - 1) / kBaseThreadNum);
    mxnet_generic_kernel_ex<OP, Args...>
      <<<ngrid, kBaseThreadNum, 0, mshadow::Stream<gpu>::GetStream(s)>>>(
        N, args...);
    MSHADOW_CUDA_POST_KERNEL_CHECK(mxnet_generic_kernel_ex);
  }
};
#endif  // __CUDACC__

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<int val>
struct set_to_int : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to zero and one
 */
using set_zero = set_to_int<0>;
using set_one = set_to_int<1>;

/*!
 * \brief Set to immediate scalar value kernel
 * \tparam val Scalar immediate
 */
template<bool val>
struct set_to_bool : public tunable {
  // mxnet_op version (when used directly with Kernel<>::Launch())
  template<typename DType>
  MSHADOW_XINLINE static void Map(index_t i, DType *out) {
    out[i] = DType(val);
  }
  // mshadow_op version (when used with op_with_req<>)
  MSHADOW_XINLINE static int Map() {
    return val;
  }
};

/*!
 * \brief Special-case kernel shortcut for setting to true and false
 */
using set_true = set_to_bool<true>;
using set_false = set_to_bool<false>;
}  // namespace mxnet_op
}  // namespace op
}  // namespace mxnet
#endif  // MXNET_OPERATOR_MXNET_OP_H_
heimdallr.c
// // // // // // // // // // // // // // // // // // // // // // // // // //
// // // // // //
// (C) Copyright [2020-2021] Hewlett Packard Enterprise Development LP
//
// Permission is hereby granted, free of charge, to any person obtaining a
// copy of this software and associated documentation files (the "Software"),
// to deal in the Software without restriction, including without limitation
// the rights to use, copy, modify, merge, publish, distribute, sublicense,
// and/or sell copies of the Software, and to permit persons to whom the
// Software is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included
// in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
// THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
// OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
// ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
// OTHER DEALINGS IN THE SOFTWARE.
//
// author: Nathan Wichmann (wichmann@hpe.com)
// // // // // // // // // // // // // // // // // // // // // // // // // //
// // // // // //
// Heimdallr65 is a driver that implements a number of tests that perform
// random "updates" to a global memory space. Heimdallr can be used for both
// performance and functional testing. Tests often come in pairs, one implemented
// using shmem atomics, and one using the HABU library. When possible, the results
// of the two tests are compared for correctness. In many (most) cases we expect zero errors,
// but in some cases we do expect a very small number of errors.
// One can look at only the shmem performance, or both shmem and HABU performance. 
#include <stdio.h>
#include <string.h>
#include <time.h>
#include <sys/times.h>
#include <stdlib.h>
#include <unistd.h>
#include <getopt.h>
#include <stdint.h>
#include <stdbool.h>
#include <omp.h>
#include <mpp/shmem.h>
#include <mpp/shmemx.h>
#include <math.h>
#include <assert.h>
#include "habu.h"

/* Package Information */
#define PACKAGE_NAME "Heimdallr65"
#define PACKAGE_VERSION "2.0"

/* Macros for timing */
struct tms tt;
#define WSEC() (times(&tt) / (double)sysconf(_SC_CLK_TCK))   /* wall-clock seconds */
#define CPUSEC() (clock() / (double)CLOCKS_PER_SEC)          /* CPU seconds */

#define GIBI 1073741824L
#define MIBI 1048576L
#define OPT_HELP 1
/* Maximum message block size, in 8-byte words. */
//#define MAXBLKSIZE 2048
#define MAXBLKSIZE 4096
//#define MAXBLKSIZE 610
// #define MAXBLKSIZE 61
// #define nrepeats 4
#define THREAD_HOT

/* Common immediate operands used by the atomic/HABU update calls. */
int64_t ONE=1;
int64_t NEGONE=-1;

typedef struct opts_t {
  int check;   // nonzero => verify results after the timed run
} opts_t;

// Define a local structure to be used in the HABU user defined function
struct hrp_local{
  habu_op_t HABU_RP;   // handle of the registered recursive-pointer op itself
  int64_t tabsize;     // per-PE table size (power of two; used as an index mask)
  int64_t npes;        // total number of PEs
};

/*
 * HABU user-defined op for the "RP" (recursive pointer-chase) test.
 * If the target word holds val[0] it is set to val[1]; if it already holds
 * val[1] the op re-issues itself at a different (pe, offset); any other
 * value marks a test error by storing -val[1].
 */
void hrp(habu_mem_t thandle, int64_t ioffset,void *arg_v,void *largs,int ctxt){
  int64_t *array = habu_handle_to_pointer(thandle);
  int64_t *val = arg_v;
  int ipe=shmem_my_pe();
  struct hrp_local *hrpl = largs;
  // printf("mype= %d %d hrp ioff= %ld array=%ld val = %ld %ld\n", ipe,omp_get_thread_num(), ioffset,array[ioffset],val[0],val[1]);fflush(stdout);
  // printf("HABU_RP %ld %ld\n",HABU_RP,hrpl->HABU_RP);
  if(array[ioffset]==val[0]){
    array[ioffset]=val[1];
  }else if(array[ioffset]==val[1]){
    // Slot already taken: hop to another pseudo-random (pe, offset) and retry.
    // 919/907 are just odd strides; tabsize is a power of two so & masks work.
    ipe = (ipe+919)%hrpl->npes;
    ioffset = (ioffset+907)&(hrpl->tabsize-1);
    habu_op( thandle,ioffset,ipe, hrpl->HABU_RP,val,ctxt);
  }else{
    // This should only happen if there is an error in our test
    array[ioffset]=-val[1];
  }
}

/*
 * Fetching variant of hrp for the "FRP" test: same retry logic, but on
 * success the claimed offset is PUT back to (return_pe, return_index), and
 * retries go through habu_fop so the eventual result is still returned.
 */
void fhrp(habu_mem_t rhandle,int64_t return_index, int return_pe, habu_mem_t thandle, int64_t ioffset,void *arg_v,void *largs,int ctxt){
  int64_t *array = habu_handle_to_pointer(thandle);
  int64_t *val = arg_v;
  int ipe=shmem_my_pe();
  struct hrp_local *hfrpl = largs;
  // printf("mype= %d %d hrp ioff= %ld array=%ld val = %ld %ld\n", ipe,omp_get_thread_num(), ioffset,array[ioffset],val[0],val[1]);fflush(stdout);
  // printf("HABU_RP %ld %ld\n",HABU_RP,hrpl->HABU_RP);
  if(array[ioffset]==val[0]){
    array[ioffset]=val[1];
    habu_op( rhandle,return_index,return_pe, HABU_PUT,&ioffset,ctxt);
  }else if(array[ioffset]==val[1]){
    ipe = (ipe+919)%hfrpl->npes;
    ioffset = (ioffset+907)&(hfrpl->tabsize-1);
    habu_fop(rhandle,return_index,return_pe, thandle,ioffset,ipe, hfrpl->HABU_RP,val,ctxt);
  }else{
    // This should only happen if there is an error in our test
    array[ioffset]=-val[1];
  }
}

/* Runtime options parsed from the command line (see check_args). */
typedef struct {
  size_t nrepeats;     // repeats within one timed test
  size_t l2tabsize;    // log2 of per-PE table size
  size_t l2nupdates;   // log2 of per-PE update count
  size_t minwords;     // min message size in 8B words
  size_t maxwords;     // max message size in 8B words
  size_t ranpelist;
  int msgrowth;        // message-size growth factor between sweep steps
  char singletest[14]; // name of a single test to run, or "ALL"
} options_t;

/* Per-test metadata tables, filled in by check_args. */
#define numtests 33
char tnames[numtests][14];   // test names
int nbytes[numtests];        // bytes moved per update
double nups[numtests];       // updates counted per issued op
int skiptest[numtests];      // 1 => skip this test
int END_SW_ITEST=0;          // index of last single-word test
int END_CORREST_TEST=0;      // index of last correctness-checked test
options_t opts;

/*
 * Print the usage summary on PE 0.
 * NOTE(review): the `file` parameter is accepted but output always goes to
 * stdout via printf -- presumably it was meant to be fprintf(file, ...);
 * confirm before relying on the stderr path in check_args' default case.
 */
void print_usage(char ***argv, FILE *file) {
  char *name = strrchr((*argv)[0], '/');
  name = (name == NULL ? (*argv)[0] : name+1);
  int mype = 0;
  mype = shmem_my_pe();
  if (mype == 0)
    printf(
      "Usage: %s [OPTION..]\n"
      "Options:\n"
      " -t, \t\t Table size (as log base 2) per PE\n"
      " -n, \t\t Nupdates (as log base 2) per PE\n"
      " -r, \t\t Number of Repeats completed within a timed test\n"
      " -T, \t\t Name of single test to be executed\n"
      " -w, \t\t Min Message size (in 8B words)\n"
      " -W, \t\t Max Message size (in 8B words)\n"
      " -G, \t\t Message size growth factor (integer)\n"
      " -h, \t\t Display this information\n",
      name);
}

/*
 * Parse command-line options, initialize the per-test name/size/skip tables,
 * and return the resulting options_t.
 * NOTE(review): `seed` is computed but never used here; the getopt string
 * contains 'r' twice and an 'L' with no matching case -- confirm intent.
 */
options_t check_args(int *argc, char***argv) {
  uint64_t seed = 0;
  seed = time(NULL);
  options_t opts = {
    .nrepeats = (size_t)4,
    .l2tabsize = (size_t)25,
    .l2nupdates = (size_t)23,
    .maxwords = (size_t)1 << 13,
    .minwords = (size_t)1 ,
    .msgrowth = (int)1,
  };
  strncpy(opts.singletest, "ALL",14);

  // Register every test: name, updates-per-op, bytes-per-op, default skip flag.
  // Tests come in shmem/HABU pairs in this order; skiptest[i]=1 disables a test.
  int i=0;
  strncpy(tnames[i], "ATOMIC_NA_INC",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=1; i++;
  strncpy(tnames[i], "HABU_NA_INC",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=1; i++;
  strncpy(tnames[i], "ATOMIC_NA_ADD",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=1; i++;
  strncpy(tnames[i], "HABU_NA_ADD",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=1; i++;
  strncpy(tnames[i], "ATOMIC_INC",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_INC",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "ATOMIC_ADD",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_ADD",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "AMO_PEADD",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=1; i++;
  strncpy(tnames[i], "HABU_PEADD",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=1; i++;
  strncpy(tnames[i], "ATOMIC_ADD2",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_I&A",14); nups[i]=2; nbytes[i]= 16; skiptest[i]=0; i++;
  strncpy(tnames[i], "ATOMIC_FADD",14); nups[i]=1; nbytes[i]= 2*8;skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_FADD",14); nups[i]=1; nbytes[i]= 2*8;skiptest[i]=0; i++;
  strncpy(tnames[i], "ATOMIC_RP",14); nups[i]=opts.nrepeats*(opts.nrepeats+1.0)/2.0/(opts.nrepeats*2.0)/opts.nrepeats; nbytes[i]= 2*8;skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_RP",14); nups[i]=opts.nrepeats*(opts.nrepeats+1.0)/2.0/(opts.nrepeats*2.0)/opts.nrepeats; nbytes[i]= 2*8;skiptest[i]=0; i++;
  strncpy(tnames[i], "ATOMIC_FRP",14); nups[i]=opts.nrepeats*(opts.nrepeats+1.0)/2.0/(opts.nrepeats*2.0)/opts.nrepeats; nbytes[i]= 2*8;skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_FRP",14); nups[i]=opts.nrepeats*(opts.nrepeats+1.0)/2.0/(opts.nrepeats*2.0)/opts.nrepeats; nbytes[i]= 2*8;skiptest[i]=0; i++;
  strncpy(tnames[i], "GET_NB_lsheap",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_GET",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; END_SW_ITEST=i; i++;
  strncpy(tnames[i], "GET_NB_lsheap",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_GETV",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=1;END_CORREST_TEST=i; i++;
  strncpy(tnames[i], "PUT_lsheap",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "HABU_PUTV",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "GET_NB_lheap",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "GET_NB_lstack",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "GET_lsheap",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "PUT_NB_lsheap",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "PUT_NB_lheap",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "PUT_NB_lstack",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "PUT_SIG",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "PUT_SIG_NB",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;
  strncpy(tnames[i], "PUT_manSIG",14); nups[i]=1; nbytes[i]= 8; skiptest[i]=0; i++;

  opterr = 0;
  int opt;
  int firstst=0;
  while ((opt = getopt(*argc, *argv, "hr:T:t:w:W:Lr:G:n:")) != -1) {
    switch (opt) {
      case 'r':
        opts.nrepeats = strtoul(optarg, NULL, 0);
        break;
      case 'T':
        // -T name: disable everything (first time only), then re-enable every
        // test whose name matches the given prefix.
        strncpy(opts.singletest, optarg,14);
        if(strncmp(opts.singletest,"ALL",14)!=0 ){
          for(int itest=0;itest<numtests;itest++){
            if(firstst==0) skiptest[itest]=1;
            if(strncmp(tnames[itest],opts.singletest,strlen(opts.singletest))==0 )skiptest[itest]=0;
          }
          firstst=1;
        }
        break;
      case 't':
        opts.l2tabsize = strtoul(optarg, NULL, 0);
        break;
      case 'n':
        opts.l2nupdates= strtoul(optarg, NULL, 0);
        break;
      case 'w':
        opts.minwords = strtoul(optarg, NULL, 0);
        break;
      case 'W':
        opts.maxwords = strtoul(optarg, NULL, 0);
        break;
      case 'G':
        opts.msgrowth = strtoul(optarg, NULL, 0);
        break;
      case 'h':
        print_usage(argv, stdout);
        shmem_finalize();
        exit(EXIT_SUCCESS);
      default:
        print_usage(argv, stderr);
        shmem_finalize();
        exit(EXIT_FAILURE);
    }
  }
  // Single-word tests make no sense when the minimum message size exceeds 1.
  for(int itest=0;itest<numtests;itest++){
    if(itest<=END_SW_ITEST && opts.minwords>1) skiptest[itest]=1; // Skip this test
  }
  return opts;
}

/*
 * Secondary getopt_long parser for the -h/--help and -c/--check flags.
 * Returns OPT_HELP, -1 on a parse error (rank 0 only), or 0 on success.
 * NOTE(review): `case 'h'` has no break, so on ranks != 0 it falls through
 * into `case 'c'` and sets bench->check -- confirm whether that is intended.
 */
int handle_options(int argc, char *argv[], int rank, opts_t *bench) {
  while (1) {
    static struct option long_options[] = {
      {"help", no_argument, 0, 'h'},
      {"check", no_argument, 0, 'c'},
      {0, 0, 0, 0}
    };
    /* getopt_long stores the option index here. */
    int option_index = 0;
    int c = getopt_long(argc, argv, "hc", long_options, &option_index);
    /* Detect the end of the options. */
    if (c == -1)
      break;
    switch (c) {
      case 'h':
        if (rank == 0) return OPT_HELP;
      case 'c':
        bench->check = 1;
        break;
      case '?':
        /* getopt_long already printed an error message. */
        if (rank == 0) return -1;
        break;
      default:
        abort();
    }
  }
  return 0;
}

/********************************
  divide up total size (loop iters or space amount) in a blocked way
  The first (totalsize % nblocks) blocks get one extra element.
  start/stop are an inclusive range; size = stop - start + 1.
 ********************************/
void Block(int myblock, int nblocks, int64_t totalsize, int64_t * start, int64_t * stop, int64_t * size) {
  int64_t div;
  int64_t rem;
  div = totalsize / nblocks;
  rem = totalsize % nblocks;
  if (myblock < rem) {
    *start = myblock * (div + 1);
    *stop = *start + div;
    *size = div + 1;
  } else {
    *start = myblock * div + rem;
    *stop = *start + div - 1;
    *size = div;
  }
}

/********************************
  update_table
  Runs the full benchmark suite: allocates symmetric and local buffers,
  builds the random index stream, then times each enabled test.
 ********************************/
void update_table(int64_t tabsize, int64_t nupdate, int64_t *table, int64_t *index, int nrepeats) {
  uint64_t ran; /* Current random numbers */
  uint64_t temp;
  double icputime; /* CPU time to init table */
  double is;
  double cputime; /* CPU time to update table */
  double s;
  uint64_t *local_table;
  int64_t i, j;
  int itest;
  int64_t one=1;
  double lrwbw, lrbbw, lwmups, lbmups;
  int numthreads;
  int64_t sumval=0;
  int npes,mype;
  double nGbytes;
  int64_t *error_count;
  int64_t *maxval;
  // NOTE(review): VLA of nupdate+maxwords words on the stack -- large update
  // counts will overflow the thread stack; verify limits before raising -n.
  int64_t stackmbuf[nupdate+opts.maxwords];
  int64_t *sheapmbuf;
  int64_t *heapmbuf;
  uint64_t *signal;
  int64_t oldvalue;
  int l2tabsize=0;
  npes = shmem_n_pes();
  mype = shmem_my_pe();
  // if (mype == 0) {printf("in update_table\n");fflush(stdout);}
  int lpes = shmemx_local_npes();
  int nnodes = (npes+lpes-1)/lpes;
  if(mype==0){
    // if(lpes*nnodes != npes )printf("*** WARNING: lpes*nnodes != npes. Per node calculations may be incorrect. 
***\n"); } #pragma omp parallel { numthreads = omp_get_num_threads(); } habu_set_num_contexts(numthreads); error_count = (int64_t*)shmem_malloc((1)*8); // if (mype == 0){ printf("before maxval shmalloc\n");fflush(stdout);} maxval= (int64_t*)shmem_malloc((1)*8); signal = (uint64_t*)shmem_malloc((npes*numthreads)*8); // if (mype == 0){ printf("before sheapmbuf shmalloc\n");fflush(stdout);} sheapmbuf = (int64_t*)shmem_malloc((nupdate+MAXBLKSIZE)*8); if(sheapmbuf==NULL){ printf("Error. Allocation of sheapmbuf was unsuccessful. nupdate=%ld\n",nupdate); return; } // if (mype == 0){ printf("after sheapmbuf shmalloc\n");fflush(stdout);} heapmbuf = (int64_t*)malloc((nupdate+MAXBLKSIZE)*8); if(heapmbuf==NULL){ printf("Error. Allocation of heapmbuf was unsuccessful. nupdate=%ld\n",nupdate); return; } for(i=0;i<nupdate;i++) stackmbuf[i]=0; // if (mype == 0){ printf("got to before l2tabsize while\n");fflush(stdout);} while((1L<<l2tabsize)<tabsize) l2tabsize +=1; // if (mype == 0) printf("got to after l2tabsize while\n"); // printf("* Table length/rank (elements) = %ld words\n", tabsize); // printf("* In update Log2 Table length/rank = %d \n", l2tabsize); // Define the names of tests and control which tests should be run and which should be // skipped. Skiptest==0 means execute the test. int mbsize = tabsize/4; mbsize = mbsize<opts.maxwords?mbsize:opts.maxwords; // if(tabsize<(MAXBLKSIZE*2)){ // if(mype==0)printf("ERROR! 
Table size is to small for MAXBLKSIZE.\nIncrease table size argument or decrease MAXBLKSIZE in source!\n"); // return; // } /* Initialize index*/ int64_t nthreads; #pragma omp parallel { int64_t MYTHREAD = omp_get_thread_num(); nthreads = omp_get_num_threads(); // printf("%ld thinks there are %ld threads\n",MYTHREAD,nthreads); int64_t start, stop, size; int seed; unsigned short randstate[3]; randstate[0] = 5639*(mype*nthreads + MYTHREAD+1); randstate[1] = 5827*(mype*nthreads + MYTHREAD+1); randstate[2] = 4951*(mype*nthreads + MYTHREAD+1); start = stop = 0; size = nupdate / nthreads; Block(MYTHREAD, nthreads, nupdate, &start, &stop, &size); // printf("%d %ld start= %ld stop= %ld size= %ld nupdate=%ld\n",mype, MYTHREAD,start,stop,size,nupdate); for (int i=start; i<=stop; i++) { index[i] = (int64_t) (erand48 (randstate) * (npes*tabsize)); // printf("mype= %d index %ld= %ld\n",mype,i,index[i]); if(((index[i]&(tabsize-1))+mbsize)>=tabsize) { // printf("mype= %d adjust index %ld .and.tabsize = %ld %d \n",mype,i,((index[i]&(tabsize-1))+mbsize),mbsize); index[i] = index[i]- mbsize ; } int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); // printf("init index mype= %d index %ld= %ld ipe= %d ioff= %ld\n",mype,i,index[i],ipe,ioff); } } if (mype == 0) {printf("%-14s %7s %7s %8s %6s %8s %8s\n","Test","Bytes", "Time","MUPS","MUPS/N","GiB/s","GiB/s/N");fflush(stdout);} int tcount=0; double geobw=1.0; double geomups=1.0; double geosecs=1.0; int64_t maxrlevel = 0; // for(itest=0;itest<2;itest++){ for(itest=0;itest<numtests;itest++){ if(skiptest[itest]==1) continue; // Skip this test int blksize=opts.minwords; //int blksize=8; int blksize2=opts.minwords; int lastblksize=1; // blksize=600; // lastblksize=144; int topblksize=mbsize; // int topblksize=blksize; int sumblksize=0; while(blksize<=topblksize){ /* Initialize main table */ if(itest<=END_SW_ITEST){ topblksize=1; } if((itest%2)==0){ #pragma omp parallel for for(i = 0; i < tabsize; i += 1){ 
if(strncmp(tnames[itest],"ATOMIC_RP",14)==0 || strncmp(tnames[itest],"ATOMIC_FRP",14)==0 ){ table[i] = 0;} else{ table[i] = mype+i;} } } if(strncmp(tnames[itest],"GET_NB_lsheap",14)==0){ #pragma omp parallel for for(i = 0; i < nupdate; i += 1){ sheapmbuf[i] = -1; heapmbuf[i] = -1; } } sumval=0; sumblksize += blksize; /* Begin timing here */ icputime = -CPUSEC(); is = -WSEC(); // We need to register memory with habu so that it know how to update it habu_mem_t habu_table_handle; if(itest>3){ // Ask for atomicity. habu_table_handle = habu_register_memory(table,sizeof(table[0]),tabsize); }else{ // Test NON-atomicity. habu_table_handle = habu_register_memory(table,sizeof(table[0]),0); } habu_mem_t habu_local_handle = habu_register_memory(heapmbuf,sizeof(heapmbuf),nupdate); // habu_mem_t habu_local_handle = habu_register(heapmbuf,sizeof(heapmbuf),0,nthreads); struct hrp_local hrpl = {-1,tabsize,npes}; struct hrp_local hfrpl = {-1,tabsize,npes}; hrpl.HABU_RP = habu_register_op(hrp,16,&hrpl); hfrpl.HABU_RP = habu_register_fop(fhrp,16,&hfrpl); double hbartime=0.; if (mype == 0) printf("%-14s ",tnames[itest]);fflush(stdout); // hrpl.HABU_RP = HABU_RP; // hrpl.tabsize = tabsize; // hrpl.npes = npes; // hop_tabsize = tabsize; // hop_npes = npes; // if(mype==0) printf("HABU_RP %ld %ld\n",HABU_RP,hrpl.HABU_RP); shmem_barrier_all(); /* Begin timing here */ icputime += CPUSEC(); is += WSEC(); cputime = -CPUSEC(); s = -WSEC(); #pragma omp parallel { int MYTHREAD = omp_get_thread_num(); // printf("%d thinks there are %d threads\n",MYTHREAD,nthreads); int64_t start, stop, size; start = stop = 0; Block(MYTHREAD, nthreads, nupdate, &start, &stop, &size); int64_t mytabstart = (tabsize/nthreads)*MYTHREAD+1; // printf("%d start= %ld stop= %ld size= %ld nupdate=%ld\n",MYTHREAD,start,stop,size,nupdate); int64_t i; int64_t mype64 = mype; int64_t pos; int64_t val=0; double invslice_size = 1.0/((1.0*tabsize)/nthreads+1); for (int ir=0; ir<nrepeats; ir+=1) { // for (i=start; i<=stop; 
i+=blksize) { // int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); switch(itest){ case 0: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); #ifdef HABU_DEBUG_PRINT_TRACK_ITEMS printf("mype= %d %d i= %ld ipe= %d ioff= %ld\n", mype,omp_get_thread_num(),i,ipe,ioff);fflush(stdout); #endif shmemx_long_add_nb( &table[ioff], -1,ipe,NULL); } break; case 1: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); #ifdef HABU_DEBUG_PRINT_TRACK_ITEMS printf("mype= %d %d i= %ld ipe= %d ioff= %ld\n", mype,omp_get_thread_num(),i,ipe,ioff);fflush(stdout); #endif habu_op( habu_table_handle,ioff,ipe, HABU_INC,&ONE,MYTHREAD); } break; case 2: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmemx_long_add_nb( &table[ioff], -1,ipe,NULL); } break; case 3: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_op( habu_table_handle,ioff,ipe, HABU_ADD,&ONE,MYTHREAD); } break; case 4: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmemx_long_add_nb( &table[ioff], -1,ipe,NULL); }break; case 5: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_op( habu_table_handle,ioff,ipe, HABU_INC,&ONE,MYTHREAD); } break; case 6: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = 
(i+blksize)<=stop?blksize:(stop-i+1); shmemx_long_add_nb( &table[ioff], -1,ipe,NULL); } break; case 7: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_op( habu_table_handle,ioff,ipe, HABU_ADD,&ONE,MYTHREAD); } break; case 8: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmemx_long_add_nb( &table[mytabstart], -1,ipe,NULL); } break; case 9: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_op( habu_table_handle,mytabstart,ipe, HABU_ADD,&ONE,MYTHREAD); } break; case 10: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmemx_long_add_nb( &table[ioff], -2,ipe,NULL); } break; case 11: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_op( habu_table_handle,ioff,ipe, HABU_ADD,&ONE,MYTHREAD); habu_op( habu_table_handle,ioff,ipe, HABU_INC,&ONE,MYTHREAD);; } break; case 12: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmemx_long_fadd_nb( &sheapmbuf[i], &table[ioff], -1,ipe,NULL); } break; case 13: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_fop( habu_local_handle,i,mype,habu_table_handle,ioff,ipe, HABU_FADD,&ONE,MYTHREAD); } break; case 14: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = 
(i+blksize)<=stop?blksize:(stop-i+1); if(i%(nrepeats*2)>0)continue; int64_t rlevel = 1; while(0 != shmem_long_cswap( &table[ioff],0L, 1L,ipe)){ ipe = (ipe+919)%npes; ioff = (ioff+907)&(tabsize-1); rlevel++; //printf("mype= %d %d rp ir=%d i= %ld ipe= %d ioff= %ld\n", mype,omp_get_thread_num(),ir,i,ipe,ioff);fflush(stdout); if(rlevel>1000){ printf("mype= %d %d detected excessive rlevel rp ir=%d i= %ld ipe= %d ioff= %ld\n", mype,omp_get_thread_num(),ir,i,ipe,ioff);fflush(stdout); break; } } maxrlevel = maxrlevel<rlevel?rlevel:maxrlevel; } break; case 15: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); if(i%(nrepeats*2)>0)continue; int64_t pack[2]={1,2}; habu_op( habu_table_handle,ioff,ipe, hrpl.HABU_RP,pack,MYTHREAD); } break; case 16: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); if(i%(nrepeats*2)>0)continue; int64_t rlevel = 1; while(0 != shmem_long_cswap( &table[ioff],0L, 1L,ipe)){ ipe = (ipe+919)%npes; ioff = (ioff+907)&(tabsize-1); rlevel++; // printf("mype= %d %d rp ir=%d i= %ld ipe= %d ioff= %ld\n", mype,omp_get_thread_num(),ir,i,ipe,ioff);fflush(stdout); } sheapmbuf[i]=ioff; maxrlevel = maxrlevel<rlevel?rlevel:maxrlevel; } break; case 17: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); if(i%(nrepeats*2)>0)continue; int64_t pack[2]={1,2}; habu_fop( habu_local_handle,i,mype,habu_table_handle,ioff,ipe, hfrpl.HABU_RP,pack,MYTHREAD); } break; case 18: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_get64_nbi( &sheapmbuf[i], &table[ioff], icount,ipe); } break; case 19: for (i=start; i<=stop; i+=blksize) { int ipe = 
index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); for(int ii=0;ii<icount;ii++) habu_fop( habu_local_handle,i+ii,mype,habu_table_handle,ioff+ii,ipe, HABU_GET,0,MYTHREAD); } break; case 20: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_get64_nbi( &sheapmbuf[i], &table[ioff], icount,ipe); } break; case 21: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_getv(habu_local_handle,i,mype,habu_table_handle,ioff,ipe, icount,MYTHREAD); // for(int ii=0;ii<icount;ii++) // habu_fop( habu_local_handle,i+ii,mype,habu_table_handle,ioff+ii,ipe, HABU_GET,0,MYTHREAD); } break; case 22: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_put64( &table[ioff],&sheapmbuf[i], icount,ipe); } break; case 23: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); habu_putv( habu_table_handle, ioff,ipe, &sheapmbuf[i], icount,MYTHREAD); // for(int ii=0;ii<icount;ii++) // habu_op( habu_table_handle,ioff+ii,ipe, HABU_PUT,&sheapmbuf[i+ii],MYTHREAD); } break; case 24: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_get64_nbi( &heapmbuf[i], &table[ioff], icount,ipe); } break; case 25: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_get64_nbi( &stackmbuf[i], &table[ioff], icount,ipe); } break; case 26: for (i=start; i<=stop; i+=blksize) { int ipe = 
index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_get64( &sheapmbuf[i], &table[ioff], icount,ipe); } break; case 27: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_put64_nbi( &table[ioff],&sheapmbuf[i], icount,ipe); } break; case 28: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_put64_nbi( &table[ioff],&heapmbuf[i], icount,ipe); } break; case 29: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_put64_nbi( &table[ioff],&stackmbuf[i], icount,ipe); } break; case 30: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmemx_put64_signal(&table[ioff],&sheapmbuf[i],icount,&signal[mype],1,ipe); } break; case 31: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmemx_put64_signal_nb( &table[ioff],&sheapmbuf[i], icount,&signal[mype+MYTHREAD],1,ipe,NULL); } break; case 32: for (i=start; i<=stop; i+=blksize) { int ipe = index[i]>>l2tabsize; int64_t ioff = index[i]&(tabsize-1); int64_t icount = (i+blksize)<=stop?blksize:(stop-i+1); shmem_put64( &table[ioff],&sheapmbuf[i], icount,ipe); shmemx_thread_quiet(); shmem_put64( &signal[mype+MYTHREAD],&ONE,1,ipe); } break; } } // printf("mype= %d tid= %d done0 generating updates\n",mype,MYTHREAD);fflush(stdout); // if(itest%2==1){ // habu_barrier(MYTHREAD); // // printf("mype= %d tid= %d after0 habu_barrier\n",mype,MYTHREAD);fflush(stdout); // } // } if(strncmp(tnames[itest],"HABU",4)==0){ 
hbartime=-WSEC(); //if(itest%2==1){ // if(mype==0) printf("mype= %d tid= %d done generating updates bf habu barrier itest=%d\n",mype,MYTHREAD,itest);fflush(stdout); habu_barrier(MYTHREAD); // printf("mype= %d tid= %d done generating updates af barrier itest=%d\n",mype,MYTHREAD,itest);fflush(stdout); hbartime+=WSEC(); } // habu_barrier(MYTHREAD,nthreads); // habu_barrier(MYTHREAD,nthreads); // if(itest==0 ||itest==2 ||itest==4 ||itest==6||itest==8) habu_barrier(MYTHREAD,nthreads); // if(itest%2==1) printf("mype= %d tid= %d after habu_barrier itest=%d\n",mype,MYTHREAD,itest); } // Unregister the handles so that those handles can be recycled. habu_unregister_memory(habu_table_handle); habu_unregister_memory(habu_local_handle); habu_unregister_op(hrpl.HABU_RP); habu_unregister_op(hfrpl.HABU_RP); shmem_barrier_all(); /* End timed section */ cputime += CPUSEC(); s += WSEC(); if (mype == 0){ double mups = nups[itest]*(nrepeats*(double) npes*(double)nupdate / s/1000000); // double mpc = ((double) npes*nupdate / s/1000000)/(npes*numthreads); nGbytes= (nups[itest]*nrepeats*(double)npes*nupdate*nbytes[itest]) /GIBI; double bw = nGbytes/s; printf("%7d %7.2lf %8.1lf %6.1lf %8.2lf %8.2lf ", nbytes[itest]*blksize,s,mups,mups/nnodes,bw,bw/nnodes); // printf("hbartime=%lf ",hbartime); if(strncmp(tnames[itest],"ATOMIC_RP",14)==0) {printf("maxrlevel=%ld ",maxrlevel); maxrlevel=0;} } if(1 && (blksize+lastblksize > topblksize)){ // Set if test to true if you want simple error checking on updates int error_print_cnt = 0; if(itest<=END_CORREST_TEST && ((itest%2)==1)){ error_count[0]=0; maxval[0]=0; int64_t ncheck = nupdate; // Run an error check on the target table. 
for (int i = 0; i < tabsize; i++){ int64_t ref = mype+i; if(strncmp(tnames[itest],"HABU_RP",14)==0 || strncmp(tnames[itest],"HABU_FRP",14)==0) { if(table[i]!=0 && table[i]!=2) { error_count[0] += 1; if(error_count[0]<error_print_cnt) printf("%d table %d = %ld \n",mype,i,table[i]); } } if(strncmp(tnames[itest],"HABU_RP",14)!=0 && strncmp(tnames[itest],"HABU_FRP",14)!=0 ) { if(table[i]!=ref) { error_count[0] += 1; if(error_count[0]<error_print_cnt) printf("%d table %d = %ld \n",mype,i,table[i]); } } if(table[i]<maxval[0]) maxval[0]=table[i]; } // if(strncmp(tnames[itest],"HABU_FADD",14)==0 && nrepeats==1){ if(strncmp(tnames[itest],"HABU_FADD",14)==0 || strncmp(tnames[itest],"HABU_FRP",14)==0 ){ // Run an error check on the fetched values. Only works when nrepeats==1. ncheck += nupdate; int64_t wiggle = 5; if(strncmp(tnames[itest],"HABU_FRP",14)==0) wiggle = 1000; for (int i = 0; i < nupdate; i++) { if(labs(sheapmbuf[i]-heapmbuf[i])>wiggle*nrepeats){ // If the check differs by a handful we can figure that was a simple and expect race error_count[0] += 1; if(error_count[0]<error_print_cnt) printf("%d sheapmbuf %d = %ld heapmbuf= %ld\n",mype,i,sheapmbuf[i],heapmbuf[i]); } } } if(strncmp(tnames[itest],"HABU_GET",14)==0 || strncmp(tnames[itest],"HABU_GETV",14)==0 ){ // Run an error check on the fetched values. Only works when nrepeats==1. 
// ncheck += nupdate; for (int i = 0; i < nupdate; i++) { if(sheapmbuf[i]!=heapmbuf[i]){ error_count[0] += 1; if(error_count[0]<error_print_cnt) printf("%d sheapmbuf %d = %ld heapmbuf= %ld\n",mype,i,sheapmbuf[i],heapmbuf[i]); } } } shmem_barrier_all(); int64_t val; if(mype==0){ // printf("PE %d error_count= %ld\n",0,error_count[0]); for(i=1;i<npes;i++) { shmem_get64(&val,&error_count[0],1,i); // printf("PE %ld error_count= %ld\n",i,val); error_count[0] += val; shmem_get64(&val,&maxval[0],1,i); // printf("PE %ld error_count= %ld\n",i,val); if(val<maxval[0]) maxval[0]= val; } double error_rate = (100.0*error_count[0])/(npes*ncheck); if(error_count[0]==0) { // If atomicity is working most tests should pass with 100% correctness. printf("PASSED"); }else{ // If atomicity is off, or you are fetching or other cases, we might still count // and error rate of <1% as passing. But if we are getting any errors we // want to do about it so we have a different print. if(error_rate < 1){ printf("PASSED with %.4g%% error rate",error_rate); }else{ // Too many errors means we failed the check! 
printf("FAILED with %.4g%% error rate",error_rate); } } } } } int b=blksize; blksize+=lastblksize; lastblksize=b; if(opts.msgrowth>1) blksize=b*opts.msgrowth; if(opts.msgrowth<0) { if(8*b<(-1*opts.msgrowth)&&b>=8){blksize=b+4;blksize2=blksize;} else { blksize=b*1.5; if(blksize>=(2*blksize2) || blksize==1){ blksize2=blksize2*2; blksize=blksize2; } } } // lastblksize=b; if(mype==0) printf("\n"); } if(mype==0 && itest%2==1) printf("\n"); shmem_barrier_all(); // Prevents wrap around on the tests } #pragma omp parallel { int tid = omp_get_thread_num(); habu_stats(tid); } } /******************************** main routine ********************************/ int main(int argc, char *argv[]) { int64_t tabsize; int64_t local_tabsize; int64_t nupdate; int numthreads,mytid; int npes,mype; int provided; int64_t *table; int64_t *index; double GiB; int opt_status; #ifdef THREAD_HOT int requested = SHMEM_THREAD_MULTIPLE; #if 0 /* to be used only with OpenSHMEM compliant implementation */ shmem_init_thread(requested, &provided);/* Old Cray SHMEMX API */ assert(requested == provided); #else shmemx_init_thread(requested); #endif #else shmem_init(); #endif npes = shmem_n_pes(); mype = shmem_my_pe(); int lpes = shmemx_local_npes(); int nnodes = (npes+lpes-1)/lpes; if(mype==0){ // if(lpes*nnodes != npes )printf("*** WARNING: lpes*nnodes != npes. Per node calculations may be incorrect. ***\n"); } opts = check_args(&argc, &argv); tabsize = (1L << opts.l2tabsize); nupdate = (1L << opts.l2nupdates); int nrepeats = opts.nrepeats; #pragma omp parallel { int64_t MYTHREAD = omp_get_thread_num(); mytid = MYTHREAD; #ifdef THREAD_HOT shmemx_thread_register(); #endif numthreads = omp_get_num_threads(); } // printf("%d thinks there are %d threads\n",mytid,numthreads); if (!tabsize || !nupdate) { if (mype == 0) { fprintf(stderr, "ERROR: Incorrect command line argument format.\n"); } exit(1); } table = (int64_t*)shmem_malloc((tabsize)*8); if(table==NULL){ printf("Error. 
Allocation of table was unsuccessful. \n"); return 1; } index = (int64_t*)malloc(nupdate*8); if(index==NULL){ printf("Error. Allocation of index was unsuccessful. \n"); return 1; } GiB = tabsize * 8.0 / GIBI; if (mype == 0) { printf("****************************************************\n"); printf("* %s version %s \n*\n", PACKAGE_NAME, PACKAGE_VERSION); printf("* NPES = %d\n", npes); printf("* NNODES (N) = %d\n", nnodes); printf("* Threads = %d\n", numthreads); printf("* Tests = %s\n", opts.singletest); printf("* Table size/PE (GiB) = %.3f\n", GiB); printf("* Table length/PE (elements) = %ld words\n", tabsize); printf("* Log2 Table length/PE = %zu \n", opts.l2tabsize); printf("* Number of updates/PE = %ld\n", nupdate); printf("* nrepeats = %d\n", nrepeats); if(opts.msgrowth==1){printf("* Msg Size Growth = Fibonacci\n");} else{ printf("* Msg Size Growth = %d\n", opts.msgrowth);} printf("* Index array size/PE (MiB) = %.3f\n", (double)nupdate*8.0/MIBI); printf("* Table array size/PE (MiB) = %.3f\n", (double)tabsize*8.0/MIBI); printf("* Est Memory footprint/PE (MiB) = %.3f\n", (tabsize+4*nupdate)*8.0/MIBI); printf("* Index array size/NODE (MiB) = %.3f\n", lpes*(double)nupdate*8.0/MIBI); printf("* Table array size/NODE (MiB) = %.3f\n", lpes*(double)tabsize*8.0/MIBI); printf("* Est Memory footprint/NODE (MiB)= %.3f\n", lpes*(tabsize+4*nupdate)*8.0/MIBI); printf("****************************************************\n"); } habu_init(numthreads); update_table(tabsize, nupdate, table, index, nrepeats); if(mype==0)printf("\n\nHeimdallr65 has seen your network!\n\n"); shmem_finalize(); return 0; }
reduction.h
#ifndef __DACE_REDUCTION_H #define __DACE_REDUCTION_H #include <cstdint> #include "types.h" #include "math.h" // for ::min, ::max #ifdef __CUDACC__ #include "../../../external/cub/cub/device/device_segmented_reduce.cuh" #include "../../../external/cub/cub/device/device_reduce.cuh" #include "../../../external/cub/cub/block/block_reduce.cuh" #include "../../../external/cub/cub/iterator/counting_input_iterator.cuh" #include "../../../external/cub/cub/iterator/transform_input_iterator.cuh" #endif // Specializations for reductions implemented in frameworks like OpenMP, MPI namespace dace { // Internal type. See below for wcr_fixed external type, which selects // the implementation according to T's properties. template <ReductionType REDTYPE, typename T> struct _wcr_fixed { static DACE_HDFI void reduce(T *ptr, const T& value); static DACE_HDFI void reduce_atomic(T *ptr, const T& value); DACE_HDFI T operator()(const T &a, const T &b) const; }; // Custom reduction with a lambda function template <typename T> struct wcr_custom { template <typename WCR> static DACE_HDFI void reduce_atomic(WCR wcr, T *ptr, const T& value) { // The slowest kind of atomic operations (locked/compare-and-swap), // this should only happen in case of unrecognized lambdas #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 // Adapted from CUDA's pre-v8.0 double atomicAdd implementation T old = *ptr, assumed; do { assumed = old; old = atomicCAS(ptr, assumed, wcr(assumed, value)); } while (assumed != old); #else #pragma omp critical *ptr = wcr(*ptr, value); #endif } // Non-conflicting version --> no critical section template <typename WCR> static DACE_HDFI void reduce(WCR wcr, T *ptr, const T& value) { *ptr = wcr(*ptr, value); } }; template <typename T> struct _wcr_fixed<ReductionType::Sum, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr += value; } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicAdd(ptr, value); 
#else #pragma omp atomic *ptr += value; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a + b; } }; // Implementation of double atomicAdd for CUDA architectures prior to 6.0 #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 600 template <> struct _wcr_fixed<ReductionType::Sum, double> { static DACE_HDFI void reduce(double *ptr, const double& value) { *ptr += value; } static DACE_HDFI void reduce_atomic(double *ptr, const double& value) { unsigned long long int* address_as_ull = (unsigned long long int*)ptr; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __double_as_longlong(value + __longlong_as_double(assumed))); } while (assumed != old); } DACE_HDFI double operator()(const double &a, const double &b) const { return a + b; } }; #endif template <typename T> struct _wcr_fixed<ReductionType::Product, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr *= value; } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 wcr_custom<T>::reduce( _wcr_fixed<ReductionType::Product, T>(), ptr, value); #else #pragma omp atomic *ptr *= value; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a * b; } }; template <typename T> struct _wcr_fixed<ReductionType::Min, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = ::min(*ptr, value); } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicMin(ptr, value); #else wcr_custom<T>::reduce( _wcr_fixed<ReductionType::Min, T>(), ptr, value); #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return ::min(a, b); } }; template <typename T> struct _wcr_fixed<ReductionType::Max, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = ::max(*ptr, value); } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 
300 atomicMax(ptr, value); #else wcr_custom<T>::reduce( _wcr_fixed<ReductionType::Max, T>(), ptr, value); #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return ::max(a, b); } }; template <typename T> struct _wcr_fixed<ReductionType::Logical_And, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = (*ptr && value); } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicAnd(ptr, value ? T(1) : T(0)); #else T val = (value ? T(1) : T(0)); #pragma omp atomic *ptr &= val; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a && b; } }; template <typename T> struct _wcr_fixed<ReductionType::Bitwise_And, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr &= value; } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicAnd(ptr, value); #else #pragma omp atomic *ptr &= value; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a & b; } }; template <typename T> struct _wcr_fixed<ReductionType::Logical_Or, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = (*ptr || value); } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicOr(ptr, value ? T(1) : T(0)); #else T val = (value ? 
T(1) : T(0)); #pragma omp atomic *ptr |= val; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a || b; } }; template <typename T> struct _wcr_fixed<ReductionType::Bitwise_Or, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr |= value; } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicOr(ptr, value); #else #pragma omp atomic *ptr |= value; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a | b; } }; template <typename T> struct _wcr_fixed<ReductionType::Logical_Xor, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr = (*ptr != value); } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicXor(ptr, value ? T(1) : T(0)); #else T val = (value ? T(1) : T(0)); #pragma omp atomic *ptr ^= val; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a != b; } }; template <typename T> struct _wcr_fixed<ReductionType::Bitwise_Xor, T> { static DACE_HDFI void reduce(T *ptr, const T& value) { *ptr ^= value; } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { #if defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300 atomicXor(ptr, value); #else #pragma omp atomic *ptr ^= value; #endif } DACE_HDFI T operator()(const T &a, const T &b) const { return a ^ b; } }; ////////////////////////////////////////////////////////////////////////// // Specialization that regresses to critical section / locked update for // unsupported types template<typename T> using EnableIfScalar = typename std::enable_if<std::is_scalar<T>::value>::type; // Any vector type that is not of length 1, or struct/complex types // do not support atomics. In these cases, we regress to locked updates. 
template <ReductionType REDTYPE, typename T, typename SFINAE = void> struct wcr_fixed { static DACE_HDFI void reduce(T *ptr, const T& value) { _wcr_fixed<REDTYPE, T>::reduce(ptr, value); } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { wcr_custom<T>::template reduce_atomic( _wcr_fixed<REDTYPE, T>(), ptr, value); } }; // When atomics are supported, use _wcr_fixed normally template <ReductionType REDTYPE, typename T> struct wcr_fixed<REDTYPE, T, EnableIfScalar<T> > { static DACE_HDFI void reduce(T *ptr, const T& value) { _wcr_fixed<REDTYPE, T>::reduce(ptr, value); } static DACE_HDFI void reduce_atomic(T *ptr, const T& value) { _wcr_fixed<REDTYPE, T>::reduce_atomic(ptr, value); } DACE_HDFI T operator()(const T &a, const T &b) const { return _wcr_fixed<REDTYPE, T>()(a, b); } }; #ifdef __CUDACC__ struct StridedIteratorHelper { explicit StridedIteratorHelper(size_t stride) : stride(stride) {} size_t stride; __host__ __device__ __forceinline__ size_t operator()(const size_t &index) const { return index * stride; } }; inline auto stridedIterator(size_t stride) { cub::CountingInputIterator<int> counting_iterator(0); StridedIteratorHelper conversion_op(stride); cub::TransformInputIterator<int, decltype(conversion_op), decltype(counting_iterator)> itr(counting_iterator, conversion_op); return itr; } #endif } // namespace dace #endif // __DACE_REDUCTION_H
tetrahedron.c
#include "tetrahedron_method.h"
#include "kgrid.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

/* Number of sample frequencies on the DOS energy axis (compile-time
 * constant so the result arrays are plain fixed-size arrays, not VLAs). */
enum { NUM_FREQS = 201 };

static void test_tetrahedron_method(void);
static void mat_copy_matrix_d3(double a[3][3], double b[3][3]);
static double mat_get_determinant_d3(double a[3][3]);
static int mat_inverse_matrix_d3(double m[3][3], double a[3][3],
                                 const double precision);

int main(void)
{
  test_tetrahedron_method();
  return 0;
}

/* frequency.dat is in the example directory. */
/* The values in this file are the phonon frequencies of NaCl */
/* with 20x20x20 mesh. Calculation was done with reducing */
/* k-points to the irreducible k-points using phonopy. */
/* (https://github.com/phonopy/phonopy) */
static void test_tetrahedron_method(void)
{
  printf("*** Example of tetrahedron method of NaCl to calculate DOS ***:\n");
  printf("Read data from frequency.dat and write DOS to dos.dat.\n");

  int i, j, k, l, q, r;

  /* NaCl 20x20x20 gamma-centre mesh (m=20 and "frequency-202020.dat" file) */
  /* NaCl 10x10x10 gamma-centre mesh (m=10 and "frequency-101010.dat" file) */
  double lattice[3][3] = {
    {0.000000000000000, 2.845150738087836, 2.845150738087836},
    {2.845150738087836, 0.000000000000000, 2.845150738087836},
    {2.845150738087836, 2.845150738087836, 0.000000000000000}
  };
  int num_atom = 2;
  int m = 20; /* m = 10 for 10x10x10 mesh */
  int mesh[3] = {m, m, m};
  size_t num_gp = mesh[0] * mesh[1] * mesh[2];
  size_t num_freq_vals = num_gp * num_atom * 3;
  int is_shift[3] = {0, 0, 0};
  int relative_grid_address[24][4][3];
  double rec_lat[3][3];
  FILE *fp;
  char *line = NULL;
  size_t len = 0;
  size_t nread;
  double max_f, min_f;
  double t_omegas[24][4];
  int g_addr[3];
  int g_addr_double[3];
  size_t gp;
  double dos[NUM_FREQS];
  double integral_dos[NUM_FREQS];
  double omegas[NUM_FREQS];
  double iw;

  /* Heap allocation instead of VLAs: for the 20x20x20 mesh, frequency
   * alone is 8000 * 6 doubles (~384 KB), too large to risk on the stack. */
  int (*grid_address)[3] = malloc(num_gp * sizeof *grid_address);
  double *frequency = malloc(num_freq_vals * sizeof *frequency);
  if (grid_address == NULL || frequency == NULL) {
    fprintf(stderr, "Memory allocation failed.\n");
    goto out;
  }

  kgd_get_all_grid_addresses(grid_address, mesh);
  /* Was unchecked: a singular lattice previously caused division by zero. */
  if (!mat_inverse_matrix_d3(rec_lat, lattice, 1e-5)) {
    fprintf(stderr, "Lattice matrix is (nearly) singular.\n");
    goto out;
  }
  thm_get_relative_grid_address(relative_grid_address, rec_lat);

  /* "frequency-101010.dat" for 10x10x10 mesh */
  fp = fopen("frequency-202020.dat", "r");
  if (fp == NULL) {
    fprintf(stderr, "Could not open frequency-202020.dat.\n");
    goto out;
  }
  for (nread = 0; nread < num_freq_vals; nread++) {
    if (getline(&line, &len, fp) == -1) {
      break;
    }
    frequency[nread] = strtod(line, NULL);
  }
  free(line); /* getline's buffer must be released by the caller */
  fclose(fp);
  if (nread < num_freq_vals) {
    /* Previously a silent break left the tail of frequency[] uninitialized. */
    fprintf(stderr, "Frequency file too short: read %zu of %zu values.\n",
            nread, num_freq_vals);
    goto out;
  }

  /* Energy window = [min, max] over all phonon frequencies. */
  max_f = frequency[0];
  min_f = frequency[0];
  for (gp = 0; gp < num_freq_vals; gp++) {
    if (max_f < frequency[gp]) {
      max_f = frequency[gp];
    }
    if (min_f > frequency[gp]) {
      min_f = frequency[gp];
    }
  }

  /* Each iteration handles one energy sample independently. */
#pragma omp parallel for private(j, k, l, q, r, g_addr, g_addr_double, gp, t_omegas, iw)
  for (i = 0; i < NUM_FREQS; i++) {
    dos[i] = 0;
    integral_dos[i] = 0;
    omegas[i] = min_f + (max_f - min_f) / (NUM_FREQS - 1) * i;
    for (j = 0; j < num_gp; j++) {
      for (k = 0; k < num_atom * 3; k++) {
        /* Gather the band frequencies at the 24 tetrahedra x 4 vertices
         * surrounding grid point j. */
        for (l = 0; l < 24; l++) {
          for (q = 0; q < 4; q++) {
            for (r = 0; r < 3; r++) {
              g_addr[r] = grid_address[j][r] + relative_grid_address[l][q][r];
            }
            kgd_get_grid_address_double_mesh(g_addr_double, g_addr,
                                             mesh, is_shift);
            gp = kgd_get_grid_point_double_mesh(g_addr_double, mesh);
            t_omegas[l][q] = frequency[gp * num_atom * 3 + k];
          }
        }
        /* 'J' -> DOS contribution, 'I' -> integrated DOS contribution. */
        iw = thm_get_integration_weight(omegas[i], t_omegas, 'J');
        dos[i] += iw;
        iw = thm_get_integration_weight(omegas[i], t_omegas, 'I');
        integral_dos[i] += iw;
      }
    }
  }

  fp = fopen("dos.dat", "w");
  if (fp == NULL) {
    fprintf(stderr, "Could not open dos.dat for writing.\n");
    goto out;
  }
  for (i = 0; i < NUM_FREQS; i++) {
    fprintf(fp, "%f %f\n", omegas[i], dos[i] / num_gp);
  }
  fprintf(fp, "\n\n");
  for (i = 0; i < NUM_FREQS; i++) {
    fprintf(fp, "%f %f\n", omegas[i], integral_dos[i] / num_gp);
  }
  fclose(fp);

out:
  free(grid_address);
  free(frequency);
}

/* a = b (element-wise copy of a 3x3 matrix). */
static void mat_copy_matrix_d3(double a[3][3], double b[3][3])
{
  a[0][0] = b[0][0];
  a[0][1] = b[0][1];
  a[0][2] = b[0][2];
  a[1][0] = b[1][0];
  a[1][1] = b[1][1];
  a[1][2] = b[1][2];
  a[2][0] = b[2][0];
  a[2][1] = b[2][1];
  a[2][2] = b[2][2];
}

/* Determinant of a 3x3 matrix by cofactor expansion along the first row. */
static double mat_get_determinant_d3(double a[3][3])
{
  return a[0][0] * (a[1][1] * a[2][2] - a[1][2] * a[2][1])
       + a[0][1] * (a[1][2] * a[2][0] - a[1][0] * a[2][2])
       + a[0][2] * (a[1][0] * a[2][1] - a[1][1] * a[2][0]);
}

/* m = inverse(a). Returns 1 on success, 0 if |det(a)| < precision
 * (m is left untouched in that case).
 * FIX: 'precision' was previously ignored, so a singular matrix caused
 * division by zero. */
static int mat_inverse_matrix_d3(double m[3][3], double a[3][3],
                                 const double precision)
{
  double det;
  double c[3][3];

  det = mat_get_determinant_d3(a);
  if (fabs(det) < precision) {
    return 0;
  }

  c[0][0] = (a[1][1] * a[2][2] - a[1][2] * a[2][1]) / det;
  c[1][0] = (a[1][2] * a[2][0] - a[1][0] * a[2][2]) / det;
  c[2][0] = (a[1][0] * a[2][1] - a[1][1] * a[2][0]) / det;
  c[0][1] = (a[2][1] * a[0][2] - a[2][2] * a[0][1]) / det;
  c[1][1] = (a[2][2] * a[0][0] - a[2][0] * a[0][2]) / det;
  c[2][1] = (a[2][0] * a[0][1] - a[2][1] * a[0][0]) / det;
  c[0][2] = (a[0][1] * a[1][2] - a[0][2] * a[1][1]) / det;
  c[1][2] = (a[0][2] * a[1][0] - a[0][0] * a[1][2]) / det;
  c[2][2] = (a[0][0] * a[1][1] - a[0][1] * a[1][0]) / det;
  mat_copy_matrix_d3(m, c);
  return 1;
}
lapack_wrapper.c
/* Copyright (C) 2015 Atsushi Togo */
/* All rights reserved. */

/* This file is part of phonopy. */

/* Redistribution and use in source and binary forms, with or without */
/* modification, are permitted provided that the following conditions */
/* are met: */

/* * Redistributions of source code must retain the above copyright */
/*   notice, this list of conditions and the following disclaimer. */

/* * Redistributions in binary form must reproduce the above copyright */
/*   notice, this list of conditions and the following disclaimer in */
/*   the documentation and/or other materials provided with the */
/*   distribution. */

/* * Neither the name of the phonopy project nor the names of its */
/*   contributors may be used to endorse or promote products derived */
/*   from this software without specific prior written permission. */

/* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS */
/* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT */
/* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS */
/* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE */
/* COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, */
/* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, */
/* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; */
/* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER */
/* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT */
/* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN */
/* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE */
/* POSSIBILITY OF SUCH DAMAGE. */

#include <lapack_wrapper.h>

#include <stdlib.h> /* malloc / free (previously pulled in transitively) */

/* NOTE: function-like macro; evaluates its arguments twice. All current
 * call sites pass plain variables, so this is safe here. */
#define min(a,b) ((a)>(b)?(b):(a))

#ifdef MKL_LAPACKE
/* MKL does not provide lapack_make_complex_double; supply it. */
MKL_Complex16 lapack_make_complex_double(double re, double im)
{
  MKL_Complex16 z;
  z.real = re;
  z.imag = im;
  return z;
}
#endif

/* Eigendecomposition of an n x n Hermitian matrix (row-major).
 * On exit 'a' holds the eigenvectors and 'w' the eigenvalues.
 * Returns the LAPACK info code (0 on success). */
int phonopy_zheev(double *w,
                  lapack_complex_double *a,
                  const int n,
                  const char uplo)
{
  lapack_int info;
  info = LAPACKE_zheev(LAPACK_ROW_MAJOR, 'V', uplo,
                       (lapack_int)n, a, (lapack_int)n, w);
  return (int)info;
}

/* Moore-Penrose pseudo-inverse of an m x n matrix via SVD.
 * Singular values <= cutoff are treated as zero. data_out receives the
 * n x m pseudo-inverse. Returns the LAPACK info code (0 on success) or
 * -1 on allocation failure. */
int phonopy_pinv(double *data_out,
                 const double *data_in,
                 const int m,
                 const int n,
                 const double cutoff)
{
  int i, j, k;
  lapack_int info;
  double *s, *a, *u, *vt, *superb;

  a = malloc(sizeof(double) * m * n);
  s = malloc(sizeof(double) * min(m, n));
  u = malloc(sizeof(double) * m * m);
  vt = malloc(sizeof(double) * n * n);
  /* dgesvd needs min(m,n)-1 workspace entries; allocate at least one so a
   * NULL return unambiguously means out-of-memory (malloc(0) may be NULL). */
  superb = malloc(sizeof(double) * (min(m, n) > 1 ? min(m, n) - 1 : 1));

  /* FIX: allocation results were previously used unchecked. */
  if (a == NULL || s == NULL || u == NULL || vt == NULL || superb == NULL) {
    free(a);
    free(s);
    free(u);
    free(vt);
    free(superb);
    return -1;
  }

  for (i = 0; i < m * n; i++) {
    a[i] = data_in[i];
  }

  info = LAPACKE_dgesvd(LAPACK_ROW_MAJOR, 'A', 'A',
                        (lapack_int)m, (lapack_int)n,
                        a, (lapack_int)n,
                        s, u, (lapack_int)m, vt, (lapack_int)n,
                        superb);

  for (i = 0; i < n * m; i++) {
    data_out[i] = 0;
  }

  /* FIX: only reconstruct from s/u/vt when the SVD actually succeeded;
   * on failure those arrays are not fully defined. */
  if (info == 0) {
    /* pinv(A) = V * diag(1/s) * U^T, skipping singular values <= cutoff. */
    for (i = 0; i < m; i++) {
      for (j = 0; j < n; j++) {
        for (k = 0; k < min(m, n); k++) {
          if (s[k] > cutoff) {
            data_out[j * m + i] += u[i * m + k] / s[k] * vt[k * n + j];
          }
        }
      }
    }
  }

  free(a);
  free(s);
  free(u);
  free(vt);
  free(superb);

  return (int)info;
}

/* Run phonopy_pinv over num_thread independent blocks in parallel.
 * Block i occupies max_row_num * column_num entries in data_in/data_out
 * but only its first row_nums[i] rows are meaningful; per-block status
 * codes are written to info_out. */
void phonopy_pinv_mt(double *data_out,
                     int *info_out,
                     const double *data_in,
                     const int num_thread,
                     const int *row_nums,
                     const int max_row_num,
                     const int column_num,
                     const double cutoff)
{
  int i;

#pragma omp parallel for
  for (i = 0; i < num_thread; i++) {
    info_out[i] = phonopy_pinv(data_out + i * max_row_num * column_num,
                               data_in + i * max_row_num * column_num,
                               row_nums[i],
                               column_num,
                               cutoff);
  }
}

/* Eigendecomposition of a real symmetric matrix (column-major).
 * algorithm 0 -> dsyev, 1 -> dsyevd. Returns the LAPACK info code,
 * or -1 for an unknown algorithm selector. */
int phonopy_dsyev(double *data,
                  double *eigvals,
                  const int size,
                  const int algorithm)
{
  lapack_int info;

  info = 0;
  switch (algorithm) {
  case 0: /* dsyev */
    info = LAPACKE_dsyev(LAPACK_COL_MAJOR, 'V', 'U',
                         (lapack_int)size, data, (lapack_int)size, eigvals);
    break;
  case 1: /* dsyevd */
    info = LAPACKE_dsyevd(LAPACK_COL_MAJOR, 'V', 'U',
                          (lapack_int)size, data, (lapack_int)size, eigvals);
    break;
  default:
    /* FIX: an unknown selector previously fell through and reported
     * success (0) without computing anything. */
    info = -1;
    break;
  }
  return (int)info;
}
GB_unop__asinh_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__asinh_fc64_fc64)
// op(A') function:  GB (_unop_tran__asinh_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = casinh (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex double asinh, applied elementwise
#define GB_OP(z, x) \
    z = casinh (x) ;

// casting (no typecast needed: A and C are both GxB_FC64_t)
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC64_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ; \
    Cx [pC] = casinh (z) ; \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__asinh_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full case: every entry of A is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // aliasing Cx == Ax is safe: each entry is read before it is
            // written, and iterations touch disjoint positions
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = casinh (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = casinh (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__asinh_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is generated by the included template,
    // parameterized by the GB_* macros defined above
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_unaryop__minv_bool_uint32.c
//------------------------------------------------------------------------------ // GB_unaryop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_iterator.h" #include "GB_unaryop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop__minv_bool_uint32 // op(A') function: GB_tran__minv_bool_uint32 // C type: bool // A type: uint32_t // cast: ; // unaryop: cij = true #define GB_ATYPE \ uint32_t #define GB_CTYPE \ bool // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ ; #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = true ; // casting #define GB_CASTING(z, aij) \ ; ; // cij = op (cast (aij)) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ GB_GETA (aij, Ax, pA) ; \ /* Cx [pC] = op (cast (aij)) */ \ GB_CASTING (z, aij) ; \ GB_OP (GB_CX (pC), z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_MINV || GxB_NO_BOOL || GxB_NO_UINT32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop__minv_bool_uint32 ( bool *Cx, // Cx and Ax may be aliased uint32_t *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { GB_CAST_OP (p, p) ; } return (GrB_SUCCESS) ; #endif } 
//------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_tran__minv_bool_uint32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unaryop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
parallel_measurement.c
#include<stdio.h> #include<math.h> #include<omp.h> #include<time.h> #include<string.h> #include<stdlib.h> int nborSize; int halfwidth = 3; int p; // Using the MONOTONIC clock #define CLK CLOCK_MONOTONIC struct timespec diff(struct timespec start, struct timespec end){ struct timespec temp; if((end.tv_nsec-start.tv_nsec)<0){ temp.tv_sec = end.tv_sec-start.tv_sec-1; temp.tv_nsec = 1000000000+end.tv_nsec-start.tv_nsec; } else{ temp.tv_sec = end.tv_sec-start.tv_sec; temp.tv_nsec = end.tv_nsec-start.tv_nsec; } return temp; } typedef struct { unsigned char red,green,blue; } PPMPixel; typedef struct { int x, y; PPMPixel *data; } PPMImage; typedef struct { unsigned char gs; } PPMPixelGS; typedef struct { int x, y; PPMPixelGS *data; } PPMImageGS; #define RGB_COMPONENT_COLOR 255 int red_neighbours[1024]; int blue_neighbours[1024]; int green_neighbours[1024]; int getNbors(PPMImage * im,int x,int y) { int i,j,count=0,m,n; int rows = im->x; int cols = im->y; for(i = x - halfwidth; i <= x + halfwidth; i++){ m = i; if(i < 0) m = 0; else if(i >= rows) m = rows - 1; for(j = y - halfwidth; j <= y + halfwidth; j++){ n = j; if(j < 0) n = 0; else if(j >= cols) n = cols - 1; int idx1 = (m * cols) + n; PPMPixel *temp1 = im->data + idx1; red_neighbours[count] = temp1->red; green_neighbours[count] = temp1->green; blue_neighbours[count] = temp1->blue; count++; } } return 0; } int cmpfunc (const void * a, const void * b) { return ( *(int*)a - *(int*)b ); } PPMImage* changeImage(PPMImage * im) { int rows = im->x; int cols = im->y; int x, y, halfWidth; PPMImage *im2 = (PPMImage*) malloc(sizeof(PPMImage)); im2->x = rows; im2->y = cols; im2->data = (PPMPixel *) malloc(rows*cols*sizeof(PPMPixel)); int qq = 1; omp_set_num_threads(p); #pragma omp parallel for private(x,y) firstprivate(red_neighbours,green_neighbours,blue_neighbours) shared(im,im2,rows,cols,nborSize,halfwidth) for (x = 0; x < rows; x++) { for (y = 0; y < cols; y++) { int i, j, m, n; int count = 0; for(i = x - halfwidth; i <= x + 
halfwidth; i++){ m = i; if(i < 0) m = 0; else if(i >= rows) m = rows - 1; for(j = y - halfwidth; j <= y + halfwidth; j++){ n = j; if(j < 0) n = 0; else if(j >= cols) n = cols - 1; int idx1 = (m * cols) + n; PPMPixel *temp1 = im->data + idx1; red_neighbours[count] = temp1->red; green_neighbours[count] = temp1->green; blue_neighbours[count] = temp1->blue; count++; } } int ii=0; qsort(red_neighbours, nborSize, sizeof(int), cmpfunc); qsort(green_neighbours, nborSize, sizeof(int), cmpfunc); qsort(blue_neighbours, nborSize, sizeof(int), cmpfunc); int index = (x * cols) + y; PPMPixel *temp2 = im2->data + index; temp2->red = red_neighbours[(nborSize/2)+1]; temp2->green = green_neighbours[(nborSize/2)+1]; temp2->blue = blue_neighbours[(nborSize/2)+1]; } } return im2; } static PPMImage *readPPM(const char *filename) { char buff[16]; PPMImage *img; FILE *fp; int c, rgb_comp_color; //open PPM file for reading fp = fopen(filename, "rb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //read image format if (!fgets(buff, sizeof(buff), fp)) { perror(filename); exit(1); } //check the image format if (buff[0] != 'P' || buff[1] != '6') { fprintf(stderr, "Invalid image format (must be 'P6')\n"); exit(1); } //alloc memory form image img = (PPMImage *)malloc(sizeof(PPMImage)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //check for comments c = getc(fp); while (c == '#') { while (getc(fp) != '\n') ; c = getc(fp); } ungetc(c, fp); //read image size information if (fscanf(fp, "%d %d", &img->x, &img->y) != 2) { fprintf(stderr, "Invalid image size (error loading '%s')\n", filename); exit(1); } //read rgb component if (fscanf(fp, "%d", &rgb_comp_color) != 1) { fprintf(stderr, "Invalid rgb component (error loading '%s')\n", filename); exit(1); } //check rgb component depth if (rgb_comp_color!= RGB_COMPONENT_COLOR) { fprintf(stderr, "'%s' does not have 8-bits components\n", filename); exit(1); } while (fgetc(fp) != '\n') ; //memory 
allocation for pixel data img->data = (PPMPixel*)malloc(img->x * img->y * sizeof(PPMPixel)); if (!img) { fprintf(stderr, "Unable to allocate memory\n"); exit(1); } //read pixel data from file if (fread(img->data, 3 * img->x, img->y, fp) != img->y) { fprintf(stderr, "Error loading image '%s'\n", filename); exit(1); } fclose(fp); return img; } void writePPM(const char *filename, PPMImage *img) { FILE *fp; //open file for output fp = fopen(filename, "wb"); if (!fp) { fprintf(stderr, "Unable to open file '%s'\n", filename); exit(1); } //write the header file //image format fprintf(fp, "P6\n"); //comments //image size fprintf(fp, "%d %d\n",img->x,img->y); // rgb component depth fprintf(fp, "%d\n",255); // pixel data fwrite(img->data, 3 * img->x, img->y, fp); fclose(fp); } int main(int argc, char* argv[]) { struct timespec start_e2e, end_e2e, start_alg, end_alg, e2e, alg; /* Should start before anything else */ clock_gettime(CLK, &start_e2e); /* Check if enough command-line arguments are taken in. 
*/ if(argc < 3){ printf( "Usage: %s n p \n", argv[0] ); return -1; } int n=atoi(argv[1]); /* size of input array */ p=atoi(argv[2]); /* number of processors*/ char *problem_name = "median_filtering"; char *approach_name = "qsort"; FILE* outputFile; char* c=argv[1]; char* str="../../Lenna"; char* str2=malloc(15); strcpy(str2,str); strcat(str2,c); char* str3=".ppm"; strcat(str2,str3); char* filename=str2; PPMImage *im; im = readPPM(filename); char outputFileName[50]; sprintf(outputFileName,"output/%s_%s_%s_%s_output.txt",problem_name,approach_name,argv[1],argv[2]); clock_gettime(CLK, &start_alg); /* Start the algo timer */ /*----------------------Core algorithm starts here----------------------------------------------*/ double start_time = omp_get_wtime(); PPMImage* im2 = changeImage(im); double end_time = omp_get_wtime(); /*----------------------Core algorithm finished--------------------------------------------------*/ clock_gettime(CLK, &end_alg); /* End the algo timer */ /* Ensure that only the algorithm is present between these two timers. Further, the whole algorithm should be present. */ char outputImageName[1024]; outputImageName[0] = '\0'; strcat(outputImageName, "../../Lenna_"); strcat(outputImageName, argv[1]); strcat(outputImageName,"_filtering_parallel.ppm"); writePPM(outputImageName,im2); /* Should end before anything else (printing comes later) */ clock_gettime(CLK, &end_e2e); e2e = diff(start_e2e, end_e2e); alg = diff(start_alg, end_alg); printf("%s,%s,%d,%d,%d,%ld,%d,%ld\n", problem_name, approach_name, n, p, e2e.tv_sec, e2e.tv_nsec, alg.tv_sec, alg.tv_nsec); return 0; }
s_aatritemp.h
/*
 * Mesa 3-D graphics library
 * Version:  7.0.3
 *
 * Copyright (C) 1999-2007  Brian Paul   All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included
 * in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
 * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * BRIAN PAUL BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN
 * AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */

/*
 * Antialiased Triangle Rasterizer Template
 *
 * This file is #include'd to generate custom AA triangle rasterizers.
 * NOTE: this code hasn't been optimized yet.  That'll come after it
 * works correctly.
 *
 * The following macros may be defined to indicate what auxiliary information
 * must be computed across the triangle:
 *    DO_Z       - if defined, compute Z values
 *    DO_ATTRIBS - if defined, compute texcoords, varying, etc.
 */

/*void triangle( struct gl_context *ctx, GLuint v0, GLuint v1, GLuint v2, GLuint pv )*/
{
   const SWcontext *swrast = SWRAST_CONTEXT(ctx);
   const GLfloat *p0 = v0->attrib[FRAG_ATTRIB_WPOS];
   const GLfloat *p1 = v1->attrib[FRAG_ATTRIB_WPOS];
   const GLfloat *p2 = v2->attrib[FRAG_ATTRIB_WPOS];
   const SWvertex *vMin, *vMid, *vMax;
   GLint iyMin, iyMax;
   GLfloat yMin, yMax;
   GLboolean ltor;
   GLfloat majDx, majDy;  /* major (i.e. long) edge dx and dy */
   SWspan span;
#ifdef DO_Z
   GLfloat zPlane[4];
#endif
   GLfloat rPlane[4], gPlane[4], bPlane[4], aPlane[4];
#if defined(DO_ATTRIBS)
   GLfloat attrPlane[FRAG_ATTRIB_MAX][4][4];
   GLfloat wPlane[4];  /* win[3] */
#endif
   GLfloat bf = SWRAST_CONTEXT(ctx)->_BackfaceCullSign;

   (void) swrast;

   INIT_SPAN(span, GL_POLYGON);
   span.arrayMask = SPAN_COVERAGE;

   /* determine bottom to top order of vertices */
   {
      GLfloat y0 = v0->attrib[FRAG_ATTRIB_WPOS][1];
      GLfloat y1 = v1->attrib[FRAG_ATTRIB_WPOS][1];
      GLfloat y2 = v2->attrib[FRAG_ATTRIB_WPOS][1];
      /* each swap of two vertices flips the winding, hence bf = -bf */
      if (y0 <= y1) {
         if (y1 <= y2) {
            vMin = v0;   vMid = v1;   vMax = v2;   /* y0<=y1<=y2 */
         }
         else if (y2 <= y0) {
            vMin = v2;   vMid = v0;   vMax = v1;   /* y2<=y0<=y1 */
         }
         else {
            vMin = v0;   vMid = v2;   vMax = v1;   bf = -bf; /* y0<=y2<=y1 */
         }
      }
      else {
         if (y0 <= y2) {
            vMin = v1;   vMid = v0;   vMax = v2;   bf = -bf; /* y1<=y0<=y2 */
         }
         else if (y2 <= y1) {
            vMin = v2;   vMid = v1;   vMax = v0;   bf = -bf; /* y2<=y1<=y0 */
         }
         else {
            vMin = v1;   vMid = v2;   vMax = v0;   /* y1<=y2<=y0 */
         }
      }
   }

   majDx = vMax->attrib[FRAG_ATTRIB_WPOS][0] - vMin->attrib[FRAG_ATTRIB_WPOS][0];
   majDy = vMax->attrib[FRAG_ATTRIB_WPOS][1] - vMin->attrib[FRAG_ATTRIB_WPOS][1];

   /* front/back-face determination and cullling */
   {
      const GLfloat botDx = vMid->attrib[FRAG_ATTRIB_WPOS][0] - vMin->attrib[FRAG_ATTRIB_WPOS][0];
      const GLfloat botDy = vMid->attrib[FRAG_ATTRIB_WPOS][1] - vMin->attrib[FRAG_ATTRIB_WPOS][1];
      /* signed area (cross product of the two lower edges) */
      const GLfloat area = majDx * botDy - botDx * majDy;
      /* Do backface culling */
      if (area * bf < 0 || area == 0 || IS_INF_OR_NAN(area))
         return;
      ltor = (GLboolean) (area < 0.0F);
      span.facing = area * swrast->_BackfaceSign > 0.0F;
   }

   /* Plane equation setup:
    * We evaluate plane equations at window (x,y) coordinates in order
    * to compute color, Z, fog, texcoords, etc.  This isn't terribly
    * efficient but it's easy and reliable.
    */
#ifdef DO_Z
   compute_plane(p0, p1, p2, p0[2], p1[2], p2[2], zPlane);
   span.arrayMask |= SPAN_Z;
#endif
   if (ctx->Light.ShadeModel == GL_SMOOTH) {
      compute_plane(p0, p1, p2, v0->color[RCOMP], v1->color[RCOMP], v2->color[RCOMP], rPlane);
      compute_plane(p0, p1, p2, v0->color[GCOMP], v1->color[GCOMP], v2->color[GCOMP], gPlane);
      compute_plane(p0, p1, p2, v0->color[BCOMP], v1->color[BCOMP], v2->color[BCOMP], bPlane);
      compute_plane(p0, p1, p2, v0->color[ACOMP], v1->color[ACOMP], v2->color[ACOMP], aPlane);
   }
   else {
      /* flat shading: the provoking vertex (v2) supplies the color */
      constant_plane(v2->color[RCOMP], rPlane);
      constant_plane(v2->color[GCOMP], gPlane);
      constant_plane(v2->color[BCOMP], bPlane);
      constant_plane(v2->color[ACOMP], aPlane);
   }
   span.arrayMask |= SPAN_RGBA;

#if defined(DO_ATTRIBS)
   {
      /* attributes are interpolated perspective-correctly: multiply by
       * 1/w per vertex here, the span code divides by the interpolated
       * 1/w (wPlane) later */
      const GLfloat invW0 = v0->attrib[FRAG_ATTRIB_WPOS][3];
      const GLfloat invW1 = v1->attrib[FRAG_ATTRIB_WPOS][3];
      const GLfloat invW2 = v2->attrib[FRAG_ATTRIB_WPOS][3];
      compute_plane(p0, p1, p2, invW0, invW1, invW2, wPlane);
      span.attrStepX[FRAG_ATTRIB_WPOS][3] = plane_dx(wPlane);
      span.attrStepY[FRAG_ATTRIB_WPOS][3] = plane_dy(wPlane);
      ATTRIB_LOOP_BEGIN
         GLuint c;
         if (swrast->_InterpMode[attr] == GL_FLAT) {
            for (c = 0; c < 4; c++) {
               constant_plane(v2->attrib[attr][c] * invW2, attrPlane[attr][c]);
            }
         }
         else {
            for (c = 0; c < 4; c++) {
               const GLfloat a0 = v0->attrib[attr][c] * invW0;
               const GLfloat a1 = v1->attrib[attr][c] * invW1;
               const GLfloat a2 = v2->attrib[attr][c] * invW2;
               compute_plane(p0, p1, p2, a0, a1, a2, attrPlane[attr][c]);
            }
         }
         for (c = 0; c < 4; c++) {
            span.attrStepX[attr][c] = plane_dx(attrPlane[attr][c]);
            span.attrStepY[attr][c] = plane_dy(attrPlane[attr][c]);
         }
      ATTRIB_LOOP_END
   }
#endif

   /* Begin bottom-to-top scan over the triangle.
    * The long edge will either be on the left or right side of the
    * triangle.  We always scan from the long edge toward the shorter
    * edges, stopping when we find that coverage = 0.  If the long edge
    * is on the left we scan left-to-right.  Else, we scan right-to-left.
    */
   yMin = vMin->attrib[FRAG_ATTRIB_WPOS][1];
   yMax = vMax->attrib[FRAG_ATTRIB_WPOS][1];
   iyMin = (GLint) yMin;
   iyMax = (GLint) yMax + 1;

   if (ltor) {
      /* scan left to right */
      const GLfloat *pMin = vMin->attrib[FRAG_ATTRIB_WPOS];
      const GLfloat *pMid = vMid->attrib[FRAG_ATTRIB_WPOS];
      const GLfloat *pMax = vMax->attrib[FRAG_ATTRIB_WPOS];
      const GLfloat dxdy = majDx / majDy;
      const GLfloat xAdj = dxdy < 0.0F ? -dxdy : 0.0F;
      GLint iy;
#ifdef _OPENMP
/* rows are independent; span is firstprivate so each thread mutates its
 * own copy */
#pragma omp parallel for schedule(dynamic) private(iy) firstprivate(span)
#endif
      for (iy = iyMin; iy < iyMax; iy++) {
         GLfloat x = pMin[0] - (yMin - iy) * dxdy;
         GLint ix, startX = (GLint) (x - xAdj);
         GLuint count;
         GLfloat coverage = 0.0F;

#ifdef _OPENMP
         /* each thread needs to use a different (global) SpanArrays variable */
         span.array = SWRAST_CONTEXT(ctx)->SpanArrays + omp_get_thread_num();
#endif
         /* skip over fragments with zero coverage */
         while (startX < MAX_WIDTH) {
            coverage = compute_coveragef(pMin, pMid, pMax, startX, iy);
            if (coverage > 0.0F)
               break;
            startX++;
         }
         /* enter interior of triangle */
         ix = startX;

#if defined(DO_ATTRIBS)
         /* compute attributes at left-most fragment */
         span.attrStart[FRAG_ATTRIB_WPOS][3] = solve_plane(ix + 0.5F, iy + 0.5F, wPlane);
         ATTRIB_LOOP_BEGIN
            GLuint c;
            for (c = 0; c < 4; c++) {
               span.attrStart[attr][c] = solve_plane(ix + 0.5F, iy + 0.5F, attrPlane[attr][c]);
            }
         ATTRIB_LOOP_END
#endif

         count = 0;
         while (coverage > 0.0F) {
            /* (cx,cy) = center of fragment */
            const GLfloat cx = ix + 0.5F, cy = iy + 0.5F;
            SWspanarrays *array = span.array;
            array->coverage[count] = coverage;
#ifdef DO_Z
            array->z[count] = (GLuint) solve_plane(cx, cy, zPlane);
#endif
            array->rgba[count][RCOMP] = solve_plane_chan(cx, cy, rPlane);
            array->rgba[count][GCOMP] = solve_plane_chan(cx, cy, gPlane);
            array->rgba[count][BCOMP] = solve_plane_chan(cx, cy, bPlane);
            array->rgba[count][ACOMP] = solve_plane_chan(cx, cy, aPlane);
            ix++;
            count++;
            coverage = compute_coveragef(pMin, pMid, pMax, ix, iy);
         }

         if (ix > startX) {
            span.x = startX;
            span.y = iy;
            span.end = (GLuint) ix - (GLuint) startX;
            _swrast_write_rgba_span(ctx, &span);
         }
      }
   }
   else {
      /* scan right to left */
      const GLfloat *pMin = vMin->attrib[FRAG_ATTRIB_WPOS];
      const GLfloat *pMid = vMid->attrib[FRAG_ATTRIB_WPOS];
      const GLfloat *pMax = vMax->attrib[FRAG_ATTRIB_WPOS];
      const GLfloat dxdy = majDx / majDy;
      const GLfloat xAdj = dxdy > 0 ? dxdy : 0.0F;
      GLint iy;
#ifdef _OPENMP
/* rows are independent; span is firstprivate so each thread mutates its
 * own copy */
#pragma omp parallel for schedule(dynamic) private(iy) firstprivate(span)
#endif
      for (iy = iyMin; iy < iyMax; iy++) {
         GLfloat x = pMin[0] - (yMin - iy) * dxdy;
         GLint ix, left, startX = (GLint) (x + xAdj);
         GLuint count, n;
         GLfloat coverage = 0.0F;

#ifdef _OPENMP
         /* each thread needs to use a different (global) SpanArrays variable */
         span.array = SWRAST_CONTEXT(ctx)->SpanArrays + omp_get_thread_num();
#endif
         /* make sure we're not past the window edge */
         if (startX >= ctx->DrawBuffer->_Xmax) {
            startX = ctx->DrawBuffer->_Xmax - 1;
         }

         /* skip fragments with zero coverage */
         while (startX > 0) {
            coverage = compute_coveragef(pMin, pMax, pMid, startX, iy);
            if (coverage > 0.0F)
               break;
            startX--;
         }
         /* enter interior of triangle */
         ix = startX;
         count = 0;
         while (coverage > 0.0F) {
            /* (cx,cy) = center of fragment */
            const GLfloat cx = ix + 0.5F, cy = iy + 0.5F;
            SWspanarrays *array = span.array;
            /* fragments are stored at their absolute x index here and
             * shifted down to 0..n-1 below */
            ASSERT(ix >= 0);
            array->coverage[ix] = coverage;
#ifdef DO_Z
            array->z[ix] = (GLuint) solve_plane(cx, cy, zPlane);
#endif
            array->rgba[ix][RCOMP] = solve_plane_chan(cx, cy, rPlane);
            array->rgba[ix][GCOMP] = solve_plane_chan(cx, cy, gPlane);
            array->rgba[ix][BCOMP] = solve_plane_chan(cx, cy, bPlane);
            array->rgba[ix][ACOMP] = solve_plane_chan(cx, cy, aPlane);
            ix--;
            count++;
            coverage = compute_coveragef(pMin, pMax, pMid, ix, iy);
         }

#if defined(DO_ATTRIBS)
         /* compute attributes at left-most fragment */
         span.attrStart[FRAG_ATTRIB_WPOS][3] = solve_plane(ix + 1.5F, iy + 0.5F, wPlane);
         ATTRIB_LOOP_BEGIN
            GLuint c;
            for (c = 0; c < 4; c++) {
               span.attrStart[attr][c] = solve_plane(ix + 1.5F, iy + 0.5F, attrPlane[attr][c]);
            }
         ATTRIB_LOOP_END
#endif

         if (startX > ix) {
            n = (GLuint) startX - (GLuint) ix;
            left = ix + 1;

            /* shift all values to the left */
            /* XXX this is temporary */
            {
               SWspanarrays *array = span.array;
               GLint j;
               for (j = 0; j < (GLint) n; j++) {
                  array->coverage[j] = array->coverage[j + left];
                  COPY_CHAN4(array->rgba[j], array->rgba[j + left]);
#ifdef DO_Z
                  array->z[j] = array->z[j + left];
#endif
               }
            }

            span.x = left;
            span.y = iy;
            span.end = n;
            _swrast_write_rgba_span(ctx, &span);
         }
      }
   }
}

#undef DO_Z
#undef DO_ATTRIBS
#undef DO_OCCLUSION_TEST
opi.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <omp.h> int main(int argc, char **argv) { //seed random number generator // Q2b: get the number of threads to run with from agrv and // add OpenMP API code to set number of threads here int Nthreads = atoi(argv[0]); omp_set_num_threads(Nthreads); struct drand48_data *drandData; drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data)); // Q2c: add an OpenMP parallel region here, wherein each thread initializes // one entry in drandData using srand48_r and seed based on thread number double start_time = omp_get_wtime(); #pragma omp parallel { long int seed = Nthreads; srand48_r(seed, drandData+0); } long long int Ntrials = 10000000; //need running tallies long long int Ntotal=0; long long int Ncircle=0; #pragma omp parallel { #pragma omp for reduction(+:Ncircle) for (long long int n=0; n<Ntrials; n++) { double rand1; double rand2; //gererate two random numbers (use the thread id to offset drandData) drand48_r(drandData+0, &rand1); drand48_r(drandData+0, &rand2); double x = -1 + 2*rand1; //shift to [-1,1] double y = -1 + 2*rand2; //check if its in the circle if (sqrt(x*x+y*y)<=1) Ncircle++; Ntotal++; if (n%100 ==0) { double pi = 4.0*Ncircle/ (double) (n); //printf("Our estimate of pi is %g \n", pi); } } } double end_time = omp_get_wtime(); printf("Time is: %f \n", end_time-start_time); double pi = 4.0*Ncircle/ (double) (Ntotal); printf("Our final estimate of pi is %g \n", pi); free(drandData); return 0; }
GB_ijsort.c
//------------------------------------------------------------------------------
// GB_ijsort: sort an index array I and remove duplicates
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// Sort an index array and remove duplicates. In MATLAB notation:

/*
    [I1 I1k] = sort (I) ;
    Iduplicate = [I1 (1:end-1) == I1 (2:end)), false] ;
    I2 = I1 (~Iduplicate) ;
    I2k = I1k (~Iduplicate) ;
*/

#include "GB_ij.h"
#include "GB_sort.h"

// free all workspace arrays (each macro call is a no-op for NULL pointers)
#define GB_FREE_WORK \
{ \
    GB_FREE_MEMORY (W0, ni, sizeof (GrB_Index)) ; \
    GB_FREE_MEMORY (W1, ni, sizeof (GrB_Index)) ; \
    GB_FREE_MEMORY (I1, ni, sizeof (GrB_Index)) ; \
    GB_FREE_MEMORY (I1k, ni, sizeof (GrB_Index)) ; \
}

GrB_Info GB_ijsort
(
    const GrB_Index *restrict I,    // size ni, where ni > 1 always holds
    int64_t *restrict p_ni,         // input: size of I, output: # of indices in I2
    GrB_Index *restrict *p_I2,      // size ni2, where I2 [0..ni2-1]
        // contains the sorted indices with duplicates removed.
    GrB_Index *restrict *p_I2k,     // output array of size ni2
    GB_Context Context
)
{

    //--------------------------------------------------------------------------
    // check inputs
    //--------------------------------------------------------------------------

    ASSERT (I != NULL) ;
    ASSERT (p_ni != NULL) ;
    ASSERT (p_I2 != NULL) ;
    ASSERT (p_I2k != NULL) ;

    //--------------------------------------------------------------------------
    // get inputs
    //--------------------------------------------------------------------------

    GrB_Index *restrict I1 = NULL ;     // sortable copy of I
    GrB_Index *restrict I1k = NULL ;    // original position of each I1 entry
    GrB_Index *restrict I2 = NULL ;     // final sorted, deduplicated output
    GrB_Index *restrict I2k = NULL ;
    int64_t *restrict W0 = NULL ;       // mergesort workspace
    int64_t *restrict W1 = NULL ;
    int64_t ni = *p_ni ;
    ASSERT (ni > 1) ;

    //--------------------------------------------------------------------------
    // determine the number of threads to use
    //--------------------------------------------------------------------------

    GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
    int nthreads = GB_nthreads (ni, chunk, nthreads_max) ;

    //--------------------------------------------------------------------------
    // allocate workspace
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (I1, ni, sizeof (GrB_Index)) ;
    GB_MALLOC_MEMORY (I1k, ni, sizeof (GrB_Index)) ;
    if (I1 == NULL || I1k == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // copy I into I1 and construct I1k
    //--------------------------------------------------------------------------

    GB_memcpy (I1, I, ni * sizeof (GrB_Index), nthreads) ;

    // I1k [k] = k, so the sort below also records where each index came from
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t k = 0 ; k < ni ; k++)
    {
        I1k [k] = k ;
    }

    //--------------------------------------------------------------------------
    // sort [I1 I1k]
    //--------------------------------------------------------------------------

    if (nthreads == 1)
    {

        //----------------------------------------------------------------------
        // sequential quicksort
        //----------------------------------------------------------------------

        GB_qsort_2 ((int64_t *) I1, (int64_t *) I1k, ni) ;

    }
    else
    {

        //----------------------------------------------------------------------
        // parallel mergesort
        //----------------------------------------------------------------------

        GB_MALLOC_MEMORY (W0, ni, sizeof (int64_t)) ;
        GB_MALLOC_MEMORY (W1, ni, sizeof (int64_t)) ;
        if (W0 == NULL || W1 == NULL)
        {
            // out of memory
            GB_FREE_WORK ;
            return (GB_OUT_OF_MEMORY) ;
        }
        GB_msort_2 ((int64_t *) I1, (int64_t *) I1k, W0, W1, ni, nthreads) ;
        GB_FREE_MEMORY (W0, ni, sizeof (int64_t)) ;
        GB_FREE_MEMORY (W1, ni, sizeof (int64_t)) ;
    }

    //--------------------------------------------------------------------------
    // count unique entries in I1
    //--------------------------------------------------------------------------

    int ntasks = (nthreads == 1) ? 1 : (32 * nthreads) ;
    ntasks = GB_IMIN (ntasks, ni) ;
    ntasks = GB_IMAX (ntasks, 1) ;
    int64_t Count [ntasks+1] ;

    // Count [tid] = number of unique entries in task tid's slice of I1;
    // task 0 also counts I1 [0], which is always unique
    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t kfirst, klast, my_count = (tid == 0) ? 1 : 0 ;
        GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
        for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
        {
            // an entry is unique iff it differs from its predecessor
            if (I1 [k-1] != I1 [k])
            {
                my_count++ ;
            }
        }
        Count [tid] = my_count ;
    }

    // after the cumulative sum, Count [tid] is the position in I2 where
    // task tid writes its first unique entry, and Count [ntasks] == ni2
    GB_cumsum (Count, ntasks, NULL, 1) ;
    int64_t ni2 = Count [ntasks] ;

    //--------------------------------------------------------------------------
    // allocate the result I2
    //--------------------------------------------------------------------------

    GB_MALLOC_MEMORY (I2 , ni2, sizeof (GrB_Index)) ;
    GB_MALLOC_MEMORY (I2k, ni2, sizeof (GrB_Index)) ;
    if (I2 == NULL || I2k == NULL)
    {
        // out of memory
        GB_FREE_WORK ;
        GB_FREE_MEMORY (I2 , ni2, sizeof (GrB_Index)) ;
        GB_FREE_MEMORY (I2k, ni2, sizeof (GrB_Index)) ;
        return (GB_OUT_OF_MEMORY) ;
    }

    //--------------------------------------------------------------------------
    // construct the new list I2 from I1, removing duplicates
    //--------------------------------------------------------------------------

    #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1)
    for (int tid = 0 ; tid < ntasks ; tid++)
    {
        int64_t kfirst, klast, k2 = Count [tid] ;
        GB_PARTITION (kfirst, klast, ni, tid, ntasks) ;
        if (tid == 0)
        {
            // the first entry in I1 is never a duplicate
            I2 [k2] = I1 [0] ;
            I2k [k2] = I1k [0] ;
            k2++ ;
        }
        for (int64_t k = GB_IMAX (kfirst,1) ; k < klast ; k++)
        {
            if (I1 [k-1] != I1 [k])
            {
                I2 [k2] = I1 [k] ;
                I2k [k2] = I1k [k] ;
                k2++ ;
            }
        }
    }

    //--------------------------------------------------------------------------
    // check result: compare with single-pass, single-threaded algorithm
    //--------------------------------------------------------------------------

    #ifdef GB_DEBUG
    {
        // dedup I1 in place sequentially and verify it matches I2
        int64_t ni1 = 1 ;
        for (int64_t k = 1 ; k < ni ; k++)
        {
            if (I1 [ni1-1] != I1 [k])
            {
                I1 [ni1] = I1 [k] ;
                I1k [ni1] = I1k [k] ;
                ni1++ ;
            }
        }
        // printf ("OK "GBd" "GBd"\n", ni1, ni) ;
        ASSERT (ni1 == ni2) ;
        for (int64_t k = 0 ; k < ni1 ; k++)
        {
            ASSERT (I1 [k] == I2 [k]) ;
            ASSERT (I1k [k] == I2k [k]) ;
        }
    }
    #endif

    //--------------------------------------------------------------------------
    // free workspace and return the new sorted list
    //--------------------------------------------------------------------------

    GB_FREE_WORK ;
    *(p_I2 ) = (GrB_Index *) I2 ;
    *(p_I2k) = (GrB_Index *) I2k ;
    *(p_ni ) = (int64_t ) ni2 ;
    return (GrB_SUCCESS) ;
}
helper.c
#include "helper.h"
/* NOTE(review): include directly what this translation unit uses
 * (malloc/free: stdlib.h, memcpy: string.h, printf: stdio.h) instead of
 * relying on helper.h to provide them transitively.  Harmless duplicates
 * if helper.h already includes them. */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <omp.h>
#include <math.h>
#include <assert.h>

/*
 * Allocate and fill the "scheme 1" symmetric tridiagonal test matrix:
 * every off-diagonal entry is -1 and the n diagonal entries are spaced
 * linearly from 1.0 to 100.0.
 *
 * D receives the n diagonal entries, E the n-1 off-diagonal entries.
 * The caller owns (and must free) both arrays.
 */
void createMatrixScheme1(double** D, double** E, int n) {
    assert(n > 0);
    *D = malloc(n * sizeof(double));
    *E = malloc((n-1) * sizeof(double));
    assert(*D != NULL);
    assert(n < 2 || *E != NULL); /* malloc(0) may legally return NULL */
    /* Fix: for n == 1 the original computed 99.0/0 == inf and then
     * (*D)[0] = 1.0 + 0 * inf == NaN; use spacing 0 in that case so the
     * single diagonal entry is 1.0. */
    double diagSpacing = (n > 1) ? (100.0-1.0) / (n-1) : 0.0;
    int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
    for (i = 0; i < n-1; ++i) {
        (*E)[i] = -1; // off diagonal
        (*D)[i] = 1.0 + i * diagSpacing;
    }
    (*D)[n-1] = 1.0 + (n-1) * diagSpacing; // one more diagonal element than off diagonal elements
}

/*
 * Allocate and fill the "scheme 2" tridiagonal test matrix (the 1-D
 * discrete Laplacian): diagonal 2, off-diagonal -1.
 * D receives n entries, E receives n-1 entries; caller frees both.
 */
void createMatrixScheme2(double **D, double **E, int n) {
    assert(n > 0);
    *D = malloc(n * sizeof(double));
    *E = malloc((n-1) * sizeof(double));
    assert(*D != NULL);
    assert(n < 2 || *E != NULL); /* malloc(0) may legally return NULL */
    int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
    for (i = 0; i < n-1; ++i) {
        (*E)[i] = -1; // off diagonal
        (*D)[i] = 2;
    }
    (*D)[n-1] = 2.0; // one more diagonal element than off diagonal elements
}

/*
 * Build the length (nq1+nq2) vector z = [ Q1l ; Q2f / theta ], where Q1l is
 * the last row of Q1 (nq1 entries) and Q2f the first row of Q2 (nq2
 * entries).  Precondition: theta != 0.
 * Returns a newly malloc'd array owned by the caller.
 */
double* computeZ(double* Q1l, double* Q2f, int nq1, int nq2, double theta) {
    assert(theta != 0.0);
    double* z = malloc((nq1+nq2) * sizeof(double));
    assert(z != NULL);
    // copy last row of Q1 into z
    memcpy(z, Q1l, nq1*sizeof(double));
    // multiply first row of Q2 by theta^-1
    int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
    for(i = 0; i < nq2; ++i) {
        z[nq1+i] = Q2f[i] / theta;
    }
    return z;
}

/*
 * Analytic eigenvalues of the scheme-2 matrix:
 * lambda_k = 2 + 2*cos(k*pi/(n+1)), k = 1..n.
 * Returns a newly malloc'd array of n values owned by the caller.
 */
double* computeEigenvaluesOfScheme2(int n) {
    assert(n > 0);
    double* L = malloc(n * sizeof(double));
    assert(L != NULL);
    int i;
#pragma omp parallel for default(shared) private(i) schedule(static)
    for (i = 0; i < n; ++i)
        L[i] = 2 + 2 * cos((M_PI*(i+1))/(n+1));
    return L;
}

/* Print n vector entries, comma separated, followed by a newline. */
void printVector(double* vec, int n) {
    assert(n > 0); /* original read vec[n-1] unconditionally */
    int i = 0;
    for (i = 0; i < n-1; ++i)
        printf("%g, ", vec[i]);
    printf("%g\n", vec[n-1]);
}

/*
 * Print a tridiagonal matrix in compact 3-column form
 * (subdiagonal, diagonal, superdiagonal); missing corner entries are
 * printed as 0.  D holds n diagonal, E holds n-1 off-diagonal entries.
 */
void printTridiagonalMatrix(double* D, double* E, int n) {
    assert(n>0);
    if (n == 1)
        printf("%g\n", D[0]);
    else if (n == 2) {
        printf("%g\t%g\n", D[0], E[0]);
        printf("%g\t%g\n", E[0], D[1]);
    } else {
        int i = 0;
        printf("0\t%g\t%g\n", D[0], E[0]);
        for (i = 1; i < n-1; ++i)
            printf("%g\t%g\t%g\n", E[i-1], D[i], E[i]);
        printf("%g\t%g\t0\n", E[n-2], D[n-1]);
    }
}

/* Print a dense row-major r x c matrix, one row per line. */
void printMatrix(double* M, int r, int c) {
    int i;
    for (i = 0; i < r; ++i) {
        printVector(M+i*c,c);
    }
}

/*
 * qsort-style comparator ordering DiagElem values ascending by their .e
 * field.  Returns -1/0/1 as required by qsort.
 */
int compareDiagElem( const void* a, const void* b) {
    DiagElem e1 = * ( (DiagElem*) a );
    DiagElem e2 = * ( (DiagElem*) b );
    if ( e1.e == e2.e )
        return 0;
    else if ( e1.e < e2.e )
        return -1;
    else
        return 1;
}
GB_binop__eq_fp32.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

// NOTE(review): this translation unit instantiates the EQ operator for FP32
// inputs (bool cij = (float aij == float bij)).  Each GB(...) function below
// only sets up typed macros and pulls in a shared template; any behavioral
// fix belongs in the Generator/ sources, not here.

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB (_AaddB__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_08__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_02__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_04__eq_fp32)
// A.*B function (eWiseMult):       GB (_AemultB_bitmap__eq_fp32)
// A*D function (colscale):         GB (_AxD__eq_fp32)
// D*A function (rowscale):         GB (_DxB__eq_fp32)
// C+=B function (dense accum):     GB (_Cdense_accumB__eq_fp32)
// C+=b function (dense accum):     GB (_Cdense_accumb__eq_fp32)
// C+=A+B function (dense ewise3):  GB ((none))
// C=A+B function (dense ewise3):   GB (_Cdense_ewise3_noaccum__eq_fp32)
// C=scalar+B                       GB (_bind1st__eq_fp32)
// C=scalar+B'                     GB (_bind1st_tran__eq_fp32)
// C=A+scalar                       GB (_bind2nd__eq_fp32)
// C=A'+scalar                     GB (_bind2nd_tran__eq_fp32)

// C type:   bool
// A type:   float
// A pattern? 0
// B type:   float
// B pattern? 0

// BinaryOp: cij = (aij == bij)

#define GB_ATYPE \
    float

#define GB_BTYPE \
    float

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    float aij = GBX (Ax, pA, A_iso)

// true if values of A are not used
#define GB_A_IS_PATTERN \
    0 \

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    float bij = GBX (Bx, pB, B_iso)

// true if values of B are not used
#define GB_B_IS_PATTERN \
    0 \

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = (x == y) ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    0

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_EQ || GxB_NO_FP32 || GxB_NO_EQ_FP32)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// NOTE(review): EQ is not in that list, so this variant is compiled out
// and the generated name is the placeholder "(none)".

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NOTE(review): the accumulator body is compiled out (#if 0) for this
    // operator; the function intentionally does no work and reports success.
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__eq_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // NOTE(review): compiled out (#if 0) for this operator, as above.
    #if 0
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *restrict Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__eq_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // alpha/beta are only read when is_eWiseUnion is true
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__eq_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        // (EQ is commutative, so GB_BINOP_FLIP is 0 here.)
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__eq_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB (_bind1st__eq_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        // skip entries not present in the bitmap Bb
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (x == bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB (_bind2nd__eq_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        // skip entries not present in the bitmap Ab
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (aij == y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (x == aij) ;              \
}

GrB_Info GB (_bind1st_tran__eq_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for the remainder of this file
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (aij == y) ;              \
}

GrB_Info GB (_bind2nd_tran__eq_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
pjencss.c
// Copyright (c) 2018 Intel Corporation // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in all // copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE // SOFTWARE. 
/*
//
//  Purpose:
//    Downsampling functions
//
//  Contents:
//    mfxiSampleDownH2V1_JPEG_8u_C1R
//    mfxiSampleDownH2V2_JPEG_8u_C1R
//
*/

#include "precomp.h"

#ifdef _OPENMP
#include <omp.h>
#endif

#ifndef __OWNJ_H__
#include "ownj.h"
#endif
#ifndef __PJENCSS_H__
#include "pjencss.h"
#endif

/* ---------------------- library functions definitions -------------------- */

/* ////////////////////////////////////////////////////////////////////////////
//  Name:
//    mfxiSampleDownH2V1_JPEG_8u_C1R
//
//  Purpose:
//    Sample down by horizontal 2:1
//
//  Parameter:
//    pSrc      pointer to input data
//    srcStep   line offset in input data
//    srcSize   ROI size
//    pDst      pointer to output array
//    dstStep   line offset in output data
//    dstSize   ROI size
//
//  Returns:
//    IppStatus
//
//  Notes:
//
*/

IPPFUN(IppStatus, mfxiSampleDownH2V1_JPEG_8u_C1R, (
  const Ipp8u* pSrc,
  int          srcStep,
  IppiSize     srcSize,
  Ipp8u*       pDst,
  int          dstStep,
  IppiSize     dstSize))
{
  int i;

  /* validate pointers/steps/sizes; returns an error status on failure */
  IPP_BAD_ENC_SS_C1C1_RET();

/* NOTE(review): only this function's OpenMP pragma is additionally guarded by
 * #ifndef _W7; the H2V2 variant below has no such guard — confirm whether the
 * asymmetry is intentional for the _W7 build. */
#ifndef _W7
#ifdef _OPENMP
#pragma omp parallel for IPP_OMP_NUM_THREADS() \
  shared(pSrc,srcStep,pDst,dstStep,dstSize) \
  private(i) default(none) \
  if((dstSize.height*dstSize.width) > (OMP_BOUNDARY))
#endif
#endif
  /* one output row per input row: vertical resolution is unchanged */
  for(i = 0; i < dstSize.height; i++)
  {
    const Ipp8u* src = pSrc + i * srcStep;
    Ipp8u*       dst = pDst + i * dstStep;

#if (_IPP >= _IPP_M6) || (_IPP32E >= _IPP32E_M7) || (_IPP64 >= _IPP64_I7) || (_IPPLRB>=_IPPLRB_B1)
    /* optimized row kernel on supported CPU targets */
    mfxownpj_SampleDownH2V1_JPEG_8u_C1(src,dst,dstSize.width);
#else
    {
      int j, k;
      for(k = 0, j = 0; j < dstSize.width; j++, k += 2)
      {
        /* round-half-up average of each horizontal pixel pair */
        dst[j] = (Ipp8u)((src[k + 0] + src[k + 1] + 1) >> 1);
      }
    }
#endif
  }

  return ippStsNoErr;
} /* mfxiSampleDownH2V1_JPEG_8u_C1R() */


/* ////////////////////////////////////////////////////////////////////////////
//  Name:
//    mfxiSampleDownH2V2_JPEG_8u_C1R
//
//  Purpose:
//    Sample down by horizontal 2:1 and vertical 2:1
//
//  Parameter:
//    pSrc      pointer to input data
//    srcStep   line offset in input data
//    srcSize   ROI size
//    pDst      pointer to output array
//    dstStep   line offset in output data
//    dstSize   ROI size
//
//  Returns:
//    IppStatus
//
//  Notes:
//
*/

IPPFUN(IppStatus, mfxiSampleDownH2V2_JPEG_8u_C1R, (
  const Ipp8u* pSrc,
  int          srcStep,
  IppiSize     srcSize,
  Ipp8u*       pDst,
  int          dstStep,
  IppiSize     dstSize))
{
  int i;

  /* validate pointers/steps/sizes; returns an error status on failure */
  IPP_BAD_ENC_SS_C1C1_RET();

#ifdef _OPENMP
#pragma omp parallel for IPP_OMP_NUM_THREADS() \
  shared(pSrc,srcStep,pDst,dstStep,dstSize) \
  private(i) default(none) \
  if((dstSize.height*dstSize.width) > (OMP_BOUNDARY))
#endif
  /* each output row consumes two input rows (srcStep << 1) */
  for(i = 0; i < dstSize.height; i++)
  {
    const Ipp8u* src = pSrc + i * (srcStep << 1);
    Ipp8u*       dst = pDst + i * dstStep;

#if (_IPP >= _IPP_M6) || (_IPP32E >= _IPP32E_M7) || (_IPP64 >= _IPP64_I7) || (_IPPLRB>=_IPPLRB_B1)
    /* optimized two-row kernel on supported CPU targets */
    mfxownpj_SampleDownH2V2_JPEG_8u_C1(src,srcStep,dst,dstSize.width);
#else
    {
      int j, k;
      for(k = 0, j = 0; j < dstSize.width; j++, k += 2)
      {
        /* 2x2 average with +3 bias, i.e. rounded upward.
         * NOTE(review): a round-half-up average would use +2; confirm the +3
         * bias matches the optimized mfxownpj_ kernel above so both code
         * paths produce identical output. */
        dst[j] = (Ipp8u)
          ((src[k + 0 + 0*srcStep] + src[k + 1 + 0*srcStep] +
            src[k + 0 + 1*srcStep] + src[k + 1 + 1*srcStep] + 3) >> 2);
      }
    }
#endif
  }

  return ippStsNoErr;
} /* mfxiSampleDownH2V2_JPEG_8u_C1R() */
render_prefix_tree_util.h
/* * LSGL - Large Scale Graphics Library * * Copyright (c) 2013 - 2015 Advanced Institute for Computational Science, *RIKEN. * All rights reserved. * */ // // References // - Maximizing Parallelism in the Construction of BVHs, Octrees, and k-d Trees // Tero Karras, HPG 2012 // - LUT-based morton code calculation // https://github.com/aavenel/FastMortonKeys // - Produce interleaving bit patterns (morton keys) for 32 bit , 64 bit and // 128bit // http://stackoverflow.com/questions/18529057/produce-interleaving-bit-patterns-morton-keys-for-32-bit-64-bit-and-128bit #ifndef __PREFIX_TREE_UTIL_H__ #define __PREFIX_TREE_UTIL_H__ #include <cstdio> #include <cstdlib> #include <cassert> #include <cstring> #include <new> #include <iostream> #ifdef _MSC_VER #define _USE_MATH_DEFINES #include <math.h> // M_PI #else #include <cmath> #endif #ifdef _OPENMP #include <omp.h> #endif #define __STDC_FORMAT_MACROS #include <inttypes.h> #include <limits> #include <algorithm> #include "render_common.h" #include "render_timerutil.h" namespace lsgl { namespace render { typedef enum { NODE_TYPE_INTERMEDIATE = 0, NODE_TYPE_LEAF = 1, } NodeType; // @todo { save memory } struct NodeInfo32 { uint32_t childIndex; // left = childIndex, right = childIndex + 1 NodeType leftType; NodeType rightType; }; struct NodeInfo64 { uint64_t index; // left = index, right = index + 1 NodeType leftType; NodeType rightType; }; // 30bit = up to 2G primitives. // 60bit = For large scale dataset. 
// A primitive index paired with its 30-bit Morton key.
struct IndexKey30 {
  uint32_t index; // primitive index
  uint32_t code;  // 30-bit Morton code
};

// A primitive index paired with its 60-bit Morton key.
struct IndexKey60 {
  uint64_t index; // primitive index
  uint64_t code;  // 60-bit Morton code
};

// Radix-sort predicate for 30-bit keys: true when the examined bit of the
// key is clear.
class RadixComparatorUInt30 {
  const int bit_; // bit position [0..29] to test

public:
  RadixComparatorUInt30(int offset) : bit_(offset) {}

  // Functor; bit_ is assumed to be less than 30.
  bool operator()(IndexKey30 value) const {
    return (value.code & (1 << bit_)) == 0;
  }
};

// Radix-sort predicate for 60-bit keys: true when the examined bit of the
// key is clear.
class RadixComparatorUInt60 {
  const int bit_; // bit position [0..59] to test

public:
  RadixComparatorUInt60(int offset) : bit_(offset) {}

  // Functor; bit_ is assumed to be less than 60.
  bool operator()(IndexKey60 value) const {
    return (value.code & (1ULL << bit_)) == 0;
  }
};

//
// --
//

// Morton code utilities

// Spread the low 10 bits of n three positions apart (bits 0,3,...,27).
static inline uint32_t PartBy2_30(uint32_t n) {
  uint32_t v = n & 0x000003ff;
  v = (v ^ (v << 16)) & 0xff0000ff;
  v = (v ^ (v << 8)) & 0x0300f00f;
  v = (v ^ (v << 4)) & 0x030c30c3;
  v = (v ^ (v << 2)) & 0x09249249;
  return v;
}

// Spread the low 21 bits of n three positions apart (bits 0,3,...,60).
static inline uint64_t PartBy2_60(uint32_t n) {
  uint64_t x = n & 0x1fffff; // keep 21 bits
  x = (x | x << 32) & 0x1f00000000ffff;
  x = (x | x << 16) & 0x1f0000ff0000ff;
  x = (x | x << 8) & 0x100f00f00f00f00f;
  x = (x | x << 4) & 0x10c30c30c30c30c3;
  x = (x | x << 2) & 0x1249249249249249;
  return x;
}

// Interleave three 10-bit coordinates into one 30-bit Morton key
// (x in the highest slot of each bit triple, then y, then z).
static inline uint32_t ConstructMortionBits30(uint32_t x, uint32_t y,
                                              uint32_t z) {
  const uint32_t xs = PartBy2_30(x);
  const uint32_t ys = PartBy2_30(y);
  const uint32_t zs = PartBy2_30(z);
  return (xs << 2) | (ys << 1) | zs;
}

// Interleave three 21-bit coordinates into one Morton key
// (x in the highest slot of each bit triple, then y, then z).
static inline uint64_t ConstructMortionBits60(uint32_t x, uint32_t y,
                                              uint32_t z) {
  const uint64_t xs = PartBy2_60(x);
  const uint64_t ys = PartBy2_60(y);
  const uint64_t zs = PartBy2_60(z);
  return (xs << 2) | (ys << 1) | zs;
}

// Get 30bit mortion code.
// Get 30bit mortion code for point p inside the box [bmin, bmax], where
// invx/invy/invz are the per-axis quantization scales.
// NOTE(review): the quantized indices are not clamped here; a point exactly
// on the far boundary can quantize to 1024 and PartBy2_30 masks it back to
// 0 — confirm callers keep points strictly inside the box or clamp first.
static inline uint32_t MortionCode30(const real3 &p, const real3 &bmin,
                                     real invx, real invy, real invz) {
  uint32_t ix = (uint32_t)((p[0] - bmin[0]) * invx);
  uint32_t iy = (uint32_t)((p[1] - bmin[1]) * invy);
  uint32_t iz = (uint32_t)((p[2] - bmin[2]) * invz);
  uint32_t code = ConstructMortionBits30(ix, iy, iz);
  // printf("p = %f, %f, %f\n", p[0], p[1], p[2]);
  // printf("bmin = %f, %f, %f\n", bmin[0], bmin[1], bmin[2]);
  // printf("inv = %f, %f, %f\n", invx, invy, invz);
  // printf("ijk = %d, %d, %d, code = %d\n", ix, iy, iz, code);
  return code;
}

// Get 60bit mortion code. Same scheme as MortionCode30 with 21 bits per axis.
static inline uint64_t MortionCode60(const real3 &p, const real3 &bmin,
                                     real invx, real invy, real invz) {
  uint32_t ix = (uint32_t)((p[0] - bmin[0]) * invx);
  uint32_t iy = (uint32_t)((p[1] - bmin[1]) * invy);
  uint32_t iz = (uint32_t)((p[2] - bmin[2]) * invz);
  uint64_t code = ConstructMortionBits60(ix, iy, iz);
  // printf("p = %f, %f, %f\n", p[0], p[1], p[2]);
  // printf("bmin = %f, %f, %f\n", bmin[0], bmin[1], bmin[2]);
  // printf("inv = %f, %f, %f\n", invx, invy, invz);
  // printf("ijk = %d, %d, %d, code = %d\n", ix, iy, iz, code);
  return code;
}

// Debug helper: render x as a 32-character binary string (MSB first).
// NOTE(review): for negative x the %2 / /2 arithmetic does not produce a
// two's-complement bit pattern; intended for non-negative debug values.
static std::string BitString32(int x) {
  char buf[32 + 1];
  buf[32] = '\0';

  for (int i = 0; i < 32; i++) {
    buf[i] = '0';
  }

  for (int i = 0; i < 32; i++) {
    if (i != 0 && x == 0)
      break;

    if (x % 2 == 0)
      buf[(32 - 1) - i] = '0';
    else
      buf[(32 - 1) - i] = '1';

    x = x / 2;
  }

  return std::string(buf);
}

// Debug helper: render x as a 64-character binary string (MSB first).
static std::string BitString64(uint64_t x) {
  char buf[64 + 1];
  buf[64] = '\0';

  for (int i = 0; i < 64; i++) {
    buf[i] = '0';
  }

  for (int i = 0; i < 64; i++) {
    if (i != 0 && x == 0)
      break;

    if (x % 2 == 0)
      buf[(64 - 1) - i] = '0';
    else
      buf[(64 - 1) - i] = '1';

    x = x / 2;
  }

  return std::string(buf);
}

// Recursive in-place quicksort of v by ascending .code over the inclusive
// index range [firstIdx, lastIdx].  startIdx/endIdx hold the bounds of the
// two partitions: [startIdx[0], endIdx[0]] (left) and
// [startIdx[1], endIdx[1]] (right).
static void QuickSortKey30(IndexKey30 *v, int firstIdx, int lastIdx) {
  int startIdx[2], endIdx[2];
  IndexKey30 pivot, temp;

  if (firstIdx < lastIdx) {
    // Hoare-style partition around the middle element's code
    startIdx[1] = firstIdx;
    endIdx[0] = lastIdx;
    pivot = v[(firstIdx + lastIdx) / 2];

    while (startIdx[1] <= endIdx[0]) {
      while (v[startIdx[1]].code < pivot.code)
        startIdx[1]++;
      while (pivot.code < v[endIdx[0]].code)
        endIdx[0]--;

      if (startIdx[1] <= endIdx[0]) {
        temp = v[startIdx[1]];
        v[startIdx[1]] = v[endIdx[0]];
        v[endIdx[0]] = temp;
        startIdx[1]++;
        endIdx[0]--;
      }
    }

    startIdx[0] = firstIdx;
    endIdx[1] = lastIdx;

    {
      for (size_t i = 0; i <= 1; i++) {
        QuickSortKey30(v, startIdx[i], endIdx[i]);
      }
    }
  }
}

// Simple in-place OpenMP parallel quick sorter
// Same partitioning as QuickSortKey30, but the two sub-sorts of the top
// level run as OpenMP worksharing iterations; recursion below that is
// sequential QuickSortKey30.
static void QuickSortKey30OMP(IndexKey30 *v, int firstIdx, int lastIdx) {
  int startIdx[2], endIdx[2];
  IndexKey30 pivot, temp;

  if (firstIdx < lastIdx) {
    startIdx[1] = firstIdx;
    endIdx[0] = lastIdx;
    pivot = v[(firstIdx + lastIdx) / 2];

    while (startIdx[1] <= endIdx[0]) {
      while (v[startIdx[1]].code < pivot.code)
        startIdx[1]++;
      while (pivot.code < v[endIdx[0]].code)
        endIdx[0]--;

      if (startIdx[1] <= endIdx[0]) {
        temp = v[startIdx[1]];
        v[startIdx[1]] = v[endIdx[0]];
        v[endIdx[0]] = temp;
        startIdx[1]++;
        endIdx[0]--;
      }
    }

    startIdx[0] = firstIdx;
    endIdx[1] = lastIdx;

#ifdef _OPENMP
#pragma omp parallel
#endif
    {
#ifdef _OPENMP
#pragma omp for nowait
#endif
      for (int i = 0; i <= 1; i++) {
        QuickSortKey30(v, startIdx[i], endIdx[i]);
      }
    }
  }
}

#define MAX_RADIX_SORT_THREADS (8)

// Assume less than 2G items
// Reference:
// - Introduction to GPU Radix Sort
// http://www.heterogeneouscompute.org/wordpress/wp-content/uploads/2011/06/RadixSort.pdf
#ifdef _OPENMP
// Parallel LSB radix sort of [begin, end) by .code: four 8-bit passes of
// count / exclusive-scan / scatter, ping-ponging between the caller's
// buffer and a temporary.  After the even number (4) of buffer swaps the
// sorted data ends up back in the caller's buffer.
static void RadixSort30OMP(IndexKey30 *begin, IndexKey30 *end) {
  unsigned int n = end - begin;

  // Small inputs: not worth the parallel setup cost.
  if (n < 1024) {
    QuickSortKey30(begin, 0, n - 1);
    return;
  }

  IndexKey30 *begin1 = new IndexKey30[end - begin];
  IndexKey30 *end1 = begin1 + (end - begin);

  // Process 8bits(256 counts) each.
  for (unsigned shift = 0; shift < 32; shift += 8) {
    // unsigned int count[0x100] = {0};
    unsigned int local_count[MAX_RADIX_SORT_THREADS][0x100];
    unsigned int local_offset[MAX_RADIX_SORT_THREADS][0x100];

    IndexKey30 *keys = begin;

    // 1. Count: each thread histograms its contiguous share of the input.
    #pragma omp parallel num_threads(MAX_RADIX_SORT_THREADS)
    {
      int tid = omp_get_thread_num();
      memset(local_count[tid], 0, sizeof(unsigned int) * 0x100);

      unsigned int startIdx = (tid * n) / (MAX_RADIX_SORT_THREADS);
      unsigned int endIdx =
          (std::min)(n, ((tid + 1) * n) / (MAX_RADIX_SORT_THREADS));
      // printf("range (%d, %d)\n", startIdx, endIdx);

      for (size_t i = startIdx; i < endIdx; i++) {
        local_count[tid][((keys[i].code) >> shift) & 0xFF]++;
      }
    }

    // 2. Scan (serial): turn per-thread counts into per-thread, per-bucket
    // starting offsets into the destination buffer.
    {
      for (int j = 0; j < MAX_RADIX_SORT_THREADS; j++) {
        memset(local_offset[j], 0, sizeof(unsigned int) * 0x100);
      }

      unsigned int local_sum[0x100];

      for (int i = 0; i < 0x100; i++) {
        for (int j = 1; j < MAX_RADIX_SORT_THREADS; j++) {
          // printf("local_count[%d] = %d\n", i, local_count[j-1][i]);
          local_offset[j][i] = local_offset[j - 1][i] + local_count[j - 1][i];
        }
        local_sum[i] = local_offset[MAX_RADIX_SORT_THREADS - 1][i] +
                       local_count[MAX_RADIX_SORT_THREADS - 1][i];
        // printf("local_sum[%d] = %d\n", i, local_sum[i]);
      }

      for (int i = 1; i < 0x100; i++) {
        local_sum[i] += local_sum[i - 1];
      }
      // printf("local_sum = %d\n", local_sum[0xff]);
      assert(local_sum[0xff] <= n);

      for (int i = 1; i < 0x100; i++) {
        for (int j = 0; j < MAX_RADIX_SORT_THREADS; j++) {
          local_offset[j][i] += local_sum[i - 1];
        }
      }
    }

    // IndexKey30 *bucket[0x100], *q = begin1;
    // Store offset
    // for (int i = 0; i < 0x100; q += count[i++]) {
    //  bucket[i] = q;
    //}

    // 3. Scatter: each thread writes its elements to its disjoint bucket
    // ranges computed in the scan, so no synchronization is needed.
    #pragma omp parallel num_threads(MAX_RADIX_SORT_THREADS)
    {
      int tid = omp_get_thread_num();

      // Compute per-thread offset pointer
      IndexKey30 *bucket[0x100];
      for (int i = 0; i < 0x100; i++) {
        bucket[i] = begin1 + local_offset[tid][i];
      }

      unsigned int startIdx = (tid * n) / (MAX_RADIX_SORT_THREADS);
      unsigned int endIdx =
          (std::min)(n, ((tid + 1) * n) / (MAX_RADIX_SORT_THREADS));

      for (size_t i = startIdx; i < endIdx; i++) {
        IndexKey30 *p = begin + i;
        *bucket[((p->code) >> shift) & 0xFF]++ = *p;
      }
    }

    // Ping-pong source and destination for the next pass.
    std::swap(begin, begin1);
    std::swap(end, end1);
  }

  // After 4 swaps, begin1 again names the temporary allocation.
  delete[] begin1;
}
#endif

// Serial LSB radix sort of [begin, end) by .code: four 8-bit passes,
// ping-ponging with a temporary buffer (see RadixSort30OMP).
static void RadixSort30(IndexKey30 *begin, IndexKey30 *end) {
  unsigned int n = end - begin;

  if (n < 1024) {
    QuickSortKey30(begin, 0, n - 1);
    return;
  }

  IndexKey30 *begin1 = new IndexKey30[end - begin];
  IndexKey30 *end1 = begin1 + (end - begin);

  // Process 8bits(256 counts) each.
  for (unsigned shift = 0; shift < 32; shift += 8) {
    unsigned int count[0x100] = {0};

    IndexKey30 *keys = begin;

    // 1. Count
    {
      unsigned int startIdx = 0;
      unsigned int endIdx = end1 - begin1;
      for (size_t i = startIdx; i < endIdx; i++) {
        count[((keys[i].code) >> shift) & 0xFF]++;
      }
    }

    // Bucket start pointers from the running prefix of counts.
    IndexKey30 *bucket[0x100], *q = begin1;
    for (int i = 0; i < 0x100; q += count[i++]) {
      bucket[i] = q;
    }

    // 2. Scatter
    {
      unsigned int startIdx = 0;
      unsigned int endIdx = end1 - begin1;

      for (size_t i = startIdx; i < endIdx; i++) {
        IndexKey30 *p = begin + i;
        *bucket[((p->code) >> shift) & 0xFF]++ = *p;
      }
    }

    std::swap(begin, begin1);
    std::swap(end, end1);
  }

  delete[] begin1;
}

// Merge the two sorted runs a[low..pivot] and a[pivot+1..high] (inclusive)
// using b as scratch, then copy the merged run back into a.
static void Merge30(IndexKey30 *a, IndexKey30 *b, unsigned int low,
                    unsigned int pivot, unsigned int high) {
  unsigned int h, i, j, k;
  h = low;
  i = low;
  j = pivot + 1;

  while ((h <= pivot) && (j <= high)) {
    if (a[h].code <= a[j].code) {
      b[i] = a[h];
      h++;
    } else {
      b[i] = a[j];
      j++;
    }
    i++;
  }

  // Copy whichever run has elements remaining.
  if (h > pivot) {
    for (k = j; k <= high; k++) {
      b[i] = a[k];
      i++;
    }
  } else {
    for (k = h; k <= pivot; k++) {
      b[i] = a[k];
      i++;
    }
  }

  for (k = low; k <= high; k++)
    a[k] = b[k];
}

// Sort keys in range [low, high]
// Top-down merge sort with scratch buffer b; small ranges fall back to
// quicksort, large ranges recurse as OpenMP tasks.
// NOTE(review): for task parallelism to take effect this is presumably
// called from inside an omp parallel/single region — confirm the caller.
// Merge30 is also applied after the quicksort path; redundant (both halves
// are already sorted) but harmless.
static void MergeSort30(IndexKey30 *a, IndexKey30 *b, unsigned int low,
                        unsigned int high) {
  unsigned int pivot;

  if (low < high) {
    pivot = (low + high) / 2;

    if (high - low < 1024 * 10) {
      // MergeSort30(a, b, low, pivot);
      // MergeSort30(a, b, pivot + 1, high);
      QuickSortKey30(a, low, high);
    } else {
#if defined(_MSC_VER)
      // Visual Studio doesn't support openmp task yet?
      MergeSort30(a, b, low, pivot);
      MergeSort30(a, b, pivot + 1, high);
#else
#ifdef _OPENMP
#pragma omp task
#endif
      MergeSort30(a, b, low, pivot);
#ifdef _OPENMP
#pragma omp task
#endif
      MergeSort30(a, b, pivot + 1, high);
#ifdef _OPENMP
#pragma omp taskwait
#endif
#endif
    }

    Merge30(a, b, low, pivot, high);
  }
}

// Sort Morton code with radix-sort by LSB
// Stable LSB radix sort via repeated stable_partition, one pass per bit.
static void RadixSortByMortionCode30LSB(IndexKey30 *firstIdx,
                                        IndexKey30 *lastIdx) {
#if 1
  for (int lsb = 0; lsb < 30; ++lsb) {
    std::stable_partition(firstIdx, lastIdx, RadixComparatorUInt30(lsb));
  }
#else
  QuickSortKey30(firstIdx, 0, lastIdx - firstIdx);
#endif
}

#if defined(_OPENMP) && !defined(_MSC_VER)
// Recursive MSB radix sort worker: partition on the current bit, then sort
// the halves as OpenMP tasks (only spawned while msb > 20 to limit task
// count).  Must run inside an active parallel region — see
// RadixSortByMortionCode30MSB below.
static void RadixSortByMortionCode30MSBTask(IndexKey30 *firstIdx,
                                            IndexKey30 *lastIdx,
                                            int msb = 29) {
  if ((firstIdx != lastIdx) && (msb >= 0)) {

    // Sequential
    IndexKey30 *midIdx =
        std::partition(firstIdx, lastIdx, RadixComparatorUInt30(msb));
    msb--; // decrement most-significant-bit

#pragma omp task firstprivate(midIdx) if (msb > 20) // Do not spawn many
                                                    // tasks for better
                                                    // performance.
    RadixSortByMortionCode30MSBTask(firstIdx, midIdx, msb); // sort left partition

#pragma omp task firstprivate(midIdx) if (msb > 20) // Do not spawn many
                                                    // tasks for better
                                                    // performance.
    RadixSortByMortionCode30MSBTask(midIdx, lastIdx, msb); // sort right partition

#pragma omp taskwait
  }
}
#endif

// Sort Morton code with radix-sort by MSB(recursive)
// Entry point: with OpenMP, sets up the parallel/single region for the
// task-based worker above; otherwise sorts recursively in place.
static void RadixSortByMortionCode30MSB(IndexKey30 *firstIdx,
                                        IndexKey30 *lastIdx, int msb = 29) {
#if defined(_OPENMP) && !defined(_MSC_VER)
#pragma omp parallel
  {
#pragma omp single
    { RadixSortByMortionCode30MSBTask(firstIdx, lastIdx, msb); }
  }
#else
  if ((firstIdx != lastIdx) && (msb >= 0)) {
    IndexKey30 *midIdx =
        std::partition(firstIdx, lastIdx, RadixComparatorUInt30(msb));
    msb--; // decrement most-significant-bit
    RadixSortByMortionCode30MSB(firstIdx, midIdx, msb); // sort left partition
    RadixSortByMortionCode30MSB(midIdx, lastIdx, msb);  // sort right partition
  }
#endif
}

// Stable LSB radix sort for 60-bit keys, one stable_partition per bit.
static void RadixSortByMortionCode60LSB(IndexKey60 *firstIdx,
                                        IndexKey60 *lastIdx) {
  for (int lsb = 0; lsb < 60; ++lsb) {
    std::stable_partition(firstIdx, lastIdx, RadixComparatorUInt60(lsb));
  }
}

// Recursive MSB radix sort for 60-bit keys (sequential).
static void RadixSortByMortionCode60MSB(IndexKey60 *firstIdx,
                                        IndexKey60 *lastIdx, int msb = 59) {
  if ((firstIdx != lastIdx) && (msb >= 0)) {
    IndexKey60 *midIdx =
        std::partition(firstIdx, lastIdx, RadixComparatorUInt60(msb));
    msb--; // decrement most-significant-bit
    RadixSortByMortionCode60MSB(firstIdx, midIdx, msb); // sort left partition
    RadixSortByMortionCode60MSB(midIdx, lastIdx, msb);  // sort right partition
  }
#if 0
#endif
}

// Morton-code builders for points/primitives over [startIdx, endIdx);
// implemented in the corresponding source file (declarations only here).
void CalculateMortonCodes30(uint32_t *codes, const float *points,
                            const real3 &bmin, const real3 &bmax,
                            int64_t startIdx, int64_t endIdx);

void CalculateMortonCodes30SIMD(uint32_t *codes, const float *points,
                                const real3 &bmin, const real3 &bmax,
                                int64_t startIdx, int64_t endIdx);

void CalculateMortonCodesTriangleFloat30(uint32_t *codes,
                                         const float *vertices,
                                         const uint32_t *faces,
                                         const real3 &bmin, const real3 &bmax,
                                         int64_t startIdx, int64_t endIdx);

void CalculateMortonCodesTetraFloat30(uint32_t *codes, const float *vertices,
                                      const uint32_t *faces, const real3 &bmin,
                                      const real3 &bmax, int64_t startIdx,
                                      int64_t endIdx);
void CalculateMortonCodesTetraDouble30(uint32_t *codes, const double *vertices, const uint32_t *faces, const real3 &bmin, const real3 &bmax, int64_t startIdx, int64_t endIdx); void CalculateMortonCodesSolidFloat30(uint32_t *codes, int numVertsPerSolid, const float *vertices, const uint32_t *indices, const real3 &bmin, const real3 &bmax, int64_t startIdx, int64_t endIdx); void CalculateMortonCodesSolidDouble30(uint32_t *codes, int numVertsPerSolid, const double *vertices, const uint32_t *indices, const real3 &bmin, const real3 &bmax, int64_t startIdx, int64_t endIdx); void CalculateMortonCodes60(uint64_t *codes, const float *points, const real3 &bmin, const real3 &bmax, int64_t startIdx, int64_t endIdx); // Construct binary radix tree by 30bit Morton code. NodeInfo32 ConstructBinaryRadixTree30(const IndexKey30 *keys, int i, // i in range [0, n-2] uint32_t n); NodeInfo64 ConstructBinaryRadixTree60(const IndexKey60 *keys, int i, // i in range [0, n-2] uint64_t n); } // namespace } // namespace #endif // __PREFIX_TREE_UTIL_H__
6423.c
/* POLYBENCH/GPU-OPENMP
 *
 * This file is a part of the Polybench/GPU-OpenMP suite
 *
 * Contact:
 * William Killian <killian@udel.edu>
 *
 * Copyright 2013, The University of Delaware
 */
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>

/* Include polybench common header. */
#include <polybench.h>

/* Include benchmark-specific header. */
/* Default data type is double, default size is 4096x4096. */
#include "convolution-2d.h"

/* Array initialization.
   Fills A with the deterministic pattern (i + j) / nj so that runs are
   reproducible and the live-out data printed by print_array is stable. */
static void
init_array (int ni, int nj,
	    DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj))
{
  // printf("Initializing Array\n");
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	A[i][j] = ((DATA_TYPE) (i + j) / nj);
      }
}

/* DCE code. Must scan the entire live-out data.
   Can be used also to check the correctness of the output. */
static void
print_array(int ni, int nj,
	    DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;

  for (i = 0; i < ni; i++)
    for (j = 0; j < nj; j++)
      {
	fprintf(stderr, DATA_PRINTF_MODIFIER, B[i][j]);
	/* Break output into rows of 20 values for readability. */
	if ((i * NJ + j) % 20 == 0) fprintf(stderr, "\n");
      }
  fprintf(stderr, "\n");
}

/* Main computational kernel. The whole function will be timed,
   including the call and return.
   Applies a fixed 3x3 stencil over the interior of A, writing into B;
   the one-element border of B is left untouched.  The collapsed i/j
   nest is shared between 2 threads with a dynamic, chunk-size-1
   schedule (the configuration under measurement for this benchmark). */
static void
kernel_conv2d(int ni, int nj,
	      DATA_TYPE POLYBENCH_2D(A,NI,NJ,ni,nj),
	      DATA_TYPE POLYBENCH_2D(B,NI,NJ,ni,nj))
{
  int i, j;
#pragma scop
  #pragma omp parallel for private(j) collapse(2) schedule(dynamic, 1) num_threads(2)
  for (i = 1; i < _PB_NI - 1; ++i) {
    for (j = 1; j < _PB_NJ - 1; ++j) {
      /* Hard-coded 3x3 convolution weights, row by row. */
      B[i][j] = 0.2 * A[i-1][j-1] + 0.5 * A[i-1][j] + -0.8 * A[i-1][j+1]
	      + -0.3 * A[ i ][j-1] + 0.6 * A[ i ][j] + -0.9 * A[ i ][j+1]
	      + 0.4 * A[i+1][j-1] + 0.7 * A[i+1][j] + 0.1 * A[i+1][j+1];
    }
  }
#pragma endscop
  // printf("Kernal computation complete !!\n");
}

/* Driver: allocate, initialize, time the kernel, print live-out data. */
int main(int argc, char** argv)
{
  /* Retrieve problem size. */
  int ni = NI;
  int nj = NJ;

  /* Variable declaration/allocation. */
  POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NJ, ni, nj);
  POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NI, NJ, ni, nj);

  /* Initialize array(s). */
  init_array (ni, nj, POLYBENCH_ARRAY(A));

  /* Start timer. */
  //polybench_start_instruments;
  polybench_timer_start();

  /* Run kernel. */
  kernel_conv2d (ni, nj, POLYBENCH_ARRAY(A), POLYBENCH_ARRAY(B));

  /* Stop and print timer. */
  polybench_timer_stop();
  polybench_timer_print();
  //polybench_stop_instruments;
  //polybench_print_instruments;

  /* Prevent dead-code elimination. All live-out data must be printed
     by the function call in argument. */
  polybench_prevent_dce(print_array(ni, nj, POLYBENCH_ARRAY(B)));

  /* Be clean. */
  POLYBENCH_FREE_ARRAY(A);
  POLYBENCH_FREE_ARRAY(B);

  return 0;
}
atomic_write_codegen.c
// RUN: %clang_cc1 -verify -triple x86_64-apple-darwin10 -fopenmp -x c -emit-llvm %s -o - | FileCheck %s // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -emit-pch -o %t %s // RUN: %clang_cc1 -fopenmp -x c -triple x86_64-apple-darwin10 -include-pch %t -verify %s -emit-llvm -o - | FileCheck %s // expected-no-diagnostics // REQUIRES: x86-registered-target #ifndef HEADER #define HEADER _Bool bv, bx; char cv, cx; unsigned char ucv, ucx; short sv, sx; unsigned short usv, usx; int iv, ix; unsigned int uiv, uix; long lv, lx; unsigned long ulv, ulx; long long llv, llx; unsigned long long ullv, ullx; float fv, fx; double dv, dx; long double ldv, ldx; _Complex int civ, cix; _Complex float cfv, cfx; _Complex double cdv, cdx; typedef int int4 __attribute__((__vector_size__(16))); int4 int4x; struct BitFields { int : 32; int a : 31; } bfx; struct BitFields_packed { int : 32; int a : 31; } __attribute__ ((__packed__)) bfx_packed; struct BitFields2 { int : 31; int a : 1; } bfx2; struct BitFields2_packed { int : 31; int a : 1; } __attribute__ ((__packed__)) bfx2_packed; struct BitFields3 { int : 11; int a : 14; } bfx3; struct BitFields3_packed { int : 11; int a : 14; } __attribute__ ((__packed__)) bfx3_packed; struct BitFields4 { short : 16; int a: 1; long b : 7; } bfx4; struct BitFields4_packed { short : 16; int a: 1; long b : 7; } __attribute__ ((__packed__)) bfx4_packed; typedef float float2 __attribute__((ext_vector_type(2))); float2 float2x; // Register "0" is currently an invalid register for global register variables. // Use "esp" instead of "0". 
// register int rix __asm__("0"); register int rix __asm__("esp"); int main() { // CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write bx = bv; // CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write cx = cv; // CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write ucx = ucv; // CHECK: load i16, i16* // CHECK: store atomic i16 #pragma omp atomic write sx = sv; // CHECK: load i16, i16* // CHECK: store atomic i16 #pragma omp atomic write usx = usv; // CHECK: load i32, i32* // CHECK: store atomic i32 #pragma omp atomic write ix = iv; // CHECK: load i32, i32* // CHECK: store atomic i32 #pragma omp atomic write uix = uiv; // CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write lx = lv; // CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write ulx = ulv; // CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write llx = llv; // CHECK: load i64, i64* // CHECK: store atomic i64 #pragma omp atomic write ullx = ullv; // CHECK: load float, float* // CHECK: bitcast float {{.*}} to i32 // CHECK: store atomic i32 {{.*}}, i32* bitcast (float* #pragma omp atomic write fx = fv; // CHECK: load double, double* // CHECK: bitcast double {{.*}} to i64 // CHECK: store atomic i64 {{.*}}, i64* bitcast (double* #pragma omp atomic write dx = dv; // CHECK: [[LD:%.+]] = load x86_fp80, x86_fp80* // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[LD]], x86_fp80* [[LDTEMP]] // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[LDTEMP:%.*]] to i128* // CHECK: [[LD:%.+]] = load i128, i128* [[BITCAST]] // CHECK: store atomic i128 [[LD]], i128* bitcast (x86_fp80* #pragma omp atomic write ldx = ldv; // CHECK: [[REAL_VAL:%.+]] = load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 0) // CHECK: [[IMG_VAL:%.+]] = load i32, i32* 
getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.*}}, i32 0, i32 1) // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1 // CHECK: store i32 [[REAL_VAL]], i32* [[TEMP_REAL_REF]] // CHECK: store i32 [[IMG_VAL]], i32* [[TEMP_IMG_REF]] // CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8* // CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cix = civ; // CHECK: [[REAL_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0) // CHECK: [[IMG_VAL:%.+]] = load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 1) // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { float, float }, { float, float }* [[TEMP]], i32 0, i32 1 // CHECK: store float [[REAL_VAL]], float* [[TEMP_REAL_REF]] // CHECK: store float [[IMG_VAL]], float* [[TEMP_IMG_REF]] // CHECK: [[BITCAST:%.+]] = bitcast { float, float }* [[TEMP]] to i8* // CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ float, float }* @{{.*}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cfx = cfv; // CHECK: [[REAL_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 0) // CHECK: [[IMG_VAL:%.+]] = load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.*}}, i32 0, i32 1) // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { double, double }, { double, double }* [[TEMP]], i32 0, i32 1 // CHECK: store double 
[[REAL_VAL]], double* [[TEMP_REAL_REF]] // CHECK: store double [[IMG_VAL]], double* [[TEMP_IMG_REF]] // CHECK: [[BITCAST:%.+]] = bitcast { double, double }* [[TEMP]] to i8* // CHECK: call void @__atomic_store(i64 16, i8* bitcast ({ double, double }* @{{.*}} to i8*), i8* [[BITCAST]], i32 5) // CHECK: call{{.*}} @__kmpc_flush( #pragma omp atomic seq_cst write cdx = cdv; // CHECK: load i8, i8* // CHECK: store atomic i64 #pragma omp atomic write ulx = bv; // CHECK: load i8, i8* // CHECK: store atomic i8 #pragma omp atomic write bx = cv; // CHECK: load i8, i8* // CHECK: store atomic i8 // CHECK: call{{.*}} @__kmpc_flush( #pragma omp atomic write, seq_cst cx = ucv; // CHECK: load i16, i16* // CHECK: store atomic i64 #pragma omp atomic write ulx = sv; // CHECK: load i16, i16* // CHECK: store atomic i64 #pragma omp atomic write lx = usv; // CHECK: load i32, i32* // CHECK: store atomic i32 // CHECK: call{{.*}} @__kmpc_flush( #pragma omp atomic seq_cst, write uix = iv; // CHECK: load i32, i32* // CHECK: store atomic i32 #pragma omp atomic write ix = uiv; // CHECK: load i64, i64* // CHECK: [[VAL:%.+]] = trunc i64 %{{.*}} to i32 // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1 // CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]] // CHECK: store i32 0, i32* [[TEMP_IMG_REF]] // CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8* // CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cix = lv; // CHECK: load i64, i64* // CHECK: store atomic i32 %{{.+}}, i32* bitcast (float* #pragma omp atomic write fx = ulv; // CHECK: load i64, i64* // CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* #pragma omp atomic write dx = llv; // CHECK: load i64, i64* // CHECK: [[VAL:%.+]] = uitofp i64 %{{.+}} to x86_fp80 // CHECK: 
[[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP:%.+]] to i8* // CHECK: call void @llvm.memset.p0i8.i64(i8* [[BITCAST]], i8 0, i64 16, i32 16, i1 false) // CHECK: store x86_fp80 [[VAL]], x86_fp80* [[TEMP]] // CHECK: [[BITCAST:%.+]] = bitcast x86_fp80* [[TEMP]] to i128* // CHECK: [[VAL:%.+]] = load i128, i128* [[BITCAST]] // CHECK: store atomic i128 [[VAL]], i128* bitcast (x86_fp80* #pragma omp atomic write ldx = ullv; // CHECK: load float, float* // CHECK: [[VAL:%.+]] = fptosi float %{{.*}} to i32 // CHECK: [[TEMP_REAL_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP:%.+]], i32 0, i32 0 // CHECK: [[TEMP_IMG_REF:%.+]] = getelementptr inbounds { i32, i32 }, { i32, i32 }* [[TEMP]], i32 0, i32 1 // CHECK: store i32 [[VAL]], i32* [[TEMP_REAL_REF]] // CHECK: store i32 0, i32* [[TEMP_IMG_REF]] // CHECK: [[BITCAST:%.+]] = bitcast { i32, i32 }* [[TEMP]] to i8* // CHECK: call void @__atomic_store(i64 8, i8* bitcast ({ i32, i32 }* @{{.+}} to i8*), i8* [[BITCAST]], i32 0) #pragma omp atomic write cix = fv; // CHECK: load double, double* // CHECK: store atomic i16 #pragma omp atomic write sx = dv; // CHECK: load x86_fp80, x86_fp80* // CHECK: store atomic i8 #pragma omp atomic write bx = ldv; // CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 0) // CHECK: load i32, i32* getelementptr inbounds ({ i32, i32 }, { i32, i32 }* @{{.+}}, i32 0, i32 1) // CHECK: icmp ne i32 %{{.+}}, 0 // CHECK: icmp ne i32 %{{.+}}, 0 // CHECK: or i1 // CHECK: store atomic i8 #pragma omp atomic write bx = civ; // CHECK: load float, float* getelementptr inbounds ({ float, float }, { float, float }* @{{.*}}, i32 0, i32 0) // CHECK: store atomic i16 #pragma omp atomic write usx = cfv; // CHECK: load double, double* getelementptr inbounds ({ double, double }, { double, double }* @{{.+}}, i32 0, i32 0) // CHECK: store atomic i64 #pragma omp atomic write llx = cdv; // CHECK-DAG: [[IDX:%.+]] = load i16, i16* @{{.+}} // CHECK-DAG: load i8, i8* // 
CHECK-DAG: [[VEC_ITEM_VAL:%.+]] = zext i1 %{{.+}} to i32 // CHECK: [[I128VAL:%.+]] = load atomic i128, i128* bitcast (<4 x i32>* [[DEST:@.+]] to i128*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_I128:%.+]] = phi i128 [ [[I128VAL]], %{{.+}} ], [ [[FAILED_I128_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[BITCAST:%.+]] = bitcast <4 x i32>* [[LDTEMP:%.+]] to i128* // CHECK: store i128 [[OLD_I128]], i128* [[BITCAST]], // CHECK: [[VEC_VAL:%.+]] = load <4 x i32>, <4 x i32>* [[LDTEMP]] // CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <4 x i32> [[VEC_VAL]], i32 [[VEC_ITEM_VAL]], i16 [[IDX]] // CHECK: store <4 x i32> [[NEW_VEC_VAL]], <4 x i32>* [[LDTEMP]] // CHECK: [[NEW_I128:%.+]] = load i128, i128* [[BITCAST]] // CHECK: [[RES:%.+]] = cmpxchg i128* bitcast (<4 x i32>* [[DEST]] to i128*), i128 [[OLD_I128]], i128 [[NEW_I128]] monotonic monotonic // CHECK: [[FAILED_I128_OLD_VAL:%.+]] = extractvalue { i128, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i128, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write int4x[sv] = bv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647 // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -2147483648 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i32* bitcast (i8* getelementptr (i8, i8* bitcast (%struct.BitFields* @{{.+}} to i8*), i64 4) to i32*), i32 [[OLD_BF_VALUE]], 
i32 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx.a = ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[BITCAST:%.+]] = bitcast i32* [[LDTEMP:%.+]] to i8* // CHECK: call void @__atomic_load(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST]], i32 0) // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]], // CHECK: store i32 [[OLD_BF_VALUE]], i32* [[LDTEMP1:%.+]], // CHECK: [[OLD_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP1]], // CHECK: [[BF_VALUE:%.+]] = and i32 [[NEW_VAL]], 2147483647 // CHECK: [[BF_CLEAR:%.+]] = and i32 [[OLD_BF_VALUE]], -2147483648 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP1]] // CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP]] to i8* // CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i32* [[LDTEMP1]] to i8* // CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 4, i8* getelementptr (i8, i8* bitcast (%struct.BitFields_packed* @{{.+}} to i8*), i64 4), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0) // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx_packed.a = ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ 
[[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 1 // CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 31 // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, 2147483647 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields2, %struct.BitFields2* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx2.a = ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8 // CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 1 // CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 7 // CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 127 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr (i8, i8* bitcast (%struct.BitFields2_packed* @{{.+}} to i8*), i64 3), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx2_packed.a = 
ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[PREV_VALUE:%.+]] = load atomic i32, i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i32 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[BF_AND:%.+]] = and i32 [[NEW_VAL]], 16383 // CHECK: [[BF_VALUE:%.+]] = shl i32 [[BF_AND]], 11 // CHECK: [[BF_CLEAR:%.+]] = and i32 %{{.+}}, -33552385 // CHECK: or i32 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i32 %{{.+}}, i32* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i32, i32* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i32* getelementptr inbounds (%struct.BitFields3, %struct.BitFields3* @{{.+}}, i32 0, i32 0), i32 [[OLD_BF_VALUE]], i32 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i32, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i32, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx3.a = ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[LDTEMP:%.+]] = bitcast i32* %{{.+}} to i24* // CHECK: [[BITCAST:%.+]] = bitcast i24* %{{.+}} to i8* // CHECK: call void @__atomic_load(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST]], i32 0) // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_VAL:%.+]] = load i24, i24* %{{.+}}, // CHECK: store i24 [[OLD_VAL]], i24* [[TEMP:%.+]], // CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i24 // CHECK: [[BF_AND:%.+]] = and i24 [[TRUNC]], 16383 // CHECK: [[BF_VALUE:%.+]] = shl i24 [[BF_AND]], 3 // CHECK: [[BF_CLEAR:%.+]] = and i24 %{{.+}}, -131065 // CHECK: or i24 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i24 %{{.+}}, i24* 
[[TEMP]] // CHECK: [[BITCAST_TEMP_OLD_BF_ADDR:%.+]] = bitcast i24* [[LDTEMP]] to i8* // CHECK: [[BITCAST_TEMP_NEW_BF_ADDR:%.+]] = bitcast i24* [[TEMP]] to i8* // CHECK: [[FAIL_SUCCESS:%.+]] = call zeroext i1 @__atomic_compare_exchange(i64 3, i8* getelementptr (i8, i8* bitcast (%struct.BitFields3_packed* @{{.+}} to i8*), i64 1), i8* [[BITCAST_TEMP_OLD_BF_ADDR]], i8* [[BITCAST_TEMP_NEW_BF_ADDR]], i32 0, i32 0) // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx3_packed.a = ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[ZEXT:%.+]] = zext i32 [[NEW_VAL]] to i64 // CHECK: [[BF_AND:%.+]] = and i64 [[ZEXT]], 1 // CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 16 // CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -65537 // CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx4.a = ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i32 // CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: 
[[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[TRUNC:%.+]] = trunc i32 [[NEW_VAL]] to i8 // CHECK: [[BF_VALUE:%.+]] = and i8 [[TRUNC]], 1 // CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, -2 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx4_packed.a = ldv; // CHECK: load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64 // CHECK: [[PREV_VALUE:%.+]] = load atomic i64, i64* bitcast (%struct.BitFields4* @{{.+}} to i64*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i64 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[BF_AND:%.+]] = and i64 [[NEW_VAL]], 127 // CHECK: [[BF_VALUE:%.+]] = shl i64 [[BF_AND]], 17 // CHECK: [[BF_CLEAR:%.+]] = and i64 %{{.+}}, -16646145 // CHECK: or i64 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i64 %{{.+}}, i64* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i64, i64* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (%struct.BitFields4* @{{.+}} to i64*), i64 [[OLD_BF_VALUE]], i64 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i64, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx4.b = ldv; // CHECK: 
load x86_fp80, x86_fp80* @{{.+}} // CHECK: [[NEW_VAL:%.+]] = fptosi x86_fp80 %{{.+}} to i64 // CHECK: [[PREV_VALUE:%.+]] = load atomic i8, i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_BF_VALUE:%.+]] = phi i8 [ [[PREV_VALUE]], %[[EXIT]] ], [ [[FAILED_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[TRUNC:%.+]] = trunc i64 [[NEW_VAL]] to i8 // CHECK: [[BF_AND:%.+]] = and i8 [[TRUNC]], 127 // CHECK: [[BF_VALUE:%.+]] = shl i8 [[BF_AND]], 1 // CHECK: [[BF_CLEAR:%.+]] = and i8 %{{.+}}, 1 // CHECK: or i8 [[BF_CLEAR]], [[BF_VALUE]] // CHECK: store i8 %{{.+}}, i8* [[LDTEMP:%.+]] // CHECK: [[NEW_BF_VALUE:%.+]] = load i8, i8* [[LDTEMP]] // CHECK: [[RES:%.+]] = cmpxchg i8* getelementptr inbounds (%struct.BitFields4_packed, %struct.BitFields4_packed* @{{.+}}, i32 0, i32 0, i64 2), i8 [[OLD_BF_VALUE]], i8 [[NEW_BF_VALUE]] monotonic monotonic // CHECK: [[FAILED_OLD_VAL]] = extractvalue { i8, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i8, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write bfx4_packed.b = ldv; // CHECK: load i64, i64* // CHECK: [[VEC_ITEM_VAL:%.+]] = uitofp i64 %{{.+}} to float // CHECK: [[I64VAL:%.+]] = load atomic i64, i64* bitcast (<2 x float>* [[DEST:@.+]] to i64*) monotonic // CHECK: br label %[[CONT:.+]] // CHECK: [[CONT]] // CHECK: [[OLD_I64:%.+]] = phi i64 [ [[I64VAL]], %{{.+}} ], [ [[FAILED_I64_OLD_VAL:%.+]], %[[CONT]] ] // CHECK: [[BITCAST:%.+]] = bitcast <2 x float>* [[LDTEMP:%.+]] to i64* // CHECK: store i64 [[OLD_I64]], i64* [[BITCAST]], // CHECK: [[VEC_VAL:%.+]] = load <2 x float>, <2 x float>* [[LDTEMP]] // CHECK: [[NEW_VEC_VAL:%.+]] = insertelement <2 x float> [[VEC_VAL]], float [[VEC_ITEM_VAL]], i64 0 // CHECK: store <2 x float> [[NEW_VEC_VAL]], <2 x float>* [[LDTEMP]] // CHECK: [[NEW_I64:%.+]] = load i64, i64* 
[[BITCAST]] // CHECK: [[RES:%.+]] = cmpxchg i64* bitcast (<2 x float>* [[DEST]] to i64*), i64 [[OLD_I64]], i64 [[NEW_I64]] monotonic monotonic // CHECK: [[FAILED_I64_OLD_VAL:%.+]] = extractvalue { i64, i1 } [[RES]], 0 // CHECK: [[FAIL_SUCCESS:%.+]] = extractvalue { i64, i1 } [[RES]], 1 // CHECK: br i1 [[FAIL_SUCCESS]], label %[[EXIT:.+]], label %[[CONT]] // CHECK: [[EXIT]] #pragma omp atomic write float2x.x = ulv; // CHECK: call i32 @llvm.read_register.i32( // CHECK: sitofp i32 %{{.+}} to double // CHECK: bitcast double %{{.+}} to i64 // CHECK: store atomic i64 %{{.+}}, i64* bitcast (double* @{{.+}} to i64*) seq_cst // CHECK: call{{.*}} @__kmpc_flush( #pragma omp atomic write seq_cst dv = rix; return 0; } #endif
parallel.c
/* Copyright (C) 2005-2017 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU Offloading and Multi Processing Library
   (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the (bare) PARALLEL construct.  */

#include "libgomp.h"
#include <limits.h>


/* Determine the number of threads to be launched for a PARALLEL construct.
   This algorithm is explicitly described in OpenMP 3.0 section 2.4.1.
   SPECIFIED is a combination of the NUM_THREADS clause and the IF clause.
   If the IF clause is false, SPECIFIED is forced to 1.  When NUM_THREADS
   is not present, SPECIFIED is 0.  */

unsigned
gomp_resolve_num_threads (unsigned specified, unsigned count)
{
  struct gomp_thread *thr = gomp_thread ();
  struct gomp_task_icv *icv;
  unsigned threads_requested, max_num_threads, num_threads;
  unsigned long busy;
  struct gomp_thread_pool *pool;

  icv = gomp_icv (false);

  /* A false IF clause, a non-nestable nested region, or exceeding the
     maximum active level each force a team of one thread.  */
  if (specified == 1)
    return 1;
  else if (thr->ts.active_level >= 1 && !icv->nest_var)
    return 1;
  else if (thr->ts.active_level >= gomp_max_active_levels_var)
    return 1;

  /* If NUM_THREADS not specified, use nthreads_var.  */
  if (specified == 0)
    threads_requested = icv->nthreads_var;
  else
    threads_requested = specified;

  max_num_threads = threads_requested;

  /* If dynamic threads are enabled, bound the number of threads
     that we launch.  */
  if (icv->dyn_var)
    {
      unsigned dyn = gomp_dynamic_max_threads ();
      if (dyn < max_num_threads)
	max_num_threads = dyn;

      /* Optimization for parallel sections.  */
      if (count && count < max_num_threads)
	max_num_threads = count;
    }

  /* UINT_MAX stands for infinity.  */
  if (__builtin_expect (icv->thread_limit_var == UINT_MAX, 1)
      || max_num_threads == 1)
    return max_num_threads;

  /* The threads_busy counter lives in thread_pool, if there
     isn't a thread_pool yet, there must be just one thread
     in the contention group.  If thr->team is NULL, this isn't
     nested parallel, so there is just one thread in the
     contention group as well, no need to handle it atomically.  */
  pool = thr->thread_pool;
  if (thr->ts.team == NULL || pool == NULL)
    {
      num_threads = max_num_threads;
      if (num_threads > icv->thread_limit_var)
	num_threads = icv->thread_limit_var;
      if (pool)
	pool->threads_busy = num_threads;
      return num_threads;
    }

#ifdef HAVE_SYNC_BUILTINS
  /* Reserve slots with a CAS loop on pool->threads_busy so that the
     contention-group thread limit holds under concurrent resolution.  */
  do
    {
      busy = pool->threads_busy;
      num_threads = max_num_threads;
      if (icv->thread_limit_var - busy + 1 < num_threads)
	num_threads = icv->thread_limit_var - busy + 1;
    }
  while (__sync_val_compare_and_swap (&pool->threads_busy, busy,
				      busy + num_threads - 1) != busy);
#else
  gomp_mutex_lock (&gomp_managed_threads_lock);
  num_threads = max_num_threads;
  busy = pool->threads_busy;
  if (icv->thread_limit_var - busy + 1 < num_threads)
    num_threads = icv->thread_limit_var - busy + 1;
  pool->threads_busy += num_threads - 1;
  gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif

  return num_threads;
}

/* Legacy (GOMP 1.0) entry point: resolve the team size and start the
   team.  The compiler-generated caller invokes FN itself and later
   calls GOMP_parallel_end.  */
void
GOMP_parallel_start (void (*fn) (void *), void *data, unsigned num_threads)
{
  num_threads = gomp_resolve_num_threads (num_threads, 0);
  gomp_team_start (fn, data, num_threads, 0, gomp_new_team (num_threads));
}

/* End a parallel region.  When a thread limit is in effect, also release
   the slots this team reserved in pool->threads_busy.  */
void
GOMP_parallel_end (void)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  if (__builtin_expect (icv->thread_limit_var != UINT_MAX, 0))
    {
      struct gomp_thread *thr = gomp_thread ();
      struct gomp_team *team = thr->ts.team;
      unsigned int nthreads = team ? team->nthreads : 1;
      gomp_team_end ();
      if (nthreads > 1)
	{
	  /* If not nested, there is just one thread in the
	     contention group left, no need for atomicity.  */
	  if (thr->ts.team == NULL)
	    thr->thread_pool->threads_busy = 1;
	  else
	    {
#ifdef HAVE_SYNC_BUILTINS
	      __sync_fetch_and_add (&thr->thread_pool->threads_busy,
				    1UL - nthreads);
#else
	      gomp_mutex_lock (&gomp_managed_threads_lock);
	      thr->thread_pool->threads_busy -= nthreads - 1;
	      gomp_mutex_unlock (&gomp_managed_threads_lock);
#endif
	    }
	}
    }
  else
    gomp_team_end ();
}
ialias (GOMP_parallel_end)

/* GOMP 4.0 combined entry point: start the team, run FN on the master
   thread as well, then end the region.  */
void
GOMP_parallel (void (*fn) (void *), void *data, unsigned num_threads,
	       unsigned int flags)
{
  num_threads = gomp_resolve_num_threads (num_threads, 0);
  gomp_team_start (fn, data, num_threads, flags, gomp_new_team (num_threads));
  fn (data);
  ialias_call (GOMP_parallel_end) ();
}

/* Return true iff cancellation of the construct(s) selected by WHICH has
   been requested.  No-op unless cancellation (OMP_CANCELLATION) is on.  */
bool
GOMP_cancellation_point (int which)
{
  if (!gomp_cancel_var)
    return false;

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      if (team == NULL)
	return false;
      return team->work_share_cancelled != 0;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup && thr->task->taskgroup->cancelled)
	return true;
      /* FALLTHRU into the GOMP_CANCEL_PARALLEL case,
	 as #pragma omp cancel parallel also cancels all explicit
	 tasks.  */
    }
  if (team)
    return gomp_team_barrier_cancelled (&team->barrier);
  return false;
}
ialias (GOMP_cancellation_point)

/* Request cancellation (DO_CANCEL true) or merely poll for it (false) for
   the construct(s) selected by WHICH; returns whether it is in effect.  */
bool
GOMP_cancel (int which, bool do_cancel)
{
  if (!gomp_cancel_var)
    return false;

  if (!do_cancel)
    return ialias_call (GOMP_cancellation_point) (which);

  struct gomp_thread *thr = gomp_thread ();
  struct gomp_team *team = thr->ts.team;
  if (which & (GOMP_CANCEL_LOOP | GOMP_CANCEL_SECTIONS))
    {
      /* In orphaned worksharing region, all we want to cancel
	 is current thread.  */
      if (team != NULL)
	team->work_share_cancelled = 1;
      return true;
    }
  else if (which & GOMP_CANCEL_TASKGROUP)
    {
      if (thr->task->taskgroup && !thr->task->taskgroup->cancelled)
	{
	  gomp_mutex_lock (&team->task_lock);
	  thr->task->taskgroup->cancelled = true;
	  gomp_mutex_unlock (&team->task_lock);
	}
      return true;
    }
  team->team_cancelled = 1;
  gomp_team_barrier_cancel (team);
  return true;
}

/* The public OpenMP API for thread and team related inquiries.  */

int
omp_get_num_threads (void)
{
  struct gomp_team *team = gomp_thread ()->ts.team;
  return team ? team->nthreads : 1;
}

int
omp_get_thread_num (void)
{
  return gomp_thread ()->ts.team_id;
}

/* This wasn't right for OpenMP 2.5.  Active region used to be non-zero
   when the IF clause doesn't evaluate to false, starting with OpenMP 3.0
   it is non-zero with more than one thread in the team.  */

int
omp_in_parallel (void)
{
  return gomp_thread ()->ts.active_level > 0;
}

int
omp_get_level (void)
{
  return gomp_thread ()->ts.level;
}

int
omp_get_ancestor_thread_num (int level)
{
  struct gomp_team_state *ts = &gomp_thread ()->ts;
  if (level < 0 || level > ts->level)
    return -1;
  /* Walk up to the requested nesting level via prev_ts links.  */
  for (level = ts->level - level; level > 0; --level)
    ts = &ts->team->prev_ts;
  return ts->team_id;
}

int
omp_get_team_size (int level)
{
  struct gomp_team_state *ts = &gomp_thread ()->ts;
  if (level < 0 || level > ts->level)
    return -1;
  for (level = ts->level - level; level > 0; --level)
    ts = &ts->team->prev_ts;
  if (ts->team == NULL)
    return 1;
  else
    return ts->team->nthreads;
}

int
omp_get_active_level (void)
{
  return gomp_thread ()->ts.active_level;
}

ialias (omp_get_num_threads)
ialias (omp_get_thread_num)
ialias (omp_in_parallel)
ialias (omp_get_level)
ialias (omp_get_ancestor_thread_num)
ialias (omp_get_team_size)
ialias (omp_get_active_level)
ThreadPool.h
/* Copyright (c) 2017, Michael Kazhdan All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. Neither the name of the Johns Hopkins University nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/
#ifndef THREADPOOL_H_
#define THREADPOOL_H_

#include <omp.h>
#include <algorithm>
#include <atomic>
#include <condition_variable>
#include <future>
#include <thread>
#include <vector>

// All-static helper exposing a chunked Parallel_for (executed here via an
// OpenMP parallel loop) and std::async-based "parallel sections".
// NOTE(review): std::function is used below but <functional> is not
// included directly -- presumably pulled in transitively; confirm on all
// target toolchains.
struct ThreadPool {
    // Number of consecutive iterations handed out as one unit of work.
    static const size_t chunk_size = 128;
    // NOTE(review): _Close / _RemainingTasks / _WaitingForWorkOrClose /
    // _DoneWithWork / _ThreadFunction look like remnants of a worker-thread
    // pool design, but no worker loop is visible in this chunk; Parallel_for
    // runs through OpenMP instead, so several of these members are written
    // but never consumed here.
    static bool _Close;
    static volatile unsigned int _RemainingTasks;
    static std::condition_variable _WaitingForWorkOrClose, _DoneWithWork;
    static std::vector<std::thread> _Threads;
    static std::function<void(unsigned int)> _ThreadFunction;

    // Run iterationFunction(thread, i) for every i in [begin, end).
    // Small ranges (or a single available thread) execute serially;
    // otherwise the range is cut into chunk_size-sized chunks and
    // distributed with "#pragma omp parallel for".
    static void Parallel_for(size_t begin, size_t end,
                             const std::function<void(unsigned int, size_t)>
                                 &iterationFunction) {
        if (begin >= end) {
            return;
        }
        size_t range = end - begin;
        size_t chunks = (range + chunk_size - 1) / chunk_size;
        unsigned int threads = (unsigned int)NumThreads();
        std::atomic<size_t> index;
        index.store(0);
        // Serial fallback: too little work to be worth chunking.
        if (range < chunk_size || threads == 1) {
            for (size_t i = begin; i < end; i++) {
                iterationFunction(0, i);
            }
            return;
        }
        // Execute one chunk: iterations [_begin, _end) on behalf of
        // `thread`.
        auto _ChunkFunction = [&iterationFunction, begin, end](
                                  unsigned int thread, size_t chunk) {
            const size_t _begin = begin + chunk_size * chunk;
            const size_t _end = std::min<size_t>(end, _begin + chunk_size);
            for (size_t i = _begin; i < _end; i++) {
                iterationFunction(thread, i);
            }
        };
        // NOTE(review): _ThreadFunction captures _ChunkFunction and index
        // by reference and is never invoked in this chunk; once this
        // function returns, the stored std::function holds dangling
        // references.  The OpenMP loop below does all the actual work.
        _ThreadFunction = [&_ChunkFunction, chunks, &index](unsigned int thread) {
            size_t chunk;
            while ((chunk = index.fetch_add(1)) < chunks) {
                _ChunkFunction(thread, chunk);
            }
        };
#pragma omp parallel for
        for (size_t c = 0; c < chunks; c++) {
            _ChunkFunction(omp_get_thread_num(), c);
        }
    }

    // Worker count plus one for the calling thread.
    static unsigned int NumThreads(void) {
        return (unsigned int)_Threads.size() + 1;
    }

    // (Re)initialize for numThreads total threads; the caller counts as
    // one, hence the decrement.  Any previously running workers are
    // signalled to close and joined first.
    // NOTE(review): _Threads.resize() default-constructs thread objects --
    // no worker threads are actually launched here.
    static void Init(
        unsigned int numThreads = std::thread::hardware_concurrency()) {
        if (_Threads.size() && !_Close) {
            _Close = true;
            _WaitingForWorkOrClose.notify_all();
            for (unsigned int t = 0; t < _Threads.size(); t++)
                _Threads[t].join();
        }
        _Close = true;
        numThreads--;
        _Threads.resize(numThreads);
    }

    // Signal shutdown, join all workers, and discard the thread objects.
    static void Terminate(void) {
        if (_Threads.size() && !_Close) {
            _Close = true;
            _WaitingForWorkOrClose.notify_all();
            for (unsigned int t = 0; t < _Threads.size(); t++)
                _Threads[t].join();
            _Threads.resize(0);
        }
    }

    // Launch each functor concurrently via std::async and block until all
    // have completed (futures' get() also rethrows any exception).
    template <typename... Functions>
    static void ParallelSections(const Functions &... functions) {
        std::vector<std::future<void>> futures(sizeof...(Functions));
        _ParallelSections(&futures[0], functions...);
        for (size_t t = 0; t < futures.size(); t++) futures[t].get();
    }

    // Recursion base case: launch the last functor asynchronously.
    template <typename Function>
    static void _ParallelSections(std::future<void> *futures,
                                  const Function &function) {
        *futures = std::async(std::launch::async, function);
    }

    // Launch `function` asynchronously, then recurse on the rest.
    template <typename Function, typename... Functions>
    static void _ParallelSections(std::future<void> *futures,
                                  const Function &function,
                                  const Functions &... functions) {
        *futures = std::async(std::launch::async, function);
        _ParallelSections(futures + 1, functions...);
    }
};

// Hack to avoid using the linker for now
// NOTE(review): defining the statics in the header violates the ODR if
// this header is included from more than one translation unit.
bool ThreadPool::_Close;
volatile unsigned int ThreadPool::_RemainingTasks;
std::condition_variable ThreadPool::_WaitingForWorkOrClose;
std::condition_variable ThreadPool::_DoneWithWork;
std::vector<std::thread> ThreadPool::_Threads;
std::function<void(unsigned int)> ThreadPool::_ThreadFunction;
#endif  // THREADPOOL_H_
Vec.h
#ifndef VEC_H #define VEC_H /* Szymon Rusinkiewicz Princeton University Vec.h Class for a constant-length vector Supports the following operations: vec v1; // Initialized to (0,0,0) vec v2(1,2,3); // Initialized to (1,2,3) vec v3(v2); // Copy constructor float farray[3]; vec v4 = vec(farray); // Explicit: "v4 = farray" won't work Vec<3,double> vd; // The "vec" used above is Vec<3,float> point p1, p2, p3; // Same as vec v3 = v1 + v2; // Also -, *, / (all componentwise) v3 = 3.5f * v1; // Also vec * scalar, vec / scalar // NOTE: scalar has to be the same type: // it won't work to do double * vec<float> v1 = min(v2,v3); // Componentwise min/max v1 = sin(v2); // Componentwise - all the usual functions... swap(v1,v2); // In-place swap v3 = v1 DOT v2; // Actually operator^ v3 = v1 CROSS v2; // Actually operator% float f = v1[0]; // Subscript float *fp = v1; // Implicit conversion to float * f = len(v1); // Length (also len2 == squared length) f = dist(p1, p2); // Distance (also dist2 == squared distance) normalize(v1); // Normalize (i.e., make it unit length) // normalize(vec(0,0,0)) => vec(1,0,0) v1 = trinorm(p1,p2,p3); // Normal of triangle cout << v1 << endl; // iostream output in the form (1,2,3) cin >> v2; // iostream input using the same syntax Also defines the utility functions sqr, cube, sgn, fract, clamp, mix, step, smoothstep, faceforward, reflect, and refract */ // Windows defines these as macros, which prevents us from using the // type-safe versions from std::, as well as interfering with method defns #undef min #undef max #include <cmath> #include <iostream> #include <algorithm> using std::min; using std::max; using std::swap; using std::sqrt; // Let gcc optimize conditional branches a bit better... 
// Branch-prediction hints; degrade to plain expressions on compilers
// without __builtin_expect.
#ifndef likely
# if !defined(__GNUC__) || (__GNUC__ == 2 && __GNUC_MINOR__ < 96)
#  define likely(x) (x)
#  define unlikely(x) (x)
# else
#  define likely(x) (__builtin_expect((x), 1))
#  define unlikely(x) (__builtin_expect((x), 0))
# endif
#endif

// Boost-like compile-time assertion checking
template <bool X> struct VEC_STATIC_ASSERTION_FAILURE;
template <> struct VEC_STATIC_ASSERTION_FAILURE<true>
	{ void operator () () {} };
#define VEC_STATIC_CHECK(expr) VEC_STATIC_ASSERTION_FAILURE<bool(expr)>()

// Fixed-length D-dimensional vector of T (float by default).
template <int D, class T = float>
class Vec {
protected:
	T v[D];		// the D components

public:
	// Constructor for no arguments.  Everything initialized to 0.
	Vec() { for (int i = 0; i < D; i++) v[i] = T(0); }

	// Uninitialized constructor - meant mostly for internal use
#define VEC_UNINITIALIZED ((void *) 0)
	Vec(void *) {}

	// Constructors for 2-4 arguments
	Vec(T x, T y)
		{ VEC_STATIC_CHECK(D == 2); v[0] = x; v[1] = y; }
	Vec(T x, T y, T z)
		{ VEC_STATIC_CHECK(D == 3); v[0] = x; v[1] = y; v[2] = z; }
	Vec(T x, T y, T z, T w)
		{ VEC_STATIC_CHECK(D == 4); v[0] = x; v[1] = y; v[2] = z; v[3] = w; }

	// Constructor from anything that can be accessed using []
	// Pretty aggressive, so marked as explicit.
	template <class S> explicit Vec(const S &x)
		{ for (int i = 0; i < D; i++) v[i] = T(x[i]); }

	// No destructor or assignment operator needed

	// Array reference and conversion to pointer - no bounds checking
	const T &operator [] (int i) const { return v[i]; }
	T &operator [] (int i) { return v[i]; }
	operator const T * () const { return v; }
	operator const T * () { return v; }
	operator T * () { return v; }

	// Member operators.  NOTE(review): the "#pragma omp atomic" inside
	// each loop makes the single component update atomic when the
	// operator is called from parallel OpenMP code.
	Vec<D,T> &operator += (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] += x[i];
		return *this;
	}
	Vec<D,T> &operator -= (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] -= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x[i];
		return *this;
	}
	Vec<D,T> &operator *= (const T &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] *= x;
		return *this;
	}
	Vec<D,T> &operator /= (const Vec<D,T> &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x[i];
		return *this;
	}
	Vec<D,T> &operator /= (const T &x)
	{
		for (int i = 0; i < D; i++)
#pragma omp atomic
			v[i] /= x;
		return *this;
	}

	// Set each component to min/max of this and the other vector.
	// Guarded by "omp critical" since they read-modify-write several
	// components non-atomically.
	Vec<D,T> &min(const Vec<D,T> &x)
	{
#pragma omp critical
		for (int i = 0; i < D; i++)
			if (x[i] < v[i]) v[i] = x[i];
		return *this;
	}
	Vec<D,T> &max(const Vec<D,T> &x)
	{
#pragma omp critical
		for (int i = 0; i < D; i++)
			if (x[i] > v[i]) v[i] = x[i];
		return *this;
	}

	// Outside of class: + - * / % ^ << >>

	// Some partial compatibility with valarrays and vectors
	typedef T value_type;
	size_t size() const { return D; }
	// Sum of all components.
	T sum() const
	{
		T total = v[0];
		for (int i = 1; i < D; i++)
			total += v[i];
		return total;
	}
	// Arithmetic mean of the components.
	T avg() const { return sum() / D; }
	// Product of all components.
	T product() const
	{
		T total = v[0];
		for (int i = 1; i < D; i++)
			total *= v[i];
		return total;
	}
	// Smallest component.
	T min() const
	{
		T m = v[0];
		for (int i = 1; i < D; i++)
			if (v[i] < m) m = v[i];
		return m;
	}
	// Largest component.
	T max() const
	{
		T m = v[0];
		for (int i = 1; i < D; i++)
			if (v[i] > m) m = v[i];
		return m;
	}
	T *begin() { return &(v[0]); }
	const T *begin() const { return &(v[0]); }
	T *end() { return begin() + D; }
	const T *end() const { return begin() + D; }
	// Reset all components to zero.
	void clear() { for (int i = 0; i < D; i++) v[i] = T(0); }
	// True iff every component is zero.
	bool empty() const
	{
		for (int i = 0; i < D; i++)
			if (v[i]) return false;
		return true;
	}
	// Return a new vector with func applied to each component.
	Vec<D,T> apply(T func(T)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (int i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
	Vec<D,T> apply(T func(const T&)) const
	{
		Vec<D,T> result(VEC_UNINITIALIZED);
		for (int i = 0; i < D; i++)
			result[i] = func(v[i]);
		return result;
	}
};

typedef Vec<3,float> vec;
typedef Vec<3,float> point;
typedef Vec<2,float> vec2;
typedef Vec<3,float> vec3;
typedef Vec<4,float> vec4;
typedef Vec<2,int> ivec2;
typedef Vec<3,int> ivec3;
typedef Vec<4,int> ivec4;

// Nonmember operators that take two Vecs (componentwise)
template <int D, class T>
static inline const Vec<D,T> operator + (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v1[i] + v2[i];
	return result;
}

template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v1[i] - v2[i];
	return result;
}

template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v1[i] * v2[i];
	return result;
}

template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v1[i] / v2[i];
	return result;
}

// Dot product in any dimension
template <int D, class T>
static inline const T operator ^ (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	T sum = v1[0] * v2[0];
	for (int i = 1; i < D; i++)
		sum += v1[i] * v2[i];
	return sum;
}
#define DOT ^

// Cross product - only in 3 dimensions
// Cross product of two 3-vectors (right-handed).
template <class T>
static inline const Vec<3,T> operator % (const Vec<3,T> &v1, const Vec<3,T> &v2)
{
	return Vec<3,T>(v1[1]*v2[2] - v1[2]*v2[1],
			v1[2]*v2[0] - v1[0]*v2[2],
			v1[0]*v2[1] - v1[1]*v2[0]);
}
#define CROSS %

// Component-wise equality and inequality (#include the usual caveats
// about comparing floats for equality...)
template <int D, class T>
static inline bool operator == (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	for (int i = 0; i < D; i++)
		if (v1[i] != v2[i])
			return false;
	return true;
}

template <int D, class T>
static inline bool operator != (const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	for (int i = 0; i < D; i++)
		if (v1[i] != v2[i])
			return true;
	return false;
}

// Unary operators
template <int D, class T>
static inline const Vec<D,T> &operator + (const Vec<D,T> &v)
{
	return v;
}

template <int D, class T>
static inline const Vec<D,T> operator - (const Vec<D,T> &v)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = -v[i];
	return result;
}

// Logical not: true iff the vector is all-zero (delegates to empty()).
template <int D, class T>
static inline bool operator ! (const Vec<D,T> &v)
{
	return v.empty();
}

// Vec/scalar operators
template <int D, class T>
static inline const Vec<D,T> operator * (const T &x, const Vec<D,T> &v)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = x * v[i];
	return result;
}

template <int D, class T>
static inline const Vec<D,T> operator * (const Vec<D,T> &v, const T &x)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v[i] * x;
	return result;
}

template <int D, class T>
static inline const Vec<D,T> operator / (const T &x, const Vec<D,T> &v)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = x / v[i];
	return result;
}

template <int D, class T>
static inline const Vec<D,T> operator / (const Vec<D,T> &v, const T &x)
{
	Vec<D,T> result(VEC_UNINITIALIZED);
	for (int i = 0; i < D; i++)
		result[i] = v[i] / x;
	return result;
}

// iostream operators: prints "(a, b, c)"; input accepts "(...)" or "[...]".
template <int D, class T>
static inline std::ostream &operator << (std::ostream &os, const Vec<D,T> &v)
{
	os << "(";
	for (int i = 0; i < D-1; i++)
		os << v[i] << ", ";
	return os << v[D-1] << ")";
}

template <int D, class T>
static inline std::istream &operator >> (std::istream &is, Vec<D,T> &v)
{
	char c1 = 0, c2 = 0;

	is >> c1;
	if (c1 == '(' || c1 == '[') {
		is >> v[0] >> std::ws >> c2;
		for (int i = 1; i < D; i++) {
			if (c2 == ',')
				is >> v[i] >> std::ws >> c2;
			else
				is.setstate(std::ios::failbit);
		}
	}

	// Closing delimiter must match the opening one.
	if (c1 == '(' && c2 != ')')
		is.setstate(std::ios::failbit);
	else if (c1 == '[' && c2 != ']')
		is.setstate(std::ios::failbit);

	return is;
}

// Functions on Vecs
// NOTE(review): both parameters are const references, so swap(v1[i], v2[i])
// operates on const elements -- this cannot actually exchange the vectors;
// verify intent (the parameters presumably should be non-const).
template <int D, class T>
static inline void swap(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	for (int i = 0; i < D; i++)
		swap(v1[i], v2[i]);
}

// Squared length.
template <int D, class T>
static inline const T len2(const Vec<D,T> &v)
{
	T l2 = v[0] * v[0];
	for (int i = 1; i < D; i++)
		l2 += v[i] * v[i];
	return l2;
}

// Euclidean length.
template <int D, class T>
static inline const T len(const Vec<D,T> &v)
{
	return sqrt(len2(v));
}

// Squared distance between two points.
template <int D, class T>
static inline const T dist2(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	T d2 = sqr(v2[0]-v1[0]);
	for (int i = 1; i < D; i++)
		d2 += sqr(v2[i]-v1[i]);
	return d2;
}

// Euclidean distance between two points.
template <int D, class T>
static inline const T dist(const Vec<D,T> &v1, const Vec<D,T> &v2)
{
	return sqrt(dist2(v1,v2));
}

// Normalize v in place to unit length; a zero vector becomes (1,0,...,0).
template <int D, class T>
static inline Vec<D,T> normalize(Vec<D,T> &v)
{
	T l = len(v);
	if (unlikely(l <= T(0))) {
		v[0] = T(1);
		for (int i = 1; i < D; i++)
			v[i] = T(0);
		return v;
	}

	l = T(1) / l;
	for (int i = 0; i < D; i++)
		v[i] *= l;

	return v;
}

// Area-weighted triangle face normal
template <class T>
static inline T trinorm(const T &v0, const T &v1, const T &v2)
{
	return (typename T::value_type) 0.5 * ((v1 - v0) CROSS (v2 - v0));
}

// Utility functions for square and cube, to go along with sqrt and cbrt
template <class T>
static inline T sqr(const T &x)
{
	return x*x;
}

template <class T>
static inline T cube(const T &x)
{
	return x*x*x;
}

// Sign of a scalar.  Note: sgn(0) is +1 by this definition.
template <class T>
static inline T sgn(const T &x)
{
	return (x < T(0)) ? T(-1) : T(1);
}

// Utility functions based on GLSL
template <class T>
static inline T fract(const T &x)
{
	return x - floor(x);
}

template <class T>
static inline T clamp(const T &x, const T &a, const T &b)
{
	return x > a ? x < b ? x : b : a;	// returns a on NaN
}

// Linear interpolation between x and y with weight a.
template <class T, class S>
static inline T mix(const T &x, const T &y, const S &a)
{
	return (S(1)-a) * x + a * y;
}

template <class T>
static inline T step(const T &x, const T &a)
{
	return x < a ? T(0) : T(1);
}

// Hermite smoothstep of x over [a, b]; degenerates to step when b <= a.
template <class T>
static inline T smoothstep(const T &x, const T &a, const T &b)
{
	if (b <= a) return step(x,a);
	T t = (x - a) / (b - a);
	return t <= T(0) ? T(0) : t >= T(1) ? T(1) :
		t * t * (T(3) - T(2) * t);
}

// GLSL-style geometry helpers.
// NOTE(review): faceforward/reflect/refract are declared to return T, but
// the return expressions are Vec-valued; the return type presumably should
// be Vec<D,T> -- confirm against callers before changing.
template <int D, class T>
static inline T faceforward(const Vec<D,T> &N, const Vec<D,T> &I,
			    const Vec<D,T> &Nref)
{
	return ((Nref DOT I) < T(0)) ? N : -N;
}

template <int D, class T>
static inline T reflect(const Vec<D,T> &I, const Vec<D,T> &N)
{
	return I - (T(2) * (N DOT I)) * N;
}

template <int D, class T>
static inline T refract(const Vec<D,T> &I, const Vec<D,T> &N,
			const T &eta)
{
	T NdotI = N DOT I;
	T k = T(1) - sqr(eta) * (T(1) - sqr(NdotI));
	return (k < T(0)) ? T(0) :
		eta * I - (eta * NdotI * sqrt(k)) * N;
}

// Generic macros for declaring 1-, 2-, and 3- argument
// componentwise functions on vecs
#define VEC_DECLARE_ONEARG(name) \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i]); \
	return result; \
 }

#define VEC_DECLARE_TWOARG(name) \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const T &w) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w); \
	return result; \
 } \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w[i]); \
	return result; \
 }

#define VEC_DECLARE_THREEARG(name) \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const T &w, const T &x) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w, x); \
	return result; \
 } \
 template <int D, class T> \
 static inline Vec<D,T> name(const Vec<D,T> &v, const Vec<D,T> &w, const Vec<D,T> &x) \
 { \
	Vec<D,T> result(VEC_UNINITIALIZED); \
	for (int i = 0; i < D; i++) \
		result[i] = name(v[i], w[i], x[i]); \
	return result; \
 }

VEC_DECLARE_ONEARG(fabs)
VEC_DECLARE_ONEARG(floor)
VEC_DECLARE_ONEARG(ceil)
VEC_DECLARE_ONEARG(round)
VEC_DECLARE_ONEARG(trunc)
VEC_DECLARE_ONEARG(sin)
VEC_DECLARE_ONEARG(asin)
VEC_DECLARE_ONEARG(cos)
VEC_DECLARE_ONEARG(acos)
VEC_DECLARE_ONEARG(tan)
VEC_DECLARE_ONEARG(atan)
VEC_DECLARE_ONEARG(exp)
VEC_DECLARE_ONEARG(log)
VEC_DECLARE_ONEARG(sqrt)
VEC_DECLARE_ONEARG(sqr)
VEC_DECLARE_ONEARG(cbrt)
VEC_DECLARE_ONEARG(cube)
VEC_DECLARE_ONEARG(sgn)
VEC_DECLARE_TWOARG(min)
VEC_DECLARE_TWOARG(max)
VEC_DECLARE_TWOARG(atan2)
VEC_DECLARE_TWOARG(pow)
VEC_DECLARE_TWOARG(fmod)
VEC_DECLARE_TWOARG(step)
VEC_DECLARE_THREEARG(smoothstep)
VEC_DECLARE_THREEARG(clamp)
#undef VEC_DECLARE_ONEARG
#undef VEC_DECLARE_TWOARG
#undef VEC_DECLARE_THREEARG

// Both valarrays and GLSL use abs() on a vector to mean fabs().
// Let's be compatible...
template <int D, class T>
static inline Vec<D,T> abs(const Vec<D,T> &v)
{
	return fabs(v);
}

#endif
nodal_residualbased_elimination_builder_and_solver_for_FSI.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi, Alessandro Franci // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI) #define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ // #define USE_GOOGLE_HASH #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "pfem_fluid_dynamics_application_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedEliminationBuilderAndSolverForFSI * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. 
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedEliminationBuilderAndSolverForFSI : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverForFSI); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; typedef GlobalPointersVector<Node<3>> NodeWeakPtrVectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. */ NodalResidualBasedEliminationBuilderAndSolverForFSI( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverForFSI") << "Using the standard builder and solver " << std::endl; } /** Destructor. 
*/ ~NodalResidualBasedEliminationBuilderAndSolverForFSI() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void SetMaterialPropertiesToFluid( ModelPart::NodeIterator itNode, double &density, double &deviatoricCoeff, double &volumetricCoeff, double timeInterval, double nodalVolume) { density = itNode->FastGetSolutionStepValue(DENSITY); deviatoricCoeff = itNode->FastGetSolutionStepValue(DYNAMIC_VISCOSITY); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); if (volumetricCoeff > 0) { volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); double bulkReduction = density * nodalVolume / (timeInterval * volumetricCoeff); volumetricCoeff *= bulkReduction; } } void SetMaterialPropertiesToSolid( ModelPart::NodeIterator itNode, double &density, double &deviatoricCoeff, double &volumetricCoeff, double timeInterval, double nodalVolume) { density = itNode->FastGetSolutionStepValue(SOLID_DENSITY); double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS); double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO); //deviatoricCoeff=deltaT*secondLame deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5; //volumetricCoeff=bulk*deltaT=deltaT*(firstLame+2*secondLame/3) volumetricCoeff = timeInterval * poissonRatio 
* youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0; } void BuildSolidNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b, double hybridCoeff) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //contributions to the system LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType solidEquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; //double theta = 0.5; double theta = 1.0; array_1d<double, 3> Acc(3, 0.0); double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; double dNdXj = 0; double dNdYj = 0; double dNdZj = 0; unsigned int firstRow = 0; unsigned int firstCol = 0; double density = 0; double deviatoricCoeff = 0; double volumetricCoeff = 0; double dynamics = 1.0; //dynamics=0.0; // static problem without intertial effects /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); double numNodesForExternalForce = 0; double nodalExternalForce = 0; bool belytsckoCase = false; bool cooksMembraneCase = false; if (cooksMembraneCase == true) { for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double posX = itNode->X0(); if (posX > 47.999 && posX < 48.001) { numNodesForExternalForce += 1.0; } } if (numNodesForExternalForce > 0) { nodalExternalForce = 1.0 / numNodesForExternalForce; } } if (belytsckoCase == true) { for (ModelPart::NodeIterator 
itNode = NodesBegin; itNode != NodesEnd; ++itNode) { double posX = itNode->X0(); if (posX > 24.999 && posX < 25.001) { numNodesForExternalForce += 1.0; } } if (numNodesForExternalForce > 0) { nodalExternalForce = 40.0 / numNodesForExternalForce; } } for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if (itNode->Is(SOLID)) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector solidNodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = solidNodalSFDneighboursId.size(); const double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME); if (neighSize > 1 && nodalVolume > 0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size(); if (solidLHS_Contribution.size1() != localSize) solidLHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (solidRHS_Contribution.size() != localSize) solidRHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! 
if (solidEquationId.size() != localSize) solidEquationId.resize(localSize, false); solidLHS_Contribution = ZeroMatrix(localSize, localSize); solidRHS_Contribution = ZeroVector(localSize); this->SetMaterialPropertiesToSolid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume); firstRow = 0; firstCol = 0; if (dimension == 2) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 * dynamics / timeInterval; solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 * dynamics / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); solidRHS_Contribution[0] += -nodalVolume * density * Acc[0] * dynamics; solidRHS_Contribution[1] += -nodalVolume * density * Acc[1] * dynamics; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; solidRHS_Contribution[0] += nodalVolume * density * 
VolumeAcceleration[0]; solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; ///////////////LOAD CONDITIONS FOR BELYTSCHKO CASE // if(itNode->X0()>24.999){ // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge) //solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge) //solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge) //solidRHS_Contribution[1]+=40.0/9.0; // mesh 0.5 (8 element per edge) // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge) // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge) //solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge) //} if (belytsckoCase == true) { if (itNode->X0() > 24.999 && itNode->X0() < 25.001) { solidRHS_Contribution[1] += nodalExternalForce; } } if (cooksMembraneCase == true) { if (itNode->X0() > 47.999 && itNode->X0() < 48.001) { solidRHS_Contribution[1] += nodalExternalForce; } } //-------- INTERNAL FORCES TERM -------// array_1d<double, 3> Sigma(3, 0.0); Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1]; solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]) * hybridCoeff; solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]) * hybridCoeff; for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1]; solidLHS_Contribution(firstRow, firstCol) += 
nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta * hybridCoeff; solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta * hybridCoeff; firstRow += 2; } firstRow = 0; firstCol += 2; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); break; } } } else if (i < neighb_nodes.size()) { solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); } } /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */ } else if (dimension == 3) { //////////////////////////// LHS TERMS ////////////////////////////// solidLHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; solidLHS_Contribution(1, 1) += nodalVolume * density * 2.0 / 
timeInterval; solidLHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); solidRHS_Contribution[0] += -nodalVolume * density * Acc[0]; solidRHS_Contribution[1] += -nodalVolume * density * Acc[1]; solidRHS_Contribution[2] += -nodalVolume * density * Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); solidRHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; solidRHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; solidRHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2]; ///////////////LOAD CONDITIONS FOR BELITSCHKO CASE // if(itNode->X0()>24.999){ // // solidRHS_Contribution[1]+=40.0/2.0; // mesh 4 (1 element per edge) // // solidRHS_Contribution[1]+=40.0/3.0; // mesh 2 (2 element per edge) // // solidRHS_Contribution[1]+=40.0/5.0; // mesh 1 (4 element per edge) // solidRHS_Contribution[1]+=40.0/27.0; // mesh 0.5 (8 element per edge, 2 per width) // // solidRHS_Contribution[1]+=40.0/17.0; // mesh 0.25 (16 element per edge) // // solidRHS_Contribution[1]+=40.0/33.0; // mesh 0.125 (32 element per edge) // // solidRHS_Contribution[1]+=40.0/65.0; // mesh 0.0625 (64 element per edge) // } //-------- INTERNAL FORCES TERM -------// array_1d<double, 6> Sigma(6, 0.0); Sigma = itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); solidEquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[1] = 
itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 1]; dNdZi = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstCol + 2]; solidRHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]); solidRHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]); solidRHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 1]; dNdZj = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS)[firstRow + 2]; solidLHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 1, firstCol + 
2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi + dNdZj * dNdYi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta; solidLHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta; firstRow += 3; } firstRow = 0; firstCol += 3; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = solidNodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { solidEquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); break; } } } else if (i < neighb_nodes.size()) { solidEquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); solidEquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); solidEquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array); #else Assemble(A, b, solidLHS_Contribution, 
solidRHS_Contribution, solidEquationId); #endif } } } // } KRATOS_CATCH("") } void BuildFluidNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; /* std::cout<<"Building LHS and RHS of Momentum Equation Nodally"<<std::endl; */ //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different terms Element::EquationIdVectorType EquationId; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; const double FourThirds = 4.0 / 3.0; const double nTwoThirds = -2.0 / 3.0; double theta = 0.5; array_1d<double, 3> Acc(3, 0.0); // array_1d<double,6> Sigma(6,0.0); double pressure = 0; double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; double dNdXj = 0; double dNdYj = 0; double dNdZj = 0; unsigned int firstRow = 0; unsigned int firstCol = 0; double density = 0; double deviatoricCoeff = 0; double volumetricCoeff = 0; /* #pragma omp parallel */ // { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER); // const unsigned int neighSize = neighb_nodes.size()+1; const unsigned int neighSize = nodalSFDneighboursId.size(); const double nodalVolume = 
itNode->FastGetSolutionStepValue(NODAL_VOLUME); if (neighSize > 1 && nodalVolume > 0) { const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size(); if (LHS_Contribution.size1() != localSize) LHS_Contribution.resize(localSize, localSize, false); //false says not to preserve existing storage!! if (RHS_Contribution.size() != localSize) RHS_Contribution.resize(localSize, false); //false says not to preserve existing storage!! if (EquationId.size() != localSize) EquationId.resize(localSize, false); LHS_Contribution = ZeroMatrix(localSize, localSize); RHS_Contribution = ZeroVector(localSize); this->SetMaterialPropertiesToFluid(itNode, density, deviatoricCoeff, volumetricCoeff, timeInterval, nodalVolume); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // // std::cout<<"density,deviatoricCoeff,volumetricCoeff "<<density<<" "<<deviatoricCoeff<<" "<<volumetricCoeff<<std::endl; // std::cout<<"INTERFACE nodalVolume "<<nodalVolume<<std::endl; // }else{ // std::cout<<"nodalVolume "<<nodalVolume<<std::endl; // } firstRow = 0; firstCol = 0; if (dimension == 2) { //////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); RHS_Contribution[0] += -nodalVolume * density * Acc[0]; RHS_Contribution[1] += -nodalVolume * density * Acc[1]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += 
(-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); // RHS_Contribution[0]+=nodalVolume*density*VolumeAcceleration[0]*coeffX; // RHS_Contribution[1]+=nodalVolume*density*VolumeAcceleration[1]*coeffY; RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; //-------- INTERNAL FORCES TERM -------// array_1d<double, 3> Sigma(3, 0.0); Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta); Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[2]); RHS_Contribution[firstCol + 1] += 
-nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[2]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + dNdYj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + dNdXj * dNdXi * deviatoricCoeff) * theta; firstRow += 2; } firstRow = 0; firstCol += 2; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); break; } } } else if (i < neighb_nodes.size()) { EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); } } /* std::cout << "LHS_Contribution = " << LHS_Contribution << std::endl; */ } else if (dimension == 3) { 
//////////////////////////// LHS TERMS ////////////////////////////// LHS_Contribution(0, 0) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(1, 1) += nodalVolume * density * 2.0 / timeInterval; LHS_Contribution(2, 2) += nodalVolume * density * 2.0 / timeInterval; //////////////////////////// RHS TERMS ////////////////////////////// //-------- DYNAMIC FORCES TERM -------// Acc = 2.0 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 0); RHS_Contribution[0] += -nodalVolume * density * Acc[0]; RHS_Contribution[1] += -nodalVolume * density * Acc[1]; RHS_Contribution[2] += -nodalVolume * density * Acc[2]; //-------- EXTERNAL FORCES TERM -------// array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); RHS_Contribution[0] += nodalVolume * density * VolumeAcceleration[0]; RHS_Contribution[1] += nodalVolume * density * VolumeAcceleration[1]; RHS_Contribution[2] += nodalVolume * density * VolumeAcceleration[2]; //-------- INTERNAL FORCES TERM -------// array_1d<double, 6> Sigma(6, 0.0); Sigma = itNode->FastGetSolutionStepValue(NODAL_CAUCHY_STRESS); // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){ // Sigma=itNode->FastGetSolutionStepValue(SOLID_NODAL_CAUCHY_STRESS); // } if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true) { pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) * theta + itNode->FastGetSolutionStepValue(PRESSURE, 1) * (1 - theta); Sigma[0] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[0] + pressure; Sigma[1] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[1] + pressure; Sigma[2] = itNode->FastGetSolutionStepValue(NODAL_DEVIATORIC_CAUCHY_STRESS)[2] + pressure; } const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X); EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[1] = 
itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2]; RHS_Contribution[firstCol] += -nodalVolume * (dNdXi * Sigma[0] + dNdYi * Sigma[3] + dNdZi * Sigma[4]); RHS_Contribution[firstCol + 1] += -nodalVolume * (dNdYi * Sigma[1] + dNdXi * Sigma[3] + dNdZi * Sigma[5]); RHS_Contribution[firstCol + 2] += -nodalVolume * (dNdZi * Sigma[2] + dNdXi * Sigma[4] + dNdYi * Sigma[5]); for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2]; LHS_Contribution(firstRow, firstCol) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdXi + (dNdYj * dNdYi + dNdZj * dNdZi) * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdYi + dNdYj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdXj * dNdZi + dNdZj * dNdXi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdXi + dNdXj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 1) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdYi + (dNdXj * dNdXi + dNdZj * dNdZi) * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 1, firstCol + 2) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdYj * dNdZi 
+ dNdZj * dNdYi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdXi + dNdXj * dNdZi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol + 1) += nodalVolume * ((nTwoThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdYi + dNdYj * dNdZi * deviatoricCoeff) * theta; LHS_Contribution(firstRow + 2, firstCol + 2) += nodalVolume * ((FourThirds * deviatoricCoeff + volumetricCoeff) * dNdZj * dNdZi + (dNdXj * dNdXi + dNdYj * dNdYi) * deviatoricCoeff) * theta; firstRow += 3; } firstRow = 0; firstCol += 3; unsigned int indexNode = i + 1; if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true && indexNode < neighSize) { unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode]; // std::cout<<"other_neigh_nodes_id= "<<other_neigh_nodes_id<<" within "<<nodalSFDneighboursId<<std::endl; for (unsigned int k = 0; k < neighb_nodes.size(); k++) { unsigned int neigh_nodes_id = neighb_nodes[k].Id(); // std::cout<<" neigh_nodes_id= "<< neigh_nodes_id<<std::endl; if (neigh_nodes_id == other_neigh_nodes_id) { EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); break; } } } else if (i < neighb_nodes.size()) { EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId(); EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId(); EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId(); } } } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } // } KRATOS_CATCH("") } /** * @brief This is a call to the linear system solver * @param A The LHS matrix * @param 
Dx The Unknowns vector * @param b The RHS vector */ void SystemSolve( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else TSparseSpace::SetToZero(Dx); // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem) * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector * @param rModelPart The model part of the problem to solve */ void SystemSolveWithPhysics( TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b, ModelPart &rModelPart) { KRATOS_TRY double norm_b; if (TSparseSpace::Size(b) != 0) norm_b = TSparseSpace::TwoNorm(b); else norm_b = 0.00; if (norm_b != 0.00) { //provide physical data as needed if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded()) BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart); //do solve BaseType::mpLinearSystemSolver->Solve(A, Dx, b); } else { TSparseSpace::SetToZero(Dx); KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl; } // Prints informations about the current time KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl; KRATOS_CATCH("") } /** * @brief Function to perform the building and solving phase at the same time. 
* @details It is ideally the fastest and safer function to use when it is possible to solve * just after building * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void BuildAndSolve( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { KRATOS_TRY Timer::Start("Build"); // boost::timer m_build_time; double hybridCoeff = 1.0; // 0.5: half nodal - half elemental; 1.0 all nodal; 0.0 all elemental BuildSolidNodally(pScheme, rModelPart, A, b, hybridCoeff); if (hybridCoeff < 0.99999999) { BuildElementally(pScheme, rModelPart, A, b); } BuildFluidNodally(pScheme, rModelPart, A, b); // std::cout << "MOMENTUM EQ: build_time : " << m_build_time.elapsed() << std::endl; Timer::Stop("Build"); // ApplyPointLoads(pScheme,rModelPart,b); // Does nothing...dirichlet conditions are naturally dealt with in defining the residual ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b); KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system" << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; const double start_solve = OpenMPUtils::GetCurrentTime(); Timer::Start("Solve"); /* boost::timer m_solve_time; */ SystemSolveWithPhysics(A, Dx, b, rModelPart); /* std::cout << "MOMENTUM EQ: solve_time : " << m_solve_time.elapsed() << std::endl; */ Timer::Stop("Solve"); const double stop_solve = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System solve time: " << stop_solve - start_solve << std::endl; KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system" << "\nSystem Matrix = " << 
A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl; KRATOS_CATCH("") } void BuildElementally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &rA, TSystemVectorType &rb) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //getting the elements from the model const int nelements = static_cast<int>(rModelPart.Elements().size()); //getting the array of the conditions const int nconditions = static_cast<int>(rModelPart.Conditions().size()); const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); ModelPart::ElementsContainerType::iterator el_begin = rModelPart.ElementsBegin(); ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin(); //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; const double start_build = OpenMPUtils::GetCurrentTime(); // assemble all elements #pragma omp parallel firstprivate(nelements, nconditions, LHS_Contribution, RHS_Contribution, EquationId) { #pragma omp for schedule(guided, 512) nowait for (int k = 0; k < nelements; k++) { ModelPart::ElementsContainerType::iterator it = el_begin + k; //detect if the element is active or not. 
If the user did not make any choice the element //is active by default bool element_is_active = true; if ((it)->IsDefined(ACTIVE)) element_is_active = (it)->Is(ACTIVE); if (element_is_active) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); //assemble the elemental contribution #ifdef USE_LOCKS_IN_ASSEMBLY AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId, mLockArray); #else AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*it); } } #pragma omp for schedule(guided, 512) for (int k = 0; k < nconditions; k++) { ModelPart::ConditionsContainerType::iterator it = cond_begin + k; //detect if the element is active or not. If the user did not make any choice the element //is active by default bool condition_is_active = true; if ((it)->IsDefined(ACTIVE)) condition_is_active = (it)->Is(ACTIVE); if (condition_is_active) { //calculate elemental contribution pScheme->CalculateSystemContributions(*it, LHS_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo); #ifdef USE_LOCKS_IN_ASSEMBLY AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId, mLockArray); #else AssembleElementally(rA, rb, LHS_Contribution, RHS_Contribution, EquationId); #endif // clean local elemental memory pScheme->CleanMemory(*it); } } } const double stop_build = OpenMPUtils::GetCurrentTime(); KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", (this->GetEchoLevel() >= 1 && rModelPart.GetCommunicator().MyPID() == 0)) << "System build time: " << stop_build - start_build << std::endl; KRATOS_INFO_IF("ResidualBasedEliminationBuilderAndSolver", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished building" << std::endl; KRATOS_CATCH("") } void AssembleElementally( TSystemMatrixType &rA, TSystemVectorType &rb, const LocalSystemMatrixType 
&rLHSContribution, const LocalSystemVectorType &rRHSContribution, const Element::EquationIdVectorType &rEquationId #ifdef USE_LOCKS_IN_ASSEMBLY , std::vector<omp_lock_t> &rLockArray #endif ) { unsigned int local_size = rLHSContribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = rEquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef USE_LOCKS_IN_ASSEMBLY omp_set_lock(&rLockArray[i_global]); b[i_global] += rRHSContribution(i_local); #else double &r_a = rb[i_global]; const double &v_a = rRHSContribution(i_local); #pragma omp atomic r_a += v_a; #endif AssembleRowContributionFreeDofs(rA, rLHSContribution, i_global, i_local, rEquationId); #ifdef USE_LOCKS_IN_ASSEMBLY omp_unset_lock(&rLockArray[i_global]); #endif } //note that computation of reactions is not performed here! } } /** * @brief Builds the list of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. * @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart) override { KRATOS_TRY; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType &pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = OpenMPUtils::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // 
        // DofPointerComparor,
        // allocator_type > set_type;
#ifdef USE_GOOGLE_HASH
        typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
        typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
        // NOTE(review): 'dofs_aux_list' is used throughout this function, yet its
        // declaration here reads as commented out in this copy — likely a
        // transcription artifact; confirm against the upstream file that it reads
        //     std::vector<set_type> dofs_aux_list(nthreads);
        // std::vector<set_type> dofs_aux_list(nthreads);
        // std::vector<allocator_type> allocators(nthreads);

        for (int i = 0; i < static_cast<int>(nthreads); i++)
        {
#ifdef USE_GOOGLE_HASH
            dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
            // dofs_aux_list[i] = set_type( allocators[i]);
            // reserve to limit rehashing while inserting elemental dofs
            dofs_aux_list[i].reserve(nelements);
#endif
        }

        // Collect the dofs of every element into per-thread sets
        // (the parallel pragma is commented out here — serial loop)
        // #pragma omp parallel for firstprivate(nelements, ElementalDofList)
        for (int i = 0; i < static_cast<int>(nelements); i++)
        {
            typename ElementsArrayType::iterator it = pElements.begin() + i;
            const unsigned int this_thread_id = OpenMPUtils::ThisThread();

            // gets list of Dof involved on every element
            pScheme->GetElementalDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
            dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        }

        // Same collection for the conditions
        ConditionsArrayType &pConditions = rModelPart.Conditions();
        const int nconditions = static_cast<int>(pConditions.size());
#pragma omp parallel for firstprivate(nconditions, ElementalDofList)
        for (int i = 0; i < nconditions; i++)
        {
            typename ConditionsArrayType::iterator it = pConditions.begin() + i;
            const unsigned int this_thread_id = OpenMPUtils::ThisThread();

            // gets list of Dof involved on every element
            pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
            dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
        }

        //here we do a reduction in a tree so to have everything on thread 0
        unsigned int old_max = nthreads;
        unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
        while (new_max >= 1 && new_max != old_max)
        {
            // //just for debugging
            // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
            // for (int i = 0; i < new_max; i++)
            // {
            //     if (i + new_max < old_max)
            //     {
            //         std::cout << i << " - " << i + new_max << std::endl;
            //     }
            // }
            // std::cout << "********************" << std::endl;

#pragma omp parallel for
            for (int i = 0; i < static_cast<int>(new_max); i++)
            {
                if (i + new_max < old_max)
                {
                    // merge the upper-half set into the lower half, then release it
                    dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
                    dofs_aux_list[i + new_max].clear();
                }
            }

            old_max = new_max;
            new_max = ceil(0.5 * static_cast<double>(old_max));
        }

        // Copy the merged set (now entirely in slot 0) into a sorted dof array
        DofsArrayType Doftemp;
        BaseType::mDofSet = DofsArrayType();

        Doftemp.reserve(dofs_aux_list[0].size());
        for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
        {
            Doftemp.push_back(*it);
        }
        Doftemp.Sort();

        BaseType::mDofSet = Doftemp;

        // Throws an execption if there are no Degrees of freedom involved in the analysis
        KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;

        BaseType::mDofSetIsInitialized = true;

        KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;

#ifdef _OPENMP
        // (Re)create one lock per dof row for the locked assembly path
        if (mlock_array.size() != 0)
        {
            for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
                omp_destroy_lock(&mlock_array[i]);
        }
        mlock_array.resize(BaseType::mDofSet.size());

        for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
            omp_init_lock(&mlock_array[i]);
#endif

        // If reactions are to be calculated, we check if all the dofs have reactions defined
        // This is tobe done only in debug mode
#ifdef KRATOS_DEBUG
        if (BaseType::GetCalculateReactionsFlag())
        {
            for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
            {
                KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
                                                                 << "Node : " << dof_iterator->Id() << std::endl
                                                                 << "Dof : " << (*dof_iterator) << std::endl
                                                                 << "Not possible to calculate reactions."
<< std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart &rModelPart) override { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). // // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType &pA, TSystemVectorPointerType &pDx, TSystemVectorPointerType &pb, ModelPart &rModelPart) override { KRATOS_TRY // boost::timer m_contruct_matrix; if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType 
pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0)); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType &A = *pA; TSystemVectorType &Dx = *pDx; TSystemVectorType &b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructureForFSI(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); //if needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if (BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize, false); } // std::cout << "MOMENTUM EQ: contruct_matrix : " << m_contruct_matrix.elapsed() << std::endl; KRATOS_CATCH("") } inline void AssembleRowContributionFreeDofs( TSystemMatrixType &rA, const Matrix &rALocal, const IndexType i, const IndexType i_local, const Element::EquationIdVectorType &EquationId) { double *values_vector = rA.value_data().begin(); std::size_t *index1_vector = rA.index1_data().begin(); std::size_t *index2_vector = rA.index2_data().begin(); const std::size_t left_limit = index1_vector[i]; // Find the first entry // We iterate over the equation ids until we find the 
first equation id to be considered // We count in which component we find an ID std::size_t last_pos = 0; std::size_t last_found = 0; std::size_t counter = 0; for (std::size_t j = 0; j < EquationId.size(); ++j) { ++counter; const std::size_t j_global = EquationId[j]; if (j_global < BaseType::mEquationSystemSize) { last_pos = ForwardFind(j_global, left_limit, index2_vector); last_found = j_global; break; } } // If the counter is equal to the size of the EquationID vector that means that only one dof will be considered, if the number is greater means that all the dofs are fixed. If the number is below means that at we have several dofs free to be considered if (counter <= EquationId.size()) { #ifndef USE_LOCKS_IN_ASSEMBLY double &r_a = values_vector[last_pos]; const double &v_a = rALocal(i_local, counter - 1); #pragma omp atomic r_a += v_a; #else values_vector[last_pos] += rALocal(i_local, counter - 1); #endif // Now find all of the other entries std::size_t pos = 0; for (std::size_t j = counter; j < EquationId.size(); ++j) { std::size_t id_to_find = EquationId[j]; if (id_to_find < BaseType::mEquationSystemSize) { if (id_to_find > last_found) pos = ForwardFind(id_to_find, last_pos + 1, index2_vector); else if (id_to_find < last_found) pos = BackwardFind(id_to_find, last_pos - 1, index2_vector); else pos = last_pos; #ifndef USE_LOCKS_IN_ASSEMBLY double &r = values_vector[pos]; const double &v = rALocal(i_local, j); #pragma omp atomic r += v; #else values_vector[pos] += Alocal(i_local, j); #endif last_found = id_to_find; last_pos = pos; } } } } inline std::size_t ForwardFind(const std::size_t id_to_find, const std::size_t start, const std::size_t *index_vector) { std::size_t pos = start; while (id_to_find != index_vector[pos]) pos++; return pos; } inline std::size_t BackwardFind(const std::size_t id_to_find, const std::size_t start, const std::size_t *index_vector) { std::size_t pos = start; while (id_to_find != index_vector[pos]) pos--; return pos; } 
//************************************************************************** //************************************************************************** /** * @brief Applies the dirichlet conditions. This operation may be very heavy or completely * unexpensive depending on the implementation choosen and on how the System Matrix is built. * @details For explanation of how it works for a particular implementation the user * should refer to the particular Builder And Solver choosen * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve * @param A The LHS matrix * @param Dx The Unknowns vector * @param b The RHS vector */ void ApplyDirichletConditions( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &Dx, TSystemVectorType &b) override { } /** * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed */ void Clear() override { this->mDofSet = DofsArrayType(); if (this->mpReactionsVector != NULL) TSparseSpace::Clear((this->mpReactionsVector)); // this->mReactionsVector = TSystemVectorType(); this->mpLinearSystemSolver->Clear(); KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl; } /** * @brief This function is designed to be called once to perform all the checks needed * on the input provided. Checks can be "expensive" as the function is designed * to catch user's errors. 
 * @param rModelPart The model part of the problem to solve
 * @return 0 all ok
 */
int Check(ModelPart &rModelPart) override
{
    KRATOS_TRY

    return 0;
    KRATOS_CATCH("");
}

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

/**
 * @brief Lock-protected assembly of one local LHS/RHS contribution into the
 *        global system; rows and columns of fixed (eliminated) dofs are skipped.
 */
void Assemble(
    TSystemMatrixType &A,
    TSystemVectorType &b,
    const LocalSystemMatrixType &LHS_Contribution,
    const LocalSystemVectorType &RHS_Contribution,
    const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
    ,
    std::vector<omp_lock_t> &lock_array
#endif
)
{
    unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];

        if (i_global < BaseType::mEquationSystemSize)
        {
#ifdef _OPENMP
            // one lock per global row serialises concurrent writes to that row
            omp_set_lock(&lock_array[i_global]);
#endif
            b[i_global] += RHS_Contribution(i_local);
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                unsigned int j_global = EquationId[j_local];
                if (j_global < BaseType::mEquationSystemSize)
                {
                    A(i_global, j_global) += LHS_Contribution(i_local, j_local);
                }
            }
#ifdef _OPENMP
            omp_unset_lock(&lock_array[i_global]);
#endif
        }
        //note that assembly on fixed rows is not performed here
    }
}

//**************************************************************************

/**
 * @brief Builds the sparsity pattern (graph) of the global matrix for the FSI
 *        problem and allocates the CSR structure of A.
 * @details Each node contributes couplings between its own velocity dofs and
 *          those of its SFD neighbours; solid nodes, fluid nodes and interface
 *          nodes are handled by separate (structurally similar) branches.
 */
virtual void ConstructMatrixStructureForFSI(
    typename TSchemeType::Pointer pScheme,
    TSystemMatrixType &A,
    ModelPart &rModelPart)
{
    //filling with zero the matrix (creating the structure)
    Timer::Start("MatrixStructure");

    ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();

    // Getting the array of the conditions
    const int nconditions = static_cast<int>(rModelPart.Conditions().size());
    ModelPart::ConditionsContainerType::iterator cond_begin = rModelPart.ConditionsBegin();

    const std::size_t equation_size = BaseType::mEquationSystemSize;

#ifdef USE_GOOGLE_HASH
    std::vector<google::dense_hash_set<std::size_t>> indices(equation_size);
    const std::size_t empty_key = 2 * equation_size + 10;
#else
    std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#endif

    // one column-index set per matrix row
#pragma omp parallel for firstprivate(equation_size)
    for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
    {
#ifdef USE_GOOGLE_HASH
        indices[iii].set_empty_key(empty_key);
#else
        indices[iii].reserve(40);
#endif
    }

    Element::EquationIdVectorType EquationId;
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        if (itNode->Is(SOLID))
        {
            const unsigned int localSize = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS).size();
            const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
            Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
            const unsigned int neighSize = nodalSFDneighboursId.size();

            if (EquationId.size() != localSize)
                EquationId.resize(localSize, false);

            unsigned int firstCol = 0;
            const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);

            // velocity dofs of the node itself come first
            EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
            EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
            if (dimension == 3)
                EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();

            if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                // interface node: match each neighbour by id against the SFD ordering
                NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
                for (unsigned int i = 0; i < neighb_nodes.size(); i++)
                {
                    unsigned int indexNode = i + 1;
                    if (indexNode < neighSize)
                    {
                        unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
                        firstCol += dimension;
                        for (unsigned int k = 0; k < neighb_nodes.size(); k++)
                        {
                            unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                            if (neigh_nodes_id == other_neigh_nodes_id)
                            {
                                EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
                                EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                                if (dimension == 3)
                                {
                                    EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                                }
                                break;
                            }
                        }
                    }
                }
            }
            else
            {
                // plain solid node: neighbours taken in stored order
                NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
                for (unsigned int i = 0; i < neighb_nodes.size(); i++)
                {
                    firstCol += dimension;
                    EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                    EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                    if (dimension == 3)
                    {
                        EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                    }
                }
            }
        }

        // fluid (non-solid) nodes and interface nodes: same pattern with the
        // fluid SFD neighbour containers
        if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
        {
            const unsigned int localSize = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS).size();
            const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension();
            Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
            const unsigned int neighSize = nodalSFDneighboursId.size();

            if (EquationId.size() != localSize)
                EquationId.resize(localSize, false);

            unsigned int firstCol = 0;
            const unsigned int xDofPos = itNode->GetDofPosition(VELOCITY_X);

            EquationId[0] = itNode->GetDof(VELOCITY_X, xDofPos).EquationId();
            EquationId[1] = itNode->GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
            if (dimension == 3)
                EquationId[2] = itNode->GetDof(VELOCITY_Z, xDofPos + 2).EquationId();

            if (itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
            {
                NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
                for (unsigned int i = 0; i < neighb_nodes.size(); i++)
                {
                    unsigned int indexNode = i + 1;
                    if (indexNode < neighSize)
                    {
                        unsigned int other_neigh_nodes_id = nodalSFDneighboursId[indexNode];
                        firstCol += dimension;
                        for (unsigned int k = 0; k < neighb_nodes.size(); k++)
                        {
                            unsigned int neigh_nodes_id = neighb_nodes[k].Id();
                            if (neigh_nodes_id == other_neigh_nodes_id)
                            {
                                EquationId[firstCol] = neighb_nodes[k].GetDof(VELOCITY_X, xDofPos).EquationId();
                                EquationId[firstCol + 1] = neighb_nodes[k].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                                if (dimension == 3)
                                {
                                    EquationId[firstCol + 2] = neighb_nodes[k].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                                }
                                break;
                            }
                        }
                    }
                }
            }
            else
            {
                NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
                for (unsigned int i = 0; i < neighb_nodes.size(); i++)
                {
                    firstCol += dimension;
                    EquationId[firstCol] = neighb_nodes[i].GetDof(VELOCITY_X, xDofPos).EquationId();
                    EquationId[firstCol + 1] = neighb_nodes[i].GetDof(VELOCITY_Y, xDofPos + 1).EquationId();
                    if (dimension == 3)
                    {
                        EquationId[firstCol + 2] = neighb_nodes[i].GetDof(VELOCITY_Z, xDofPos + 2).EquationId();
                    }
                }
            }
        }

        // register this node's equation ids in the rows of all its free dofs
        for (std::size_t i = 0; i < EquationId.size(); i++)
        {
            if (EquationId[i] < BaseType::mEquationSystemSize)
            {
#ifdef _OPENMP
                omp_set_lock(&mlock_array[EquationId[i]]);
#endif
                auto &row_indices = indices[EquationId[i]];
                for (auto it = EquationId.begin(); it != EquationId.end(); it++)
                {
                    if (*it < BaseType::mEquationSystemSize)
                        row_indices.insert(*it);
                }
#ifdef _OPENMP
                omp_unset_lock(&mlock_array[EquationId[i]]);
#endif
            }
        }
    }

    // add the couplings introduced by the conditions
    Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel for firstprivate(nconditions, ids)
    for (int iii = 0; iii < nconditions; iii++)
    {
        typename ConditionsArrayType::iterator i_condition = cond_begin + iii;
        pScheme->Condition_EquationId(*(i_condition.base()), ids, CurrentProcessInfo);
        for (std::size_t i = 0; i < ids.size(); i++)
        {
            if (ids[i] < BaseType::mEquationSystemSize)
            {
#ifdef _OPENMP
                omp_set_lock(&mlock_array[ids[i]]);
#endif
                auto &row_indices = indices[ids[i]];
                for (auto it = ids.begin(); it != ids.end(); it++)
                {
                    if (*it < BaseType::mEquationSystemSize)
                        row_indices.insert(*it);
                }
#ifdef _OPENMP
                omp_unset_lock(&mlock_array[ids[i]]);
#endif
            }
        }
    }

    //count the row sizes
    unsigned int nnz = 0;
    for (unsigned int i = 0; i < indices.size(); i++)
        nnz += indices[i].size();

    A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);

    double *Avalues = A.value_data().begin();
    std::size_t *Arow_indices = A.index1_data().begin();
    std::size_t *Acol_indices = A.index2_data().begin();

    //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
    Arow_indices[0] = 0;
    for (int i = 0; i < static_cast<int>(A.size1()); i++)
        Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();

#pragma omp parallel for
    for (int i = 0; i < static_cast<int>(A.size1()); i++)
    {
        const unsigned int row_begin = Arow_indices[i];
        const unsigned int row_end = Arow_indices[i + 1];
        unsigned int k = row_begin;
        for (auto it = indices[i].begin(); it != indices[i].end(); it++)
        {
            Acol_indices[k] = *it;
            Avalues[k] = 0.0;
            k++;
        }
        // CSR requires the column indices of each row to be sorted
        std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
    }

    A.set_filled(indices.size() + 1, nnz);

    Timer::Stop("MatrixStructure");
}

/**
 * @brief Adds a local LHS contribution into A, skipping rows and columns of
 *        fixed (eliminated) dofs. Not thread-safe by itself.
 */
void AssembleLHS(
    TSystemMatrixType &A,
    LocalSystemMatrixType &LHS_Contribution,
    Element::EquationIdVectorType &EquationId)
{
    unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];
        if (i_global < BaseType::mEquationSystemSize)
        {
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                unsigned int j_global = EquationId[j_local];
                if (j_global < BaseType::mEquationSystemSize)
                    A(i_global, j_global) += LHS_Contribution(i_local, j_local);
            }
        }
    }
}

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

#ifdef _OPENMP
// one lock per global dof row, used by the locked assembly/structure paths
std::vector<omp_lock_t> mlock_array;
#endif

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

// Appends 'candidate' to v only if it is not already contained (linear scan).
inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate)
{
    std::vector<std::size_t>::iterator i =
v.begin();
    std::vector<std::size_t>::iterator endit = v.end();

    while (i != endit && (*i) != candidate)
    {
        i++;
    }
    if (i == endit)
    {
        v.push_back(candidate);
    }
}

/**
 * @brief Adds a local RHS contribution into b (free dofs) and, when reactions
 *        are requested, into the reactions vector (fixed dofs).
 */
void AssembleRHS(
    TSystemVectorType &b,
    const LocalSystemVectorType &RHS_Contribution,
    const Element::EquationIdVectorType &EquationId)
{
    unsigned int local_size = RHS_Contribution.size();

    if (BaseType::mCalculateReactionsFlag == false)
    {
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize) //free dof
            {
                // ASSEMBLING THE SYSTEM VECTOR
                double &b_value = b[i_global];
                const double &rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
        }
    }
    else
    {
        TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector;
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize) //free dof
            {
                // ASSEMBLING THE SYSTEM VECTOR
                double &b_value = b[i_global];
                const double &rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
            else //fixed dof
            {
                // fixed dofs are numbered past mEquationSystemSize; store their
                // contribution in the reactions vector instead
                double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
                const double &rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
        }
    }
}

//**************************************************************************

/**
 * @brief Adds a local LHS contribution on the free rows only, but over ALL
 *        columns (fixed columns included) — note the missing bound check on
 *        j_global compared to AssembleLHS.
 */
void AssembleLHS_CompleteOnFreeRows(
    TSystemMatrixType &A,
    LocalSystemMatrixType &LHS_Contribution,
    Element::EquationIdVectorType &EquationId)
{
    unsigned int local_size = LHS_Contribution.size1();

    for (unsigned int i_local = 0; i_local < local_size; i_local++)
    {
        unsigned int i_global = EquationId[i_local];
        if (i_global < BaseType::mEquationSystemSize)
        {
            for (unsigned int j_local = 0; j_local < local_size; j_local++)
            {
                int j_global = EquationId[j_local];
                A(i_global, j_global) += LHS_Contribution(i_local, j_local);
            }
        }
    }
}

///@}
///@name Private Operations
///@{

///@}
///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedEliminationBuilderAndSolverForFSI */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_FOR_FSI defined */
prepress.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % PPPP RRRR EEEEE PPPP RRRR EEEEE SSSSS SSSSS % % P P R R E P P R R E SS SS % % PPPP RRRR EEE PPPP RRRR EEE SSS SSS % % P R R E P R R E SS SS % % P R R EEEEE P R R EEEEE SSSSS SSSSS % % % % % % MagickCore Prepress Methods % % % % Software Design % % Cristy % % October 2001 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. */ #include "magick/studio.h" #include "magick/cache-view.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/hashmap.h" #include "magick/image.h" #include "magick/list.h" #include "magick/memory_.h" #include "magick/pixel-accessor.h" #include "magick/prepress.h" #include "magick/registry.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/splay-tree.h" #include "magick/string_.h" #include "magick/thread-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e T o t a l I n k D e n s i t y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageTotalInkDensity() returns the total ink density for a CMYK image. 
%  Total Ink Density (TID) is determined by adding the CMYK values in the
%  darkest shadow area in an image.
%
%  The format of the GetImageTotalInkDensity method is:
%
%      double GetImageTotalInkDensity(const Image *image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport double GetImageTotalInkDensity(Image *image)
{
  CacheView
    *image_view;

  double
    total_ink_density;

  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  /* TID is only meaningful for color-separated (CMYK) images */
  if (image->colorspace != CMYKColorspace)
    {
      (void) ThrowMagickException(&image->exception,GetMagickModule(),
        ImageError,"ColorSeparatedImageRequired","`%s'",image->filename);
      return(0.0);
    }
  status=MagickTrue;
  total_ink_density=0.0;
  exception=(&image->exception);
  image_view=AcquireVirtualCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      density;

    register const IndexPacket
      *indexes;

    register const PixelPacket
      *p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const PixelPacket *) NULL)
      {
        /* row could not be read; flag failure and skip it */
        status=MagickFalse;
        continue;
      }
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* ink density of this pixel: sum of the four channels (the index
         channel holds the K component of a CMYK image) */
      density=(double) GetPixelRed(p)+GetPixelGreen(p)+
        GetPixelBlue(p)+GetPixelIndex(indexes+x);
      /* double-checked maximum: enter the critical section only when this
         pixel could raise the current maximum, then re-test under the lock */
      if (density > total_ink_density)
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_GetImageTotalInkDensity)
#endif
        {
          if (density > total_ink_density)
            total_ink_density=density;
        }
      p++;
    }
  }
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    total_ink_density=0.0;
  return(total_ink_density);
}
app.c
/**
 * Christina Giannoula
 * cgiannoula: christina.giann@gmail.com
 */

#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <dpu.h>
#include <dpu_log.h>
#include <unistd.h>
#include <getopt.h>
#include <assert.h>
#include <math.h>
#include <omp.h>

#include "../support/common.h"
#include "../support/matrix.h"
#include "../support/params.h"
#include "../support/partition.h"
#include "../support/timer.h"
#include "../support/utils.h"

// Define the DPU Binary path as DPU_BINARY here.
#ifndef DPU_BINARY
#define DPU_BINARY "./bin/spmv_dpu"
#endif

#define DPU_CAPACITY (64 << 20) // A DPU's capacity is 64 MB

/*
 * Main Structures:
 * 1. Matrices
 * 2. Input vector
 * 3. Output vector
 * 4. Help structures for data partitioning
 */
static struct RBDCSRMatrix* A;  // sparse matrix, 2D-partitioned CSR layout
static struct COOMatrix* B;     // matrix as read from file (COO), converted into A
static val_dt* x;               // input vector
static val_dt* z;               // output vector (zero-initialized, nrows_pad long)
static val_dt* y;               // output vector (one padded row-slice per DPU)
static struct partition_info_t *part_info;

/**
 * @brief Specific information for each DPU
 */
struct dpu_info_t {
    uint32_t rows_per_dpu;      // rows assigned to this DPU
    uint32_t cols_per_dpu;      // columns of the tile assigned to this DPU
    uint32_t rows_per_dpu_pad;  // rows padded for 8-byte aligned transfers
    uint32_t prev_rows_dpu;     // global row offset of this DPU's slice
    uint32_t prev_nnz_dpu;      // global nnz offset of this DPU's slice
    uint32_t nnz;               // non-zeros assigned to this DPU
    uint32_t nnz_pad;           // nnz padded for aligned transfers
    uint32_t ptr_offset;        // offset into A->drowptr for this DPU
};
struct dpu_info_t *dpu_info;

/**
 * @brief Find the number of horizontal partitions (DPUs per vertical partition).
 * @param n total number of DPUs to split into partitions
 * @param horz_partitions output: DPUs per vertical partition
 * @param vert_partitions number of vertical (column) partitions
 */
void find_partitions(uint32_t n, uint32_t *horz_partitions, uint32_t vert_partitions) {
    uint32_t dpus_per_vert_partition = n / vert_partitions;
    *horz_partitions = dpus_per_vert_partition;
}

/**
 * @brief initialize input vector
 * @param pointer to input vector and vector size
 */
void init_vector(val_dt* vec, uint32_t size) {
    for(unsigned int i = 0; i < size; ++i) {
        // arbitrary repeating pattern 1,2,3,4,...
        vec[i] = (val_dt) (i%4+1);
    }
}

/**
 * @brief compute output in the host CPU
 *
 * Walks every vertical partition's CSR slice; 'total_nnzs' advances linearly
 * through dcolind/dval since the non-zeros are stored partition by partition.
 */
static void spmv_host(val_dt* y, struct RBDCSRMatrix *A, val_dt* x) {

    uint64_t total_nnzs = 0;
    for (uint32_t c = 0; c < A->vert_partitions; c++) {
        for(uint32_t rowIndx = 0; rowIndx < A->nrows; ++rowIndx) {
            val_dt sum = 0;
            // each vertical partition has its own (nrows+1)-long row-pointer block
            uint32_t ptr_offset = c * (A->nrows + 1);
            // column indices are local to the tile; shift into global x
            uint32_t col_offset = c * A->tile_width;
            for(uint32_t n = A->drowptr[ptr_offset + rowIndx]; n < A->drowptr[ptr_offset + rowIndx + 1]; n++) {
                uint32_t colIndx = A->dcolind[total_nnzs];
                val_dt value = A->dval[total_nnzs++];
                sum += x[col_offset + colIndx] * value;
            }
            // accumulate: partitions of the same row add into the same y entry
            y[rowIndx] += sum;
        }
    }
}

/**
 * @brief main of the host application
 */
int main(int argc, char **argv) {

    struct Params p = input_params(argc, argv);

    struct dpu_set_t dpu_set, dpu;
    uint32_t nr_of_dpus;
    uint32_t nr_of_ranks;

    // Allocate DPUs and load binary
    DPU_ASSERT(dpu_alloc(NR_DPUS, NULL, &dpu_set));
    DPU_ASSERT(dpu_load(dpu_set, DPU_BINARY, NULL));
    DPU_ASSERT(dpu_get_nr_dpus(dpu_set, &nr_of_dpus));
    DPU_ASSERT(dpu_get_nr_ranks(dpu_set, &nr_of_ranks));
    printf("[INFO] Allocated %d DPU(s)\n", nr_of_dpus);
    printf("[INFO] Allocated %d Rank(s)\n", nr_of_ranks);
    printf("[INFO] Allocated %d TASKLET(s) per DPU\n", NR_TASKLETS);

    unsigned int i;

    // Initialize input data
    B = readCOOMatrix(p.fileName);
    sortCOOMatrix(B);
    uint32_t horz_partitions = 0;
    uint32_t vert_partitions = p.vert_partitions;
    find_partitions(nr_of_dpus, &horz_partitions, p.vert_partitions);
    printf("[INFO] %dx%d Matrix Partitioning\n\n", horz_partitions, vert_partitions);
    A = coo2rbdcsr(B, horz_partitions, vert_partitions);
    freeCOOMatrix(B);

    // Initialize partition data
    part_info = partition_init(A, nr_of_dpus, p.max_nranks, NR_TASKLETS);

#if FG_TRANS
    // Record how many DPUs each rank holds and build a prefix sum over ranks
    struct dpu_set_t rank;
    uint32_t each_rank;
    DPU_RANK_FOREACH(dpu_set, rank, each_rank){
        uint32_t nr_dpus_in_rank;
        DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank));
        part_info->active_dpus_per_rank[each_rank+1] = nr_dpus_in_rank;
    }
    uint32_t sum = 0;
    for(uint32_t i=0; i < p.max_nranks+1; i++) {
        part_info->accum_dpus_ranks[i] = part_info->active_dpus_per_rank[i] + sum;
        sum += part_info->active_dpus_per_rank[i];
    }
#endif

    // Initialize help data - Padding needed
    uint32_t ncols_pad = A->ncols;
    uint32_t
tile_width_pad = A->tile_width; uint32_t nrows_pad = A->nrows; if (ncols_pad % (8 / byte_dt) != 0) ncols_pad = ncols_pad + ((8 / byte_dt) - (ncols_pad % (8 / byte_dt))); if (tile_width_pad % (8 / byte_dt) != 0) tile_width_pad = tile_width_pad + ((8 / byte_dt) - (tile_width_pad % (8 / byte_dt))); if (nrows_pad % (8 / byte_dt) != 0) nrows_pad = nrows_pad + ((8 / byte_dt) - (nrows_pad % (8 / byte_dt))); // Allocate input vector x = (val_dt *) malloc(ncols_pad * sizeof(val_dt)); // Allocate output vector z = (val_dt *) calloc(nrows_pad, sizeof(val_dt)); // Initialize input vector with arbitrary data init_vector(x, ncols_pad); // Load-balance nnzs among DPUs of the same vertical partition partition_by_nnz(A, part_info); // Initialize help data dpu_info = (struct dpu_info_t *) malloc(nr_of_dpus * sizeof(struct dpu_info_t)); dpu_arguments_t *input_args = (dpu_arguments_t *) malloc(nr_of_dpus * sizeof(dpu_arguments_t)); // Max limits for parallel transfers uint64_t max_rows_per_dpu = 0; uint64_t max_nnz_ind_per_dpu = 0; uint64_t max_nnz_val_per_dpu = 0; uint64_t max_rows_per_tasklet = 0; // Timer for measurements Timer timer; uint64_t total_nnzs = 0; i = 0; DPU_FOREACH(dpu_set, dpu, i) { // Find padding for rows and non-zero elements needed for CPU-DPU transfers uint32_t tile_horz_indx = i % A->horz_partitions; uint32_t tile_vert_indx = i / A->horz_partitions; uint32_t rows_per_dpu = part_info->row_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx + 1] - part_info->row_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx]; uint32_t prev_rows_dpu = part_info->row_split[tile_vert_indx * (A->horz_partitions + 1) + tile_horz_indx]; // Pad data to be transfered uint32_t rows_per_dpu_pad = rows_per_dpu + 1; if (rows_per_dpu_pad % (8 / byte_dt) != 0) rows_per_dpu_pad += ((8 / byte_dt) - (rows_per_dpu_pad % (8 / byte_dt))); #if INT64 || FP64 if (rows_per_dpu_pad % 2 == 1) rows_per_dpu_pad++; #endif if (rows_per_dpu_pad > max_rows_per_dpu) 
max_rows_per_dpu = rows_per_dpu_pad; unsigned int nnz, nnz_ind_pad, nnz_val_pad; nnz = A->drowptr[tile_vert_indx * (A->nrows + 1) + prev_rows_dpu + rows_per_dpu] - A->drowptr[tile_vert_indx * (A->nrows + 1) + prev_rows_dpu]; if (nnz % 2 != 0) nnz_ind_pad = nnz + 1; else nnz_ind_pad = nnz; if (nnz % (8 / byte_dt) != 0) nnz_val_pad = nnz + ((8 / byte_dt) - (nnz % (8 / byte_dt))); else nnz_val_pad = nnz; #if INT64 || FP64 if (nnz_ind_pad % 2 == 1) nnz_ind_pad++; if (nnz_val_pad % 2 == 1) nnz_val_pad++; #endif if (nnz_ind_pad > max_nnz_ind_per_dpu) max_nnz_ind_per_dpu = nnz_ind_pad; if (nnz_val_pad > max_nnz_val_per_dpu) max_nnz_val_per_dpu = nnz_val_pad; uint32_t prev_nnz_dpu = total_nnzs; total_nnzs += nnz; // Keep information per DPU dpu_info[i].rows_per_dpu = rows_per_dpu; dpu_info[i].cols_per_dpu = A->tile_width; dpu_info[i].prev_rows_dpu = prev_rows_dpu; dpu_info[i].prev_nnz_dpu = prev_nnz_dpu; dpu_info[i].nnz = nnz; dpu_info[i].nnz_pad = nnz_ind_pad; dpu_info[i].ptr_offset = tile_vert_indx * (A->nrows + 1) + prev_rows_dpu; // Find input arguments per DPU input_args[i].nrows = rows_per_dpu; input_args[i].tcols = tile_width_pad; input_args[i].nnz_pad = nnz_ind_pad; input_args[i].nnz_offset = A->drowptr[tile_vert_indx * (A->nrows + 1) + prev_rows_dpu]; #if BLNC_TSKLT_ROW // Load-balance rows across tasklets partition_tsklt_by_row(part_info, i, rows_per_dpu, NR_TASKLETS); #else // Load-balance nnz across tasklets partition_tsklt_by_nnz(A, part_info, i, rows_per_dpu, nnz, tile_vert_indx * (A->nrows + 1) + prev_rows_dpu, NR_TASKLETS); #endif uint32_t t; for (t = 0; t < NR_TASKLETS; t++) { // Find input arguments per tasklet input_args[i].start_row[t] = part_info->row_split_tasklet[t]; input_args[i].rows_per_tasklet[t] = part_info->row_split_tasklet[t+1] - part_info->row_split_tasklet[t]; if (input_args[i].rows_per_tasklet[t] > max_rows_per_tasklet) max_rows_per_tasklet = input_args[i].rows_per_tasklet[t]; } } assert(A->nnz == total_nnzs && "wrong balancing"); #if 
FG_TRANS // Find max number of rows (subset of elements of the output vector) among DPUs of each rank DPU_RANK_FOREACH(dpu_set, rank, each_rank){ uint32_t max_rows_cur_rank = 0; uint32_t nr_dpus_in_rank; DPU_ASSERT(dpu_get_nr_dpus(rank, &nr_dpus_in_rank)); uint32_t start_dpu = part_info->accum_dpus_ranks[each_rank]; for (uint32_t k = 0; k < nr_dpus_in_rank; k++) { if (start_dpu + k >= nr_of_dpus) break; if (dpu_info[start_dpu + k].rows_per_dpu > max_rows_cur_rank) max_rows_cur_rank = dpu_info[start_dpu + k].rows_per_dpu; } if (max_rows_cur_rank % 2 != 0) max_rows_cur_rank++; if (max_rows_cur_rank % (8 / byte_dt) != 0) max_rows_cur_rank += ((8 / byte_dt) - (max_rows_cur_rank % (8 / byte_dt))); part_info->max_rows_per_rank[each_rank] = (uint32_t) max_rows_cur_rank; } #endif // Initializations for parallel transfers with padding needed if (max_rows_per_dpu % 2 != 0) max_rows_per_dpu++; if (max_rows_per_dpu % (8 / byte_dt) != 0) max_rows_per_dpu += ((8 / byte_dt) - (max_rows_per_dpu % (8 / byte_dt))); if (max_nnz_ind_per_dpu % 2 != 0) max_nnz_ind_per_dpu++; if (max_nnz_val_per_dpu % (8 / byte_dt) != 0) max_nnz_val_per_dpu += ((8 / byte_dt) - (max_nnz_val_per_dpu % (8 / byte_dt))); if (max_rows_per_tasklet % (8 / byte_dt) != 0) max_rows_per_tasklet += ((8 / byte_dt) - (max_rows_per_tasklet % (8 / byte_dt))); // Re-allocations for padding needed A->drowptr = (uint32_t *) realloc(A->drowptr, (max_rows_per_dpu * (uint64_t) nr_of_dpus * sizeof(uint32_t))); A->dcolind = (uint32_t *) realloc(A->dcolind, (max_nnz_ind_per_dpu * nr_of_dpus * sizeof(uint32_t))); A->dval = (val_dt *) realloc(A->dval, (max_nnz_val_per_dpu * nr_of_dpus * sizeof(val_dt))); x = (val_dt *) realloc(x, (uint64_t) ((uint64_t) A->vert_partitions * (uint64_t) tile_width_pad) * (uint64_t) sizeof(val_dt)); y = (val_dt *) malloc((uint64_t) ((uint64_t) nr_of_dpus * (uint64_t) max_rows_per_dpu) * (uint64_t) sizeof(val_dt)); // Count total number of bytes to be transfered in MRAM of DPU unsigned long int 
total_bytes; total_bytes = ((max_rows_per_dpu) * sizeof(uint32_t)) + (max_nnz_ind_per_dpu * sizeof(uint32_t)) + (max_nnz_val_per_dpu * sizeof(val_dt)) + (tile_width_pad * sizeof(val_dt)) + (max_rows_per_dpu * sizeof(val_dt)); assert(total_bytes <= DPU_CAPACITY && "Bytes needed exceeded MRAM size"); // Copy input arguments to DPUs i = 0; DPU_FOREACH(dpu_set, dpu, i) { input_args[i].max_rows = max_rows_per_dpu; input_args[i].max_nnz_ind = max_nnz_ind_per_dpu; DPU_ASSERT(dpu_prepare_xfer(dpu, input_args + i)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, "DPU_INPUT_ARGUMENTS", 0, sizeof(dpu_arguments_t), DPU_XFER_DEFAULT)); // Copy input matrix to DPUs startTimer(&timer, 0); // Copy Rowptr i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->drowptr + dpu_info[i].ptr_offset)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, (max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt)), max_rows_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT)); // Copy Colind i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->dcolind + dpu_info[i].prev_nnz_dpu)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_rows_per_dpu * sizeof(uint32_t), max_nnz_ind_per_dpu * sizeof(uint32_t), DPU_XFER_DEFAULT)); // Copy Values i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, A->dval + dpu_info[i].prev_nnz_dpu)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt) + tile_width_pad * sizeof(val_dt) + max_rows_per_dpu * sizeof(uint32_t) + max_nnz_ind_per_dpu * sizeof(uint32_t), max_nnz_val_per_dpu * sizeof(val_dt), DPU_XFER_DEFAULT)); stopTimer(&timer, 0); // Copy input vector to DPUs startTimer(&timer, 1); i = 0; DPU_FOREACH(dpu_set, dpu, i) { uint32_t tile_vert_indx = i / A->horz_partitions; 
DPU_ASSERT(dpu_prepare_xfer(dpu, x + tile_vert_indx * A->tile_width)); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_TO_DPU, DPU_MRAM_HEAP_POINTER_NAME, max_rows_per_dpu * sizeof(val_dt), tile_width_pad * sizeof(val_dt), DPU_XFER_DEFAULT)); stopTimer(&timer, 1); // Run kernel on DPUs startTimer(&timer, 2); DPU_ASSERT(dpu_launch(dpu_set, DPU_SYNCHRONOUS)); stopTimer(&timer, 2); #if LOG // Display DPU Log (default: disabled) DPU_FOREACH(dpu_set, dpu) { DPU_ASSERT(dpulog_read_for_dpu(dpu.dpu, stdout)); } #endif // Retrieve results for output vector from DPUs startTimer(&timer, 3); #if CG_TRANS // Coarse-grained data transfers in the output vector i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_rows_per_dpu))); } DPU_ASSERT(dpu_push_xfer(dpu_set, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, max_rows_per_dpu * sizeof(val_dt), DPU_XFER_DEFAULT)); #endif #if FG_TRANS // Fine-grained data transfers in the output vector at rank granularity i = 0; DPU_FOREACH(dpu_set, dpu, i) { DPU_ASSERT(dpu_prepare_xfer(dpu, y + (i * max_rows_per_dpu))); } i = 0; //struct dpu_set_t rank; DPU_RANK_FOREACH(dpu_set, rank) { DPU_ASSERT(dpu_push_xfer(rank, DPU_XFER_FROM_DPU, DPU_MRAM_HEAP_POINTER_NAME, 0, part_info->max_rows_per_rank[i] * sizeof(val_dt), DPU_XFER_ASYNC)); i++; } DPU_ASSERT(dpu_sync(dpu_set)); #endif stopTimer(&timer, 3); // Merge partial results to the host CPU startTimer(&timer, 4); uint32_t r, c, t; for (c = 0; c < A->vert_partitions; c++) { for (r = 0; r < A->horz_partitions; r++) { #pragma omp parallel for num_threads(p.nthreads) shared(A, z, y, max_rows_per_dpu, c, r) private(t) for (t = 0; t < part_info->row_split[c * (A->horz_partitions + 1) + r+1] - part_info->row_split[c * (A->horz_partitions + 1) + r]; t++) { z[part_info->row_split[c * (A->horz_partitions + 1) + r] + t] += y[(c * A->horz_partitions + r) * max_rows_per_dpu + t]; } } } stopTimer(&timer, 4); // Print timing results printf("\n"); printf("Load Matrix "); 
printTimer(&timer, 0); printf("Load Input Vector "); printTimer(&timer, 1); printf("Kernel "); printTimer(&timer, 2); printf("Retrieve Output Vector "); printTimer(&timer, 3); printf("Merge Partial Results "); printTimer(&timer, 4); printf("\n\n"); #if CHECK_CORR // Check output startTimer(&timer, 4); val_dt *y_host = (val_dt *) calloc(nrows_pad, sizeof(val_dt)); spmv_host(y_host, A, x); bool status = true; i = 0; for (i = 0; i < A->nrows; i++) { if(y_host[i] != z[i]) { status = false; } } if (status) { printf("[" ANSI_COLOR_GREEN "OK" ANSI_COLOR_RESET "] Outputs are equal\n"); } else { printf("[" ANSI_COLOR_RED "ERROR" ANSI_COLOR_RESET "] Outputs differ!\n"); } free(y_host); #endif // Deallocation freeRBDCSRMatrix(A); free(x); free(z); free(y); partition_free(part_info); DPU_ASSERT(dpu_free(dpu_set)); return 0; }
cholesky.c
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <assert.h>
#include <mpi.h>
#include <string.h>
#include <omp.h>

//param
/*#define RUN_SERIAL*/
#define RUN_PARALLEL
#define N 100
#define WEAK_SCALING // if weak_scaling is defined, n = N * np * omp_get_num_threads()

//debug
/*#define DEBUG*/
/*#define VERBOSE*/

//printing precision
#define PRECISION 2

//other constants
#define ROOT_RANK 0
#define EPS 1e-10

// MPI() wraps every MPI call: stores the result in the caller-provided
// `mpi_result` variable and asserts success (printing details under DEBUG).
#ifdef DEBUG
#define MPI(x) mpi_result = x; \
	if (mpi_result != MPI_SUCCESS) { \
		print_error(mpi_result); \
	} \
	assert(mpi_result == MPI_SUCCESS);
#else // DEBUG
#define MPI(x) mpi_result = x; assert(mpi_result == MPI_SUCCESS);
#endif // DEBUG

//matrix element - row major
#define ELM(a, r, c, ld) ((a) + (r) * (ld) + c)

// Decode an MPI error code into its class and message and print both.
void print_error(int mpi_result){
    int eclass, estr_len;
    char estring[MPI_MAX_ERROR_STRING];
    MPI_Error_class(mpi_result, &eclass);
    MPI_Error_string(mpi_result, estring, &estr_len);
    printf("Error %d, class %d: %s\n", mpi_result, eclass, estring);
    fflush(stdout);
}

/* matrix-vector multiply : y = A * x, where A is symmetric and only lower half are stored */
void symMatVec(int n, double *a, double *x, double *y) {
    int i, j;
    for (i=0; i< n; i++) {
        double t = 0.0;
        // lower-triangle part of row i
        for (j=0; j<= i; j++)
            t += *ELM(a, i, j, n) * x[j];
        // mirrored upper part, read from the stored lower half
        for (j= i+1; j< n; j++)
            t += *ELM(a, j, i, n) * x[j];
        y[i] = t;
    }
}

// Print the lower triangle (incl. diagonal) of an n x n matrix with leading dim ld.
void print_lower(double *a, int n, int ld){
    int i, j;
    for(i = 0; i < n; i++){
        for(j = 0; j <= i; j++)
            printf("%.*lf\t", PRECISION, *ELM(a, i, j, ld));
        printf("\n");
    }
}

// Print the upper triangle; below-diagonal slots are shown as placeholders.
void print_upper(double *a, int n, int ld){
    int i, j;
    for(i = 0; i < n; i++){
        for(j = 0; j < n; j++)
            if (j < i)
                printf("__________\t");
            else
                printf("%.*lf\t", PRECISION, *ELM(a, i, j, ld));
        printf("\n");
    }
}

// Print a full nrow x ncol block with leading dimension ld.
void print_full(double *a, int nrow, int ncol, int ld){
    int i, j;
    for(i = 0; i < nrow; i++){
        for(j = 0; j <ncol; j++)
            printf("%.*lf\t", PRECISION, *ELM(a, i, j, ld));
        printf("\n");
    }
}

/* solve Ax = b */
// Serial reference solver. Overwrites `a` with its LDL^T factors and `x`
// with the solution; `b` is read-only here.
void solveSym_serial(int n, double *a, double *x, double *b) {
    int i, j, k;
    /* LDLT decomposition: A = L * D * L^t */
    for (i=0; i< n; i++) {
        double invp = 1.0 / *ELM(a, i, i, n);
        for (j= i+1; j< n; j++) {
            double aji = *ELM(a, j, i, n);
            *ELM(a,j, i, n) *= invp;
            for (k= i+1; k<= j; k++)
                *ELM(a, j, k, n) -= aji * *ELM(a, k, i, n);
        }
#ifdef VERBOSE
        printf("matrix after iteration %d\n", i);
        print_lower(a, n, n);
        printf("\n");
#endif // VERBOSE
    }
#ifdef VERBOSE
    printf("array after serial ldlt: \n");
    print_lower(a, n, n);
    printf("\n");
#endif // VERBOSE
    /* forward solve L y = b: but y is stored in x
       can be merged to the previous loop */
    for (i=0; i< n; i++) {
        double t = b[i];
        for (j=0; j< i; j++)
            t -= *ELM(a, i, j, n) * x[j];
        x[i] = t;
    }
    /* backward solve D L^t x = y */
    for (i= n-1; i>= 0; i--) {
        double t = x[i] / *ELM(a, i, i, n);
        for (j= i+1; j< n; j++)
            t -= *ELM(a, j, i, n) * x[j];
        x[i] = t;
    }
}

//calculate number of rows for process with rank
// (rows are dealt out cyclically: row r belongs to process r % np)
inline int get_nrows(int n, int np, int rank){
    return (n + np - rank - 1) / np;
}

//get original row index (size) from local row
inline int get_row(int np, int rank, int local_row){
    return local_row * np + rank;
}

/* solve Ax = b */
// Distributed LDL^T solver: root scatters rows cyclically, every rank
// factorizes its rows column-by-column (Allgatherv of the pivot column each
// iteration), results are gathered back and the triangular solves run on root.
// `a`, `x`, `b` are only dereferenced on ROOT_RANK.
void solveSym(int rank, int np, int n, double *a, double *x, double *b) {
    int i, j, k, tag, receiver, mpi_result, row, skipped_rows_count, sender;
    int nrows_local = get_nrows(n, np, rank);
    double *local_a = malloc(sizeof(double) * n * nrows_local);
    double *local_a_t = malloc(sizeof(double) * n * nrows_local);
    double *first_column = malloc(sizeof(double) * n);
    double *allgather_buf = malloc(sizeof(double) * n);
    assert(local_a != NULL);
    assert(local_a_t != NULL);
    assert(first_column != NULL);
    assert(allgather_buf != NULL);
    double tmp, aji; // NOTE(review): `tmp` is never used in this function.
    int nrequests;
    // root posts one Isend per non-local row; others post one Irecv per local row
    if (rank == 0)
        nrequests = n - nrows_local;
    else
        nrequests = nrows_local;
    MPI_Request *requests = malloc(sizeof(MPI_Request) * nrequests);
    MPI_Status *statuses = malloc(sizeof(MPI_Status) * nrequests);
    int *displs = malloc(sizeof(int) * np);
    int *recvcounts = malloc(sizeof(int) * np);
    assert(requests != NULL);
    assert(statuses != NULL);
    assert(displs != NULL);
    assert(recvcounts != NULL);

    //root process
    if (rank == ROOT_RANK){
        j = 0;
        //deliver row data to each other processes from root process
        // (threads send concurrently; requires MPI_THREAD_MULTIPLE, which
        // main() requests; the critical section serializes slot allocation)
        #pragma omp parallel for private(tag, receiver, k)
        for(i = 0; i < n; i++){
            tag = i / np;
            receiver = i % np;
            if (receiver != 0){
                #pragma omp critical
                k = j++;
                MPI(MPI_Isend(ELM(a, i, 0, n), i + 1, MPI_DOUBLE, receiver, tag, MPI_COMM_WORLD, requests + k));
            }
        }
        //copy to my own
        //(n + np - 1) / np = nrows_local
        #pragma omp parallel for private(row)
        for(i = 0; i < nrows_local; i++){
            row = get_row(np, rank, i);
            memcpy(ELM(local_a, i, 0, n), ELM(a, row, 0, n), sizeof(double) * (row + 1));
        }
    }else {
        //child process
        #pragma omp parallel for private(row, tag)
        for(i = 0; i < nrows_local; i++){
            row = get_row(np, rank, i);
            tag = i;
            MPI(MPI_Irecv(ELM(local_a, i, 0, n), row + 1, MPI_DOUBLE, ROOT_RANK, tag, MPI_COMM_WORLD, requests + i));
        }
        MPI(MPI_Waitall(nrequests, requests, statuses));
        /*for(i = 0; i < nrows_local; i++){*/
            /*printf("rank %d status %d: %d\n", rank, i, statuses[i].MPI_ERROR);*/
            /*if (statuses[i].MPI_ERROR != MPI_SUCCESS)*/
                /*print_error(statuses[i].MPI_ERROR);*/
        /*}*/
    }

    //transpose to column major
    // NOTE(review): nested `omp parallel for` regions; inner ones only fan
    // out if nested parallelism is enabled.
    #pragma omp parallel for private(row)
    for(i = 0; i < nrows_local; i++){
        //also take diagonal elms
        row = get_row(np, rank, i);
        #pragma omp parallel for
        for(j = 0; j <= row; j++){
            *ELM(local_a_t, j, i, nrows_local) = *ELM(local_a, i, j, n);
        }
    }

    //wait all requests in root process
    if (rank == ROOT_RANK){
        MPI(MPI_Waitall(nrequests, requests, statuses));
    }

    /* LDLT decomposition: A = L * D * L^t */
    for(i = 0; i < n; i++){
        // how many of iteration i's column entries each rank contributes
        for(j = 0; j < np; j++){
            recvcounts[j] = get_nrows(n, np, j) - get_nrows(i, np, j);
            displs[j] = j == 0 ? 0 : displs[j - 1] + recvcounts[j -1];
        }
        //broadcast first column (i.e. first row of column-major) of current iteration
        skipped_rows_count = get_nrows(i, np, rank);
        MPI(MPI_Allgatherv(ELM(local_a_t, i, skipped_rows_count, nrows_local), recvcounts[rank], MPI_DOUBLE, allgather_buf, recvcounts, displs, MPI_DOUBLE, MPI_COMM_WORLD));
#ifdef VERBOSE
        if (i == 1)
            for(j = 0; j < np; j++){
                MPI(MPI_Barrier(MPI_COMM_WORLD));
                if (rank == j){
                    printf("allgather (rank = %d): ", j);
                    for(k = 0; k < n; k++)
                        printf("%.*lf\t", PRECISION, allgather_buf[k]);
                    printf("\n");
                }
            }
#endif
        //put elms of collected buffer into correct order
        // (undo the rank-major packing of the allgather buffer)
        #pragma omp parallel for private(row)
        for(j = 0; j < np; j++){
            row = recvcounts[j];
            #pragma omp parallel for
            for(k = 0; k < row; k++)
                first_column[(k + get_nrows(i, np, j)) * np + j] = allgather_buf[displs[j] + k];
        }
#ifdef VERBOSE
        if (i == 1)
            for(j = 0; j < np; j++){
                MPI(MPI_Barrier(MPI_COMM_WORLD));
                if (rank == j){
                    printf("first column (rank = %d): ", j);
                    for(k = i; k < n; k++)
                        printf("%.*lf\t", PRECISION, first_column[k]);
                    printf("\n");
                }
            }
#endif
        //pre devide first_column to speedup (reduce deviding operation)
        row = get_row(np, rank, nrows_local -1);
        #pragma omp parallel for
        for(j = i + 1; j <= row; j++)
            first_column[j] /= first_column[i];

        //do LDLT calculation
        //for all rows
        skipped_rows_count = get_nrows(i + 1, np, rank); //get_nrows of i + 1 because we are going to skip the row i (do j-loop from i+ 1 to n)
        #pragma omp parallel for private(aji, row)
        for(j = skipped_rows_count; j < nrows_local; j++){
            //backup aji
            aji = *ELM(local_a_t, i, j, nrows_local);
            row = get_row(np, rank, j);
            //first elm
            *ELM(local_a_t, i, j, nrows_local) = first_column[row];
            //other elms
            #pragma omp parallel for
            for(k = i + 1; k <= row; k++)
                *ELM(local_a_t, k, j, nrows_local) -= aji * first_column[k];
        }
        MPI(MPI_Barrier(MPI_COMM_WORLD));
#ifdef VERBOSE
        for(j = 0; j < np; j++){
            if (rank == j){
                printf("matrix(transposed) after iteration %d rank %d\n", i, j);
                print_full(local_a_t, get_row(np, rank, nrows_local - 1) + 1, nrows_local, nrows_local);
                printf("\n");
                fflush(stdout);
            }
            MPI(MPI_Barrier(MPI_COMM_WORLD));
        }
        if (rank == ROOT_RANK){
            printf("first column: ");
            for(j = i; j < n; j++)
                printf("%.*lf\t", PRECISION, first_column[j]);
            printf("\n");
        }
#endif // VERBOSE
    }

    //transpose back to row-major
    #pragma omp parallel for private(row)
    for(i = 0; i < nrows_local; i++){
        row = get_row(np, rank, i);
        //also take diagonal elms
        #pragma omp parallel for
        for(j = 0; j <= row; j++){
            *ELM(local_a, i, j, n) = *ELM(local_a_t, j, i, nrows_local);
        }
        if (rank != ROOT_RANK){
            //trasfer back to root process
            MPI( MPI_Isend( ELM(local_a, i, 0, n), row + 1, MPI_DOUBLE, ROOT_RANK, i, MPI_COMM_WORLD, requests + i ));
        }
    }

    if (rank == ROOT_RANK){
        //receive calculated buffer from all processes
        j = 0;
        #pragma omp parallel for private(tag, sender, k)
        for(i = 0; i < n; i++){
            tag = i / np;
            sender = i % np;
            if (sender != 0){
                #pragma omp critical
                k = j++;
                MPI(MPI_Irecv(ELM(a, i, 0, n), i + 1, MPI_DOUBLE, sender, tag, MPI_COMM_WORLD, requests + k));
            }
        }
        //copy from my own
        //(n + np - 1) / np = nrows_local
        #pragma omp parallel for private(row)
        for(i = 0; i < nrows_local; i++){
            row = get_row(np, rank, i);
            memcpy(ELM(a, row, 0, n), ELM(local_a, i, 0, n), sizeof(double) * (row + 1));
        }
        /*MPI(MPI_Waitall(nrequests, requests, statuses));*/
        MPI(MPI_Waitall(nrequests, requests, statuses));
        /*for(i = 0; i < nrows_local; i++){*/
            /*if (statuses[i].MPI_ERROR != MPI_SUCCESS){*/
                /*printf("rank %d status %d: %d\n", rank, i, statuses[i].MPI_ERROR);*/
                /*print_error(statuses[i].MPI_ERROR);*/
            /*}*/
        /*}*/
#ifdef VERBOSE
        if (rank == ROOT_RANK){
            printf("array after parallel computation: \n");
            print_lower(a, n, n);
            printf("\n");
        }
#endif // VERBOSE
        /* forward solve L y = b: but y is stored in x
           can be merged to the previous loop */
        for (i=0; i< n; i++) {
            double t = b[i];
            #pragma omp parallel for reduction(-:t)
            for (j=0; j< i; j++)
                t -= *ELM(a, i, j, n) * x[j];
            x[i] = t;
        }
        /* backward solve D L^t x = y */
        for (i= n-1; i>= 0; i--) {
            double t = x[i] / *ELM(a, i, i, n);
            #pragma omp parallel for reduction(-:t)
            for (j= i+1; j< n; j++)
                t -= *ELM(a, j, i, n) * x[j];
            x[i] = t;
        }
    } else {
        MPI(MPI_Waitall(nrequests, requests, statuses));
    }

    free(statuses);
    free(local_a);
    free(requests);
    free(first_column);
    free(allgather_buf);
    free(displs);
    free(recvcounts);
    free(local_a_t);
}

// Euclidean distance between two n-vectors.
double norm(double *x, double* y, int n){
    /* check error norm */
    double e = 0;
    int i;
    for (i=0; i< n; i++)
        e += (x[i] - y[i]) * (x[i] - y[i]);
    return sqrt(e);
}

//error handler
// (currently unused — the registration in main() is commented out)
void cholesky_mpi_error_handler( MPI_Comm *comm, int *err, ... )
{
    if (*err != MPI_ERR_OTHER) {
        printf( "Unexpected error code\n" );fflush(stdout);
    } else {
        printf("error caused in comm %d, error: %d", *comm, err ? *err : -1);
        fflush(stdout);
    }
}

// Elapsed seconds for the serial and parallel solver runs.
typedef struct {
    double serial;
    double parallel;
} bmtime_t;

// Build a random diagonally-dominant symmetric system on root, solve it
// serially (optional) and in parallel, and time both. A timing of -1 marks
// a run whose residual exceeded EPS (i.e. a failed check).
// NOTE(review): on non-root ranks a/xx/b/x stay uninitialized and are passed
// to solveSym by value — solveSym never dereferences them off-root, but the
// pointers hold indeterminate values there; confirm before refactoring.
bmtime_t benchmark(int n, int np, int rank){
    bmtime_t bmtime;
    int i, j, mpi_result;
    double *a, *xx, *b, *x, e, s, *a_copy, *b_copy;
    double time_start, time_stop;

    if (rank == ROOT_RANK){
        /* matrix */
        a = malloc(sizeof(double) * n * n);
        assert(a != NULL);
        /* fill lower triangular elements */
        for (i=0; i< n; i++)
            for (j=0; j< i; j++)
                *ELM(a, i, j, n) = rand()/(RAND_MAX + 1.0);
        /* fill diagonal elements */
        for (i=0; i< n; i++) {
            s = 0.0;
            for (j=0; j< i; j++)
                s += *ELM(a, i, j, n);
            for (j= i+1; j< n; j++)
                s += *ELM(a, j, i, n); /* upper triangular */
            *ELM(a, i, i, n) = s + 1.0; /* diagonal dominant */
        }
#ifdef VERBOSE
        printf("original matrix: \n");
        print_lower(a, n, n);
        printf("\n");
#endif // VERBOSE
        /* first make the solution */
        xx = malloc(sizeof(double) * n);
        assert(xx != NULL);
        for (i=0; i< n; i++)
            xx[i] = 1.0; /* or anything you like */
        /* make right hand side b = Ax */
        b = malloc(sizeof(double) * n);
        assert(b != NULL);
        symMatVec(n, a, xx, b);
        /* solution vector, pretend to be unknown */
        x = malloc(sizeof(double) * n);
        assert(x != NULL);

#ifdef RUN_SERIAL
        //clone data (the solver destroys its inputs)
        a_copy = malloc(sizeof(double) * n * n);
        assert(a_copy != NULL);
        b_copy = malloc(sizeof(double) * n);
        assert(b_copy != NULL);
        memcpy(a_copy, a, sizeof(double) * n * n);
        memcpy(b_copy, b, sizeof(double) * n);
        //serial solver
        time_start = MPI_Wtime();
        solveSym_serial(n, a_copy, x, b_copy);
        time_stop = MPI_Wtime();
        bmtime.serial = time_stop - time_start;
        e = norm(x, xx, n);
        if (e >= EPS){
            bmtime.serial = -1;
            fprintf(stderr, "expected error norm less than %e, but %e received while serial benchmark with size = %d\n", EPS, e, n);
        }
        //free data
        free(a_copy);
        free(b_copy);
        memset(x, 0, sizeof(double) * n);
#else // RUN_SERIAL
        bmtime.serial = 0;
#endif // RUN_SERIAL
    }

#ifdef RUN_PARALLEL
    MPI(MPI_Barrier(MPI_COMM_WORLD));
    //parallel version
    time_start = MPI_Wtime();
    /* solve: the main computation */
    solveSym(rank, np, n, a, x, b);
    MPI(MPI_Barrier(MPI_COMM_WORLD));
    time_stop = MPI_Wtime();
    bmtime.parallel = time_stop - time_start;
    if (rank == ROOT_RANK) {
        e = norm(x, xx, n);
        if (e >= EPS){
            bmtime.parallel = -1;
            fprintf(stderr, "expected error norm less than %e, but %e received while parallel benchmark with size = %d\n", EPS, e, n);
        }
    }
#else // RUN_PARALLEL
    bmtime.parallel = -1;
#endif // RUN_PARALLEL

    if (rank == ROOT_RANK){
        //free data
        free(a);
        free(xx);
        free(b);
        free(x);
    }
    return bmtime;
}

// MPI entry point: initializes threaded MPI, sizes the problem (weak scaling
// by default), runs the benchmark, and prints "np nt n t_serial t_parallel".
int main(int argc, char **argv)
{
    int mpi_result, n;
    double time_start, time_stop;
    // Initialize the MPI environment; MPI_THREAD_MULTIPLE is required because
    // solveSym issues MPI calls from OpenMP worker threads.
    int provided;
    MPI(MPI_Init_thread(&argc, &argv, MPI_THREAD_MULTIPLE, &provided ));
    if (MPI_THREAD_MULTIPLE != provided){
        fprintf(stderr, "Expected mpi thread support %d but %d returned\n", MPI_THREAD_MULTIPLE, provided);
        return 1;
    }
    MPI(MPI_Barrier(MPI_COMM_WORLD));
    time_start = MPI_Wtime();

    //set error handler
    MPI_Errhandler err_handler;
    /*MPI(MPI_Comm_create_errhandler(&cholesky_mpi_error_handler, &err_handler));*/
    /*MPI(MPI_Comm_set_errhandler(MPI_COMM_WORLD, err_handler));*/
#ifdef DEBUG
    MPI(MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN));
#endif // DEBUG

    // Get the number of processes
    int np;
    MPI(MPI_Comm_size(MPI_COMM_WORLD, &np));

    // Get the rank of the process
    int rank;
    MPI(MPI_Comm_rank(MPI_COMM_WORLD, &rank));

    int nt;
    #pragma omp parallel
    #pragma omp master
    nt = omp_get_num_threads();

#ifdef WEAK_SCALING
    n = N * np * nt;
#else // WEAK_SCALING
    n = N;
#endif // WEAK_SCALING

    bmtime_t bmtime = benchmark(n, np, rank);
    if (rank == ROOT_RANK){
        printf("%d\t%d\t%d\t%.10lf\t%.10lf\n", np, nt, n, bmtime.serial, bmtime.parallel);
    }
    MPI(MPI_Barrier(MPI_COMM_WORLD));
    time_stop = MPI_Wtime();
    /*if (rank == ROOT_RANK){*/
        /*printf("Total job time / limit (10 min): %.2lf%%\n", (time_stop - time_start) / 60 / 10 * 100);*/
    /*}*/

    //free error handler
    /*MPI_Errhandler_free( &err_handler );*/

    // Finalize the MPI environment.
    MPI_Finalize();
    return 0;
}
sieve2-task.c
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/* Wall-clock time in microseconds (gettimeofday-based). */
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;

/*
 * Count the primes in the inclusive range [from, to].
 * Only odd numbers are stored (2 is added specially when from <= 2);
 * each candidate prime i sieves its odd multiples inside the slice.
 */
int eratosthenesBlock(const int from, const int to) {
    // 1. Number of odd integers in [from, to].
    // BUGFIX: the old formula (to - from + 1) / 2 under-counts by one when
    // both bounds are odd, so the marking loop could write one element past
    // the buffer once slices became disjoint. (to+1)/2 - from/2 is exact for
    // every parity combination and equals the old value for all others.
    const int memorySize = (to + 1) / 2 - from / 2;
    char * isPrime = (char *) malloc((memorySize) * sizeof(char));
    // mark every odd number in the slice as a potential prime
    for (int i = 0; i < memorySize; i++)
        isPrime[i] = 1;

    // 2. Starting from i=3, try every odd i with i*i <= to ...
    for (int i = 3; i*i <= to; i+=2) {
        // cheap wheel: composites divisible by a small prime were already
        // handled when that small prime was the sieving value
        // skip multiples of three: 9, 15, 21, 27, ...
        if (i >= 3*3 && i % 3 == 0) continue;
        // skip multiples of five
        if (i >= 5*5 && i % 5 == 0) continue;
        // skip multiples of seven
        if (i >= 7*7 && i % 7 == 0) continue;
        // skip multiples of eleven
        if (i >= 11*11 && i % 11 == 0) continue;
        // skip multiples of thirteen
        if (i >= 13*13 && i % 13 == 0) continue;

        // first multiple of i inside the slice ...
        int minJ = ((from+i-1)/i)*i;
        // ... but never below i*i (smaller multiples have a smaller factor)
        if (minJ < i*i) minJ = i*i;
        // start value must be odd (even multiples are not stored)
        if ((minJ & 1) == 0) minJ += i;

        // 3. Mark all odd multiples of i between i^2 and `to`
        for (int j = minJ; j <= to; j += 2*i) {
            int index = j - from;
            isPrime[index/2] = 0;
        }
    }

    // 4. The unmarked numbers are primes, count primes
    int found = 0;
    for (int i = 0; i < memorySize; i++)
        found += isPrime[i];
    // 2 is not odd => include on demand
    if (from <= 2) found++;

    // 5. We are done with the isPrime array, free it
    free(isPrime);
    return found;
}

/*
 * Count primes <= lastNumber by sieving disjoint slices in parallel
 * (one OpenMP task per slice).
 */
int eratosthenes(int lastNumber, int sliceSize) {
    int found = 0;
    // Each slice covers the inclusive range [from, to]. Slices MUST be
    // disjoint. BUGFIX: the old code used to = from + sliceSize while
    // stepping from += sliceSize, so consecutive slices shared one number;
    // an odd prime on that boundary was counted twice (e.g. lastNumber=10,
    // sliceSize=3 reported 5 primes instead of 4).
    #pragma omp parallel
    {
        #pragma omp single
        {
            for (int from = 2; from <= lastNumber; from += sliceSize) {
                #pragma omp task
                {
                    int to = from + sliceSize - 1;   // last number of THIS slice only
                    if (to > lastNumber) to = lastNumber;
                    int res = eratosthenesBlock(from, to);
                    #pragma omp atomic
                    found += res;
                }
            }
        }
    }
    return found;
}

/* Print command-line usage (thread-count argument only with OpenMP). */
void usage(void) {
#ifdef _OPENMP
    printf("sieve <range> <slice_size> <thread count>\n");
    printf(" <range> is an integer N - the range is from 2 - N\n");
    printf(" <slice_size> is to sieve the list from 2 - N in blocks\n");
    printf(" <thread count> is the number of threads to use\n");
#else
    printf("sieve <range> <slice_size>\n");
    printf(" <range> is an integer N - the range is from 2 - N\n");
    printf(" <slice_size> is to sieve the list from 2 - N in blocks\n");
#endif
}

int main(int argc, char ** argv) {
    // argv[1]: Upper-bound on primes
    // argv[2]: Slice (block) size
    // argv[3]: Number of threads to run in parallel if OpenMP enabled
#ifdef _OPENMP
    if (argc != 4) {
#else
    if (argc != 3) {
#endif
        printf("Error: Invalid number of arguments\n");
        usage();
        return 0;
    }

    int range_max = atoi(argv[1]);
    int slice_size = atoi(argv[2]);
#ifdef _OPENMP
    int num_threads = atoi(argv[3]);
#endif

    if (range_max < 2) {
        printf("Error: <range> Must be an integer greater than or equal to 2\n");
        usage();
        return 0;
    }
    if ((slice_size > range_max) || (slice_size < 2)) {
        printf("Error: <slice_size> Must be an integer greater than or equal to 2 but smaller or equal than range\n");
        usage();
        return 0;
    }

#ifdef _OPENMP
    if (num_threads < 1) {
        printf("Error: <thread count> Must be a positive value between 1 an %d\n", omp_get_max_threads());
        usage();
        return 0;
    } else if (num_threads > omp_get_max_threads()) {
        num_threads = omp_get_max_threads();
    }
    omp_set_num_threads(num_threads);

    // Make sure we haven't created too many threads.
    int temp = (range_max - 1) / num_threads;
    if ((1 + temp) < (int)sqrt((double)range_max)) {
        printf("Error: Too many threads requested!\n");
        printf(" The first thread must have a block size >= sqrt(n)\n");
        exit(1);
    }
#endif

    double stamp;
    START_COUNT_TIME;

    // Solutions count
    int count = eratosthenes(range_max, slice_size);

    STOP_COUNT_TIME;

    // Print the results.
    printf("Number of primes smaller than or equal to %d = %d\n", range_max, count);
    printf ("%0.6f\n", stamp);

    return 0;
}
fci_4pdm.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at
       http://www.apache.org/licenses/LICENSE-2.0
   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
 *
 * Author: Qiming Sun <osirpt.sun@gmail.com>
 */

#include <stdlib.h>
#include <string.h>
#include <assert.h>
//#include <omp.h>
#include "config.h"
#include "vhf/fblas.h"
#include "fci.h"
#define MIN(X,Y) ((X)<(Y)?(X):(Y))
#define BLK 48
#define BUFBASE 96

double FCI_t1ci_sf(double *ci0, double *t1, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb);

/*
 * t2[:,i,j,k,l] = E^i_j E^k_l|ci0>
 */
// Beta-excitation half: for each beta string in the batch, apply the
// beta^+ beta excitations (from clink_indexb) on top of the t1 intermediate.
static void rdm4_0b_t2(double *ci0, double *t2, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb)
{
    const int nnorb = norb * norb;
    const int n4 = nnorb * nnorb;
    int i, j, k, l, a, sign, str1;
    double *t1 = malloc(sizeof(double) * nb * nnorb);
    double *pt1, *pt2;
    _LinkT *tab;
    // form t1 which has beta^+ beta |t1> => target stra_id
    FCI_t1ci_sf(ci0, t1, nb, stra_id, 0, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);

#pragma omp parallel private(i, j, k, l, a, str1, sign, pt1, pt2, tab)
{
    // one beta string (k) per iteration; t2 slices are disjoint across k
#pragma omp for schedule(static, 1) nowait
    for (k = 0; k < bcount; k++) {
        memset(t2+k*n4, 0, sizeof(double)*n4);
        tab = clink_indexb + (strb_id+k) * nlinkb;
        for (j = 0; j < nlinkb; j++) {
            a    = EXTRACT_CRE (tab[j]);  // created orbital
            i    = EXTRACT_DES (tab[j]);  // destroyed orbital
            str1 = EXTRACT_ADDR(tab[j]);  // linked string address
            sign = EXTRACT_SIGN(tab[j]);  // +/-1 fermionic phase
            pt1 = t1 + str1 * nnorb;
            pt2 = t2 + k * n4 + (i*norb+a)*nnorb;
            if (sign > 0) {
                for (l = 0; l < nnorb; l++) {
                    pt2[l] += pt1[l];
                }
            } else {
                for (l = 0; l < nnorb; l++) {
                    pt2[l] -= pt1[l];
                }
            }
        }
    }
}
    free(t1);
}

/*
 * t2[:,i,j,k,l] = E^i_j E^k_l|ci0>
 */
// Alpha-excitation half: accumulates into t2 (which rdm4_0b_t2 zeroed), one
// alpha link per iteration; each thread builds its own t1 scratch buffer.
static void rdm4_a_t2(double *ci0, double *t2, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb)
{
    const int nnorb = norb * norb;
    const int n4 = nnorb * nnorb;
    int i, j, k, l, a, sign, str1;
    double *pt1, *pt2;
    _LinkT *tab = clink_indexa + stra_id * nlinka;

#pragma omp parallel private(i, j, k, l, a, str1, sign, pt1, pt2)
{
    double *t1 = malloc(sizeof(double) * bcount * nnorb);
    // NOTE(review): different j iterations can target the same (i,a) slot of
    // t2; updates appear safe only because the schedule assigns large chunks
    // — confirm against upstream PySCF before changing the schedule.
#pragma omp for schedule(static, 40)
    for (j = 0; j < nlinka; j++) {
        a    = EXTRACT_CRE (tab[j]);
        i    = EXTRACT_DES (tab[j]);
        str1 = EXTRACT_ADDR(tab[j]);
        sign = EXTRACT_SIGN(tab[j]);
        // form t1 which has alpha^+ alpha |t1> => target stra_id (through str1)
        FCI_t1ci_sf(ci0, t1, bcount, str1, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
        for (k = 0; k < bcount; k++) {
            pt1 = t1 + k * nnorb;
            pt2 = t2 + k * n4 + (i*norb+a)*nnorb;
            if (sign > 0) {
                for (l = 0; l < nnorb; l++) {
                    pt2[l] += pt1[l];
                }
            } else {
                for (l = 0; l < nnorb; l++) {
                    pt2[l] -= pt1[l];
                }
            }
        }
    }
    free(t1);
}
}

// Full double-excitation intermediate: beta part initializes t2, alpha part
// accumulates on top (order matters — rdm4_0b_t2 does the memset).
void FCI_t2ci_sf(double *ci0, double *t2, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb)
{
    rdm4_0b_t2(ci0, t2, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
    rdm4_a_t2 (ci0, t2, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
}

// Accumulate rdm3 += tbra^T . t2ket, but only for the "upper triangle" of the
// leading compound index (see comment below), tiled in blocks of `blk` rows
// so each dgemm stays within BLK columns.
static void tril3pdm_particle_symm(double *rdm3, double *tbra, double *t2ket, int bcount, int ncre, int norb)
{
    assert(norb <= BLK);
    const char TRANS_N = 'N';
    const char TRANS_T = 'T';
    const double D1 = 1;
    int nnorb = norb * norb;
    int n4 = nnorb * nnorb;
    int i, j, k, m, n, blk1;
    int iblk = MIN(BLK/norb, norb);
    int blk = iblk * norb;

    // Equivalent full contraction (kept for reference):
    //dgemm_(&TRANS_N, &TRANS_T, &n4, &nncre, &bcount,
    //       &D1, t2ket, &n4, tbra, &nnorb, &D1, rdm3, &n4);

    // "upper triangle" F-array[k,j,i], k<=i<=j
    for (j = 0; j < ncre; j++) {
    for (n = 0; n < norb; n++) {
        // full-size tiles
        for (k = 0; k < j+1-iblk; k+=iblk) {
            m = k * norb;
            i = m + blk;
            dgemm_(&TRANS_N, &TRANS_T, &i, &blk, &bcount,
                   &D1, t2ket, &n4, tbra+m, &nnorb,
                   &D1, rdm3+m*n4, &n4);
        }
        // remainder tile up to row (j+1)*norb
        m = k * norb;
        i = (j+1) * norb;
        blk1 = i - m;
        dgemm_(&TRANS_N, &TRANS_T, &i, &blk1, &bcount,
               &D1, t2ket, &n4, tbra+m, &nnorb,
               &D1, rdm3+m*n4, &n4);
        // advance to the next (j,n) column of the ket / output
        t2ket += nnorb;
        rdm3 += nnorb;
    } }
}

// Accumulate rdm2 += tket^T . tbra over the upper-triangle part only,
// blocked the same way as tril3pdm_particle_symm.
static void tril2pdm_particle_symm(double *rdm2, double *tbra, double *tket, int bcount, int ncre, int norb)
{
    assert(norb <= BLK);
    const char TRANS_N = 'N';
    const char TRANS_T = 'T';
    const double D1 = 1;
    int nnorb = norb * norb;
    int nncre = norb * ncre;
    int m, n;
    int blk = MIN(BLK/norb, norb) * norb;

    // Equivalent full contraction (kept for reference):
    //dgemm_(&TRANS_N, &TRANS_T, &nncre, &nncre, &bcount,
    //       &D1, tket, &nnorb, tbra, &nnorb, &D1, rdm2, &nnorb);

    // upper triangle part of F-array
    for (m = 0; m < nncre-blk; m+=blk) {
        n = m + blk;
        dgemm_(&TRANS_N, &TRANS_T, &n, &blk, &bcount,
               &D1, tket, &nnorb, tbra+m, &nnorb,
               &D1, rdm2+m*nnorb, &nnorb);
    }
    n = nncre - m;
    dgemm_(&TRANS_N, &TRANS_T, &nncre, &n, &bcount,
           &D1, tket, &nnorb, tbra+m, &nnorb,
           &D1, rdm2+m*nnorb, &nnorb);
}

// Accumulate the 1- and 2-particle density matrices for one batch of beta
// strings: rdm2 += t1ket . (t1bra transposed in its orbital indices)^T and
// rdm1 += t1ket . bra-amplitudes.
static void make_rdm12_sf(double *rdm1, double *rdm2, double *bra, double *ket, double *t1bra, double *t1ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb)
{
    const char TRANS_N = 'N';
    const char TRANS_T = 'T';
    const int INC1 = 1;
    const double D1 = 1;
    const int nnorb = norb * norb;
    int k, l;
    size_t n;
    double *tbra = malloc(sizeof(double) * nnorb * bcount);
    double *pbra, *pt1;

    // transpose the orbital pair (k,l) -> (l,k) of each bra intermediate
    for (n = 0; n < bcount; n++) {
        pbra = tbra + n * nnorb;
        pt1 = t1bra + n * nnorb;
        for (k = 0; k < norb; k++) {
            for (l = 0; l < norb; l++) {
                pbra[k*norb+l] = pt1[l*norb+k];
            }
        }
    }
    dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
           &D1, t1ket, &nnorb, tbra, &nnorb,
           &D1, rdm2, &nnorb);
    // NOTE(review): uses t1ket and leading dim `nb` here, while the spin0
    // variant below uses tbra and `na` — presumed intentional (spin-free vs
    // spin0 addressing); confirm against upstream PySCF.
    dgemv_(&TRANS_N, &nnorb, &bcount, &D1, t1ket, &nnorb, bra+stra_id*nb+strb_id, &INC1, &D1, rdm1, &INC1);
    free(tbra);
}

// Spin0 (ci0[a,b] == ci0[b,a]) variant of make_rdm12_sf: off-diagonal
// (n+strb_id != stra_id) contributions are weighted by 2 to account for the
// symmetric counterpart.
static void make_rdm12_spin0(double *rdm1, double *rdm2, double *bra, double *ket, double *t1bra, double *t1ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb)
{
    const char TRANS_N = 'N';
    const char TRANS_T = 'T';
    const int INC1 = 1;
    const double D1 = 1;
    const int nnorb = norb * norb;
    int k, l;
    size_t n;
    double *tbra = malloc(sizeof(double) * nnorb * bcount);
    double *pbra, *pt1;
    double factor;

    for (n = 0; n < bcount; n++) {
        if (n+strb_id == stra_id) {
            factor = 1;  // diagonal block counted once
        } else {
            factor = 2;  // off-diagonal: symmetric partner folded in
        }
        pbra = tbra + n * nnorb;
        pt1 = t1bra + n * nnorb;
        for (k = 0; k < norb; k++) {
            for (l = 0; l < norb; l++) {
                pbra[k*norb+l] = pt1[l*norb+k] * factor;
            }
        }
    }
    dgemm_(&TRANS_N, &TRANS_T, &nnorb, &nnorb, &bcount,
           &D1, t1ket, &nnorb, tbra, &nnorb,
           &D1, rdm2, &nnorb);
    dgemv_(&TRANS_N, &nnorb, &bcount, &D1, tbra, &nnorb, bra+stra_id*na+strb_id, &INC1, &D1, rdm1, &INC1);
    free(tbra);
}

// Kernel for one (stra_id, strb_id-batch): builds the single (t1) and double
// (t2) excitation intermediates for bra and ket, then accumulates rdm1..rdm4.
// (Function continues past the end of this chunk.)
void FCI4pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb)
{
    const int nnorb = norb * norb;
    const int n4 = nnorb * nnorb;
    const int n3 = nnorb * norb;
    const size_t n6 = nnorb * nnorb * nnorb;
    int i, j, k, l, ij;
    size_t n;
    double *tbra;
    double *t1bra = malloc(sizeof(double) * nnorb * bcount * 2);
    double *t2bra = malloc(sizeof(double) * n4 * bcount * 2);
    double *t1ket = t1bra + nnorb * bcount;
    double *t2ket = t2bra + n4 * bcount;
    double *pbra, *pt2;

    // t2[:,i,j,k,l] = E^i_j E^k_l|ket>
    FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
    FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
    if (bra == ket) {
        // bra == ket: reuse the bra intermediates (second buffer half unused)
        t1ket = t1bra;
        t2ket = t2bra;
    } else {
        FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb);
FCI_t2ci_sf(ket, t2ket, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); } #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2) { tbra = malloc(sizeof(double) * nnorb * bcount); #pragma omp for schedule(static, 1) nowait for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) for (n = 0; n < bcount; n++) { for (k = 0; k < norb; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3]; } } } i = ij / norb; j = ij - i * norb; // contract <bra-of-Eij| with |E^k_l E^m_n ket> tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket, bcount, j+1, norb); // rdm3 tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, bcount, j+1, norb); } free(tbra); } make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket, bcount, stra_id, strb_id, norb, na, nb); free(t1bra); free(t2bra); } /* * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...] */ void FCI4pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { int fill1; if (strb_id+bcount <= stra_id) { fill1 = bcount; } else if (stra_id >= strb_id) { fill1 = stra_id - strb_id + 1; } else { return; } const int nnorb = norb * norb; const int n4 = nnorb * nnorb; const int n3 = nnorb * norb; const size_t n6 = nnorb * nnorb * nnorb; int i, j, k, l, ij; size_t n; double factor; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * fill1 * 2); double *t2bra = malloc(sizeof(double) * n4 * fill1 * 2); double *t1ket = t1bra + nnorb * fill1; double *t2ket = t2bra + n4 * fill1; double *pbra, *pt2; FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); if (bra == ket) { t1ket = 
t1bra; t2ket = t2bra; } else { FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(ket, t2ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); } #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2, factor) { tbra = malloc(sizeof(double) * nnorb * fill1); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) i = ij / norb; j = ij - i * norb; for (n = 0; n < fill1; n++) { if (n+strb_id == stra_id) { factor = 1; } else { factor = 2; } for (k = 0; k <= j; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3] * factor; } } } // contract <bra-of-Eij| with |E^k_l E^m_n ket> tril3pdm_particle_symm(rdm4+(j*norb+i)*n6, tbra, t2ket, fill1, j+1, norb); // rdm3 tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, fill1, j+1, norb); } free(tbra); } make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket, fill1, stra_id, strb_id, norb, na, nb); free(t1bra); free(t2bra); } /* * This function returns incomplete rdm3, rdm4, in which, particle * permutation symmetry is assumed. 
* kernel can be FCI4pdm_kern_sf, FCI4pdm_kern_spin0 */ void FCIrdm4_drv(void (*kernel)(), double *rdm1, double *rdm2, double *rdm3, double *rdm4, double *bra, double *ket, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { const size_t nnorb = norb * norb; const size_t n4 = nnorb * nnorb; int ib, strk, bcount; _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); memset(rdm1, 0, sizeof(double) * nnorb); memset(rdm2, 0, sizeof(double) * n4); memset(rdm3, 0, sizeof(double) * n4 * nnorb); memset(rdm4, 0, sizeof(double) * n4 * n4); for (strk = 0; strk < na; strk++) { for (ib = 0; ib < nb; ib += BUFBASE) { bcount = MIN(BUFBASE, nb-ib); (*kernel)(rdm1, rdm2, rdm3, rdm4, bra, ket, bcount, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } } free(clinka); free(clinkb); } void FCI3pdm_kern_sf(double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { const int nnorb = norb * norb; const int n4 = nnorb * nnorb; const int n3 = nnorb * norb; int i, j, k, l, ij; size_t n; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * bcount); double *t1ket = malloc(sizeof(double) * nnorb * bcount); double *t2bra = malloc(sizeof(double) * n4 * bcount); double *pbra, *pt2; // t2[:,i,j,k,l] = E^i_j E^k_l|ket> FCI_t1ci_sf(bra, t1bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t2ci_sf(bra, t2bra, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(ket, t1ket, bcount, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2) { tbra = malloc(sizeof(double) * 
nnorb * bcount); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) for (n = 0; n < bcount; n++) { pbra = tbra + n * nnorb; pt2 = t2bra + n * n4 + ij; for (k = 0; k < norb; k++) { for (l = 0; l < norb; l++) { pbra[k*norb+l] = pt2[l*n3+k*nnorb]; } } } i = ij / norb; j = ij - i * norb; tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, bcount, j+1, norb); } free(tbra); } make_rdm12_sf(rdm1, rdm2, bra, ket, t1bra, t1ket, bcount, stra_id, strb_id, norb, na, nb); free(t1bra); free(t1ket); free(t2bra); } /* * use symmetry ci0[a,b] == ci0[b,a], t2[a,b,...] == t2[b,a,...] */ void FCI3pdm_kern_spin0(double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int bcount, int stra_id, int strb_id, int norb, int na, int nb, int nlinka, int nlinkb, _LinkT *clink_indexa, _LinkT *clink_indexb) { int fill1; if (strb_id+bcount <= stra_id) { fill1 = bcount; } else if (stra_id >= strb_id) { fill1 = stra_id - strb_id + 1; } else { return; } const int nnorb = norb * norb; const int n4 = nnorb * nnorb; const int n3 = nnorb * norb; int i, j, k, l, ij; size_t n; double factor; double *tbra; double *t1bra = malloc(sizeof(double) * nnorb * fill1); double *t1ket = malloc(sizeof(double) * nnorb * fill1); double *t2bra = malloc(sizeof(double) * n4 * fill1); double *pbra, *pt2; // t2[:,i,j,k,l] = E^i_j E^k_l|ket> FCI_t2ci_sf(bra, t2bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(bra, t1bra, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); FCI_t1ci_sf(ket, t1ket, fill1, stra_id, strb_id, norb, na, nb, nlinka, nlinkb, clink_indexa, clink_indexb); #pragma omp parallel private(ij, i, j, k, l, n, tbra, pbra, pt2, factor) { tbra = malloc(sizeof(double) * nnorb * fill1); #pragma omp for schedule(dynamic, 4) for (ij = 0; ij < nnorb; ij++) { // loop ij for (<ket| E^j_i E^l_k) i = ij / norb; j = ij - i * norb; for (n = 0; n < fill1; n++) { if 
(n+strb_id == stra_id) { factor = 1; } else { factor = 2; } for (k = 0; k <= j; k++) { pbra = tbra + n * nnorb + k*norb; pt2 = t2bra + n * n4 + k*nnorb + ij; for (l = 0; l < norb; l++) { pbra[l] = pt2[l*n3] * factor; } } } tril2pdm_particle_symm(rdm3+(j*norb+i)*n4, tbra, t1ket, fill1, j+1, norb); } free(tbra); } make_rdm12_spin0(rdm1, rdm2, bra, ket, t1bra, t1ket, fill1, stra_id, strb_id, norb, na, nb); free(t1bra); free(t1ket); free(t2bra); } /* * This function returns incomplete rdm3, in which, particle * permutation symmetry is assumed. * kernel can be FCI3pdm_kern_ms0, FCI3pdm_kern_spin0 */ void FCIrdm3_drv(void (*kernel)(), double *rdm1, double *rdm2, double *rdm3, double *bra, double *ket, int norb, int na, int nb, int nlinka, int nlinkb, int *link_indexa, int *link_indexb) { const size_t nnorb = norb * norb; const size_t n4 = nnorb * nnorb; int ib, strk, bcount; _LinkT *clinka = malloc(sizeof(_LinkT) * nlinka * na); _LinkT *clinkb = malloc(sizeof(_LinkT) * nlinkb * nb); FCIcompress_link(clinka, link_indexa, norb, na, nlinka); FCIcompress_link(clinkb, link_indexb, norb, nb, nlinkb); memset(rdm1, 0, sizeof(double) * nnorb); memset(rdm2, 0, sizeof(double) * n4); memset(rdm3, 0, sizeof(double) * n4 * nnorb); for (strk = 0; strk < na; strk++) { for (ib = 0; ib < nb; ib += BUFBASE) { bcount = MIN(BUFBASE, nb-ib); (*kernel)(rdm1, rdm2, rdm3, bra, ket, bcount, strk, ib, norb, na, nb, nlinka, nlinkb, clinka, clinkb); } } free(clinka); free(clinkb); }
MorphologicalErosionImageFilter.h
/* * MIT License * * Copyright (c) 2018-2019 Benjamin Köhler * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/

#pragma once

#ifndef BK_MORPHOLOGICALEROSIONIMAGEFILTER_H
#define BK_MORPHOLOGICALEROSIONIMAGEFILTER_H

#include <algorithm>
#include <cassert>
#include <initializer_list>
#include <type_traits>
#include <vector>

#include <bkDataset/image/filter/KernelFactory.h>
#include <bkDataset/image/filter/MorphologicalOperationImageFilter.h>
#include <bkDataset/image/filter/DistanceMapImageFilter.h>
#include <bkDataset/lib/bkDataset_export.h>

#ifdef BK_EMIT_PROGRESS

    #include <bk/Progress>
    #include <bk/Localization>

#endif

namespace bk
{
  /// Morphological erosion with a configurable (per-dimension) kernel size.
  /// Isotropic kernels use a distance-map shortcut; anisotropic kernels
  /// fall back to the generic morphological-operation filter.
  class BKDATASET_EXPORT MorphologicalErosionImageFilter
  {
      //====================================================================================================
      //===== DEFINITIONS
      //====================================================================================================
      using self_type = MorphologicalErosionImageFilter;

      //====================================================================================================
      //===== MEMBERS
      //====================================================================================================
      // one kernel extent per image dimension
      std::vector<unsigned int> _kernel_size;

      //====================================================================================================
      //===== CONSTRUCTORS & DESTRUCTOR
      //====================================================================================================
    public:
      /// @{ -------------------------------------------------- CTOR
      MorphologicalErosionImageFilter();
      MorphologicalErosionImageFilter(const self_type& other);
      MorphologicalErosionImageFilter(self_type&& other) noexcept;
      MorphologicalErosionImageFilter(unsigned int nDims, unsigned int size);
      /// @}

      /// @{ -------------------------------------------------- DTOR
      ~MorphologicalErosionImageFilter();
      /// @}

      //====================================================================================================
      //===== GETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- GET KERNEL SIZE
      [[nodiscard]] const std::vector<unsigned int>& kernel_size() const;
      /// @}

      //====================================================================================================
      //===== SETTER
      //====================================================================================================
      /// @{ -------------------------------------------------- OPERATOR =
      [[maybe_unused]] auto operator=(const self_type& other) -> self_type&;
      [[maybe_unused]] auto operator=(self_type&& other) noexcept -> self_type&;
      /// @}

      /// @{ -------------------------------------------------- SET KERNEL SIZE
      template<typename T>
      void set_kernel_size(std::initializer_list<T> ilist)
      { _kernel_size.assign(ilist); }

      template<typename Iter>
      void set_kernel_size(Iter first, Iter last)
      { _kernel_size.assign(first, last); }

      void set_kernel_size(unsigned int nDims, unsigned int size);
      /// @}

      //====================================================================================================
      //===== FUNCTIONS
      //====================================================================================================
      /// @{ -------------------------------------------------- APPLY
      /// Returns the eroded copy of img; set_kernel_size() must have been
      /// called beforehand.
      template<typename TImage>
      [[nodiscard]] TImage apply(const TImage& img) const
      {
          assert(!_kernel_size.empty() && "call set_kernel_size() first");

          // fast path only applies if all dimensions share one extent
          const bool kernel_has_isotropic_size = std::all_of(_kernel_size.begin(), _kernel_size.end(), [&](unsigned int x)
          { return x == _kernel_size.front(); });

          if (kernel_has_isotropic_size)
          {
              #ifdef BK_EMIT_PROGRESS
              bk::Progress& prog = bk_progress.emplace_task(3, ___("Morphological erosion filtering"));
              #endif

              TImage res;
              res.set_size(img.size());

              const auto minVal = img.min_value();

              #ifdef BK_EMIT_PROGRESS
              prog.increment(1);
              #endif

              // distance of every voxel to the nearest minVal voxel
              DistanceMapImageFilter f;
              f.set_value(minVal);
              auto distance_map = img.filter(f);

              #ifdef BK_EMIT_PROGRESS
              prog.increment(1);
              #endif

              const unsigned int halfKernelSize = _kernel_size.front() >> 1;

              // voxels within half a kernel of the background are eroded
              #pragma omp parallel for
              for (unsigned int i = 0; i < img.num_values(); ++i)
              { res[i] = distance_map[i] <= halfKernelSize ? minVal : img[i]; }

              #ifdef BK_EMIT_PROGRESS
              prog.set_finished();
              #endif

              return res;
          }
          else
          {
              // anisotropic kernel: generic (slower) morphological erosion
              return MorphologicalOperationImageFilter::apply(img, KernelFactory::make_erosion_morphological_of_sizes(_kernel_size));
          }
      }
      /// @}
  }; // class MorphologicalErosionImageFilter
} // namespace bk

#endif //BK_MORPHOLOGICALEROSIONIMAGEFILTER_H
GB_unaryop__lnot_uint8_uint8.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__lnot_uint8_uint8
// op(A') function:  GB_tran__lnot_uint8_uint8

// C type:   uint8_t
// A type:   uint8_t
// cast:     uint8_t cij = (uint8_t) aij
// unaryop:  cij = !(aij != 0)

#define GB_ATYPE \
    uint8_t

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    uint8_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x)   \
    z = !(x != 0) ;

// casting
#define GB_CASTING(z, x)   \
    uint8_t z = (uint8_t) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{                          \
    /* aij = Ax [pA] */    \
    GB_GETA (aij, Ax, pA) ;  \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ;  \
    GB_OP (GB_CX (pC), x) ;  \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LNOT || GxB_NO_UINT8)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_unop__lnot_uint8_uint8
(
    uint8_t *restrict Cx,
    const uint8_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // element-wise logical-not over the anz entries, in parallel
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB_tran__lnot_uint8_uint8
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the shared transpose template performs the work using the macros above
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
ep_seq.c
#include <stdio.h> #include <stdlib.h> #include <string.h> #include <pthread.h> #include <math.h> #include <omp.h> #include "util.h" #define TRUE 1 #define FALSE 0 #define DEBUG 1 #define NMAX 1000 #define MAX_LINE 256 /*Intervalo [0, 255]*/ #define RGB_SIZE 256 #define PI 3.14159265359 long int x; int main(int argc, char **argv) { FILE *arq1, *arq2; char *infile, *outfile; char a[MAX_LINE]; int nr_inter, nr_proc, nr_threads; int i, j, k, cont, columns, lines, comp_max_val; float val, distribute; float gx, gy, g; Pixel **M, **M2, **aux; /*Matriz de pixels*/ i = j = k = 0; /*Usemode*/ if (argc < 5) { printf("Modo de usar:\n\tArg1: nome do arquivo de entrada;\n\tArg2: nome do arquivo de saída\n\t"); printf("Arg3: número de iterações;\n\tArg4: número de processadores.\n\t"); exit(1); } infile = argv[1]; outfile = argv[2]; nr_inter = atoi(argv[3]); nr_proc = atoi(argv[4]); /*nr_threads = 2 * nr_proc;*/ /*Considerando hyperthread*/ /*id = malloc(nr_threads * sizeof(int));*/ arq1 = fopen(infile, "r"); if (arq1 == NULL) printf("Erro, não foi possível abrir o arquivo\n"); else { /*Read the input file*/ if (DEBUG) printf("Consegui abrir o arquivo!\n"); cont = 0; while ((a[0] = fgetc(arq1)) != EOF) { if (a[0] == '#' || a[0] == 'P') { fgets(a, MAX_LINE, arq1); if (DEBUG) printf("Ignorando comentários...\n"); } else if (cont == 0) { ungetc(a[0], arq1); fscanf(arq1,"%d %d\n", &columns, &lines); fscanf(arq1,"%d\n", &comp_max_val); cont++; if (DEBUG) { printf("Num_linhas = %d, num_colunas = %d\n", lines, columns); printf("comp_max_val = %d\n", comp_max_val); } /*Alocação das matrizes*/ M = (Pixel **) malloc(lines * sizeof(Pixel*)); M2 = (Pixel **) malloc(lines * sizeof(Pixel*)); for (i = 0; i < lines; i++) { M[i] = (Pixel *) malloc(columns * sizeof(Pixel)); M2[i] = (Pixel *) malloc(columns * sizeof(Pixel)); } } else { ungetc(a[0], arq1); for (i = 0; i < lines; i++) { for (j = 0; j < columns; j++) { fscanf(arq1, "%f %f %f", &M[i][j].R, &M[i][j].G, &M[i][j].B); M[i][j].R /= 
RGB_SIZE; M[i][j].G = (2*PI * M[i][j].G) / RGB_SIZE; M[i][j].B /= RGB_SIZE; /* Calcular Rx, Ry, Bx e By quando ler a entrada \/*/ M[i][j].Rx = horizontal_component(M[i][j].R, M[i][j].G); M[i][j].Bx = horizontal_component(M[i][j].B, M[i][j].G); M[i][j].Ry = vertical_component(M[i][j].R, M[i][j].G); M[i][j].By = vertical_component(M[i][j].B, M[i][j].G); } } break; } } } fclose(arq1); if (DEBUG) printf("Arquivo lido!\n"); /*IMPORTANTE: As bordas nunca se alteram.*/ for (k = 0; k < nr_inter; k++) { aux = M; M = M2; M2 = aux; for (i = 1; i < lines - 1; i++) { for (j = 1; j < columns - 1; j++) { if (M[i][j].Rx > 0) { if (j != columns -1) { val = transfer(M[i][j+1].R, M[i][j].Rx); M2[i][j+1].Rx += val; M2[i][j].Rx -= val; } if (j != 1) { val = transfer(M[i][j-1].B, M[i][j].Bx); M2[i][j-1].Bx += val; /*Recebe no sentido oposto*/ M2[i][j].Bx -= val; } } else { /*Recebe um valor positivo*/ if (j != 1) { val = transfer(M[i][j-1].R, M[i][j].Rx); M2[i][j-1].Rx -= val; M2[i][j].Rx += val; } if (j != columns - 1) { val = transfer(M[i][j+1].B, M[i][j].Bx); M2[i][j+1].Bx -= val; /*Recebe no sentido oposto*/ M2[i][j].Bx += val; } } if (M[i][j].Ry > 0) { if (i != 1) { val = transfer(M[i-1][j].R, M[i][j].Ry); M2[i-1][j].Ry += val; M2[i][j].Ry -= val; } if (i != lines - 1) { val = transfer(M[i+1][j].B, M[i][j].By); M2[i+1][j].By += val; M2[i][j].By -= val; } } else { /*Recebe um valor positivo*/ if (i != lines - 1) { val = transfer(M[i+1][j].R, M[i][j].Ry); M2[i+1][j].Ry -= val; M2[i][j].Ry += val; } if (i != 1) { val = transfer(M[i-1][j].B, M[i][j].By); M2[i-1][j].By -= val; M2[i][j].By += val; } } } } /*O bloco abaixo checa se os pixels vizinhos estouraram*/ for (i = 1; i < lines - 1; i++) { for (j = 1; j < columns - 1; j++) { /*Paralelizar as checagens do R e B se tiver pelo menos oito threads, podendo deixar os 4 if's internos em paralelo*/ /*Checa o R*/ if (M2[i][j].R > 1) { distribute = (M2[i][j].R - 1) / 4; M2[i][j].R = 1; /*Dá para parelelizar os if's abaixo*/ /*Os if's checam 
se os vizinhos não estão na borda e não serão estourados*/ if (i-1 > 0 && M2[i-1][j].R + distribute < 1) M2[i-1][j].R += distribute; if (i+1 < lines && M2[i+1][j].R + distribute < 1) M2[i+1][j].R += distribute; if (j-1 > 0 && M2[i][j-1].R + distribute < 1) M2[i][j-1].R += distribute; if (j+1 < columns && M2[i][j+1].R + distribute < 1) M2[i][j+1].R += distribute; } /*Checa o B*/ if (M2[i][j].B > 1) { distribute = (M2[i][j].B - 1) / 4; M2[i][j].B = 1; /*Os if's checam se os vizinhos não estão na borda e não serão estourados*/ if (i-1 > 0 && M2[i-1][j].B + distribute < 1) M2[i-1][j].B += distribute; if (i+1 < lines && M2[i+1][j].B + distribute < 1) M2[i+1][j].B += distribute; if (j-1 > 0 && M2[i][j-1].B + distribute < 1) M2[i][j-1].B += distribute; if (j+1 < columns && M2[i][j+1].B + distribute < 1) M2[i][j+1].B += distribute; } } } /*Laço para atualizar G*/ for (i = 1; i < lines - 1; i++) { #pragma omp parallel for num_threads(nr_threads) schedule(dynamic) for (j = 1; j < columns - 1; j++) { gx = M2[i][j].Rx + M2[i][j].Bx; gy = M2[i][j].Ry + M2[i][j].By; g = sqrt((gx*gx) + (gy*gy)); M2[i][j].G += g; if (M2[i][j].G > 2 * PI) M2[i][j].G -= 2 * PI; } } } /*Feito isso, checar se algum valor ultrapassou 1 *ou ficou negativo (embora provavelmente não dê para *ficar negativo)*/ /*Escreve no arquivo de saída*/ arq2 = fopen(outfile, "w"); if (arq2 == NULL) printf("Erro, não foi possível abrir o arquivo\n"); else { /*sprintf(outfile, "%s.ppm", outfile);*/ fprintf(arq2, "P3\n%d %d\n255\n", columns, lines); for (i = 0; i < lines; i++) { for (j = 0; j < columns; j++) { fprintf(arq2, "%.3f %.3f %.3f ", (float)(RGB_SIZE* M2[i][j].R), (float)((RGB_SIZE* M2[i][j].G) / (2*PI)), (float)(RGB_SIZE* M2[i][j].B)); } fprintf(arq2, "\n"); } fprintf(stdout, "A imagem foi salva no arquivo: %s\n", outfile); fclose(arq2); } for (i = 0; i < lines; i++) { free(M[i]); free(M2[i]); } free(M); free(M2); return 0; }
FalseSharing.c
#include<stdlib.h> #include<stdio.h> #include<omp.h> struct s { float value; int pad[NUMPAD]; }Array[4]; int main () { int i,j; i=0;j=0; const int SomeBigNumber = 100000000; // keep less than 2B omp_set_num_threads(NUMT); double time0=omp_get_wtime(); #pragma omp parallel for default(none) private (i,j) shared(Array) for(i = 0; i < 4; i++) { unsigned int seed = 0; for(j = 0; j < SomeBigNumber; j++ ) { Array[ i ].value = Array[ i ].value + (float)rand_r(&seed); } } double time1=omp_get_wtime(); double execution_time=time1-time0; printf("Execution time = %lf\n",execution_time); return 0; }
omp_report_mask.c
/* Routine reports OpenMP process affinity information. Get thread number and cpus (cpu_ids) Create static space (proc_mask) to hold all masks (done in a single region) Determine the mask for each thread (insert it in proc_mask) print mask header (one thread in single region) print mask (one thread in single region) free spaces return */ #include <stdio.h> #include <omp.h> #include <unistd.h> #include <stdlib.h> #include <ctype.h> void omp_report_mask(){ int nthrds, thrd; //Thread info int ncpus, nel_set; static int ** proc_mask; int i,j, ierr; char * dummy; thrd = omp_get_thread_num(); nthrds = omp_get_num_threads(); ncpus = (int) sysconf(_SC_NPROCESSORS_ONLN); if(omp_get_num_procs() != ncpus){ printf("ERROR: ncpus_by_omp=%d, ncpus_sched=%d\n",omp_get_num_procs(),ncpus); exit(1); } #pragma omp single { proc_mask = malloc(sizeof(int*)*nthrds); for(i=0;i<nthrds;i++) proc_mask[i] = malloc(sizeof(int)*ncpus ); for(i=0;i<nthrds;i++) for(j=0;j<ncpus;j++) proc_mask[i][j] =0; } ierr = boundto(&nel_set,proc_mask[thrd]); #pragma omp barrier #pragma omp single { print_mask(1, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[thrd]); //print header for(thrd=0;thrd<nthrds;thrd++) print_mask(0, dummy, 0, 0,thrd, ncpus, 1,nthrds, proc_mask[thrd]); if(nthrds>50) print_mask(1, dummy, 0, 0,0, ncpus, 1,nthrds, proc_mask[thrd]); //print header for(i=0;i<nthrds;i++) free( proc_mask[i]); free( proc_mask); } }
par_add_cycle.c
/******************************************************************************
 * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * ParAMG cycling routine
 *
 *****************************************************************************/

#include "_hypre_parcsr_ls.h"
#include "par_amg.h"

/*--------------------------------------------------------------------------
 * hypre_BoomerAMGCycle
 *
 * One additive/mult-additive/simple AMG V-cycle.  Levels below `addlvl`
 * (or above `add_end`) are treated multiplicatively (smooth, form
 * residual, restrict); levels in [addlvl, add_end] are treated
 * additively via the composite operator Lambda (or D_inv for "simple").
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_BoomerAMGAdditiveCycle( void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParCSRMatrix **P_array;
   hypre_ParCSRMatrix **R_array;
   hypre_ParCSRMatrix *Lambda;
   hypre_ParCSRMatrix *Atilde;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParVector *Vtemp;
   hypre_ParVector *Ztemp;
   hypre_ParVector *Xtilde, *Rtilde;
   hypre_IntArray **CF_marker_array;
   HYPRE_Int *CF_marker;

   HYPRE_Int num_levels;
   HYPRE_Int addlvl, add_end;
   HYPRE_Int additive;
   HYPRE_Int mult_additive;
   HYPRE_Int simple;
   HYPRE_Int add_last_lvl;
   HYPRE_Int i, j, num_rows;
   HYPRE_Int n_global;
   HYPRE_Int rlx_order;

   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   HYPRE_Int level;
   HYPRE_Int coarse_grid;
   HYPRE_Int fine_grid;
   HYPRE_Int rlx_down;
   HYPRE_Int rlx_up;
   HYPRE_Int rlx_coarse;
   HYPRE_Int *grid_relax_type;
   HYPRE_Int *num_grid_sweeps;
   hypre_Vector **l1_norms;
   HYPRE_Real alpha, beta;
   HYPRE_Real *u_data;
   HYPRE_Real *v_data;
   hypre_Vector *l1_norms_lvl;
   HYPRE_Real *D_inv;
   HYPRE_Real *x_global;
   HYPRE_Real *r_global;
   HYPRE_Real *relax_weight;
   HYPRE_Real *omega;

#if 0
   HYPRE_Real   *D_mat;
   HYPRE_Real   *S_vec;
#endif

   HYPRE_ANNOTATE_FUNC_BEGIN;

   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   P_array = hypre_ParAMGDataPArray(amg_data);
   R_array = hypre_ParAMGDataRArray(amg_data);
   CF_marker_array = hypre_ParAMGDataCFMarkerArray(amg_data);
   Vtemp = hypre_ParAMGDataVtemp(amg_data);
   Ztemp = hypre_ParAMGDataZtemp(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   additive = hypre_ParAMGDataAdditive(amg_data);
   mult_additive = hypre_ParAMGDataMultAdditive(amg_data);
   simple = hypre_ParAMGDataSimple(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   grid_relax_type = hypre_ParAMGDataGridRelaxType(amg_data);
   Lambda = hypre_ParAMGDataLambda(amg_data);
   Atilde = hypre_ParAMGDataAtilde(amg_data);
   Xtilde = hypre_ParAMGDataXtilde(amg_data);
   Rtilde = hypre_ParAMGDataRtilde(amg_data);
   l1_norms = hypre_ParAMGDataL1Norms(amg_data);
   D_inv = hypre_ParAMGDataDinv(amg_data);
   relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   omega = hypre_ParAMGDataOmega(amg_data);
   rlx_order = hypre_ParAMGDataRelaxOrder(amg_data);
   num_grid_sweeps = hypre_ParAMGDataNumGridSweeps(amg_data);

   /* Initialize */
   /* first additive level: the largest of the three mode settings
    * (unused modes are negative) */
   addlvl = hypre_max(additive, mult_additive);
   addlvl = hypre_max(addlvl, simple);
   if (add_last_lvl == -1 ) add_end = num_levels-1;
   else add_end = add_last_lvl;
   Solve_err_flag = 0;

   /*---------------------------------------------------------------------
    * Main loop of cycling --- multiplicative version --- V-cycle
    *--------------------------------------------------------------------*/

   /* down cycle */
   rlx_down = grid_relax_type[1];
   rlx_up = grid_relax_type[2];
   rlx_coarse = grid_relax_type[3];
   for (level = 0; level < num_levels-1; level++)
   {
      HYPRE_ANNOTATE_MGLEVEL_BEGIN(level);

      fine_grid = level;
      coarse_grid = level + 1;

      u_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[fine_grid]));
      v_data = hypre_VectorData(hypre_ParVectorLocalVector(Vtemp));
      l1_norms_lvl = l1_norms[level];

      hypre_ParVectorSetConstantValues(U_array[coarse_grid], 0.0);

      if (level < addlvl || level > add_end) /* multiplicative version */
      {
         /* smoothing step */
         if (rlx_down == 0)
         {
            /* weighted Jacobi using the diagonal of A */
            HYPRE_Real *A_data = hypre_CSRMatrixData(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            HYPRE_Int *A_i = hypre_CSRMatrixI(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
                  u_data[i] = relax_weight[level]*v_data[i] / A_data[A_i[i]];
            }
         }
         else if (rlx_down != 18)
         {
            /* general relaxation (CF-ordered if rlx_order set) */
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_down,0,*/
            CF_marker = hypre_IntArrayData(CF_marker_array[fine_grid]);
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                      CF_marker, rlx_down,rlx_order,1,
                                      relax_weight[fine_grid],
                                      omega[fine_grid],
                                      l1_norms[level] ? hypre_VectorData(l1_norms[level]) : NULL,
                                      U_array[fine_grid], Vtemp, Ztemp);
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
            }
         }
         else
         {
            /* rlx_down == 18: l1-scaled Jacobi */
            num_rows = hypre_CSRMatrixNumRows(hypre_ParCSRMatrixDiag(A_array[fine_grid]));
            for (j=0; j < num_grid_sweeps[1]; j++)
            {
               hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
               for (i = 0; i < num_rows; i++)
               {
                  u_data[i] += v_data[i] / hypre_VectorData(l1_norms_lvl)[i];
               }
            }
         }

         /* residual: Vtemp = f - A u, then restrict to the coarse grid */
         alpha = -1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, A_array[fine_grid], U_array[fine_grid],
                                  beta, Vtemp);

         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }
      else /* additive version */
      {
         hypre_ParVectorCopy(F_array[fine_grid],Vtemp);
         if (level == 0) /* compute residual */
         {
            hypre_ParVectorCopy(Vtemp, Rtilde);
            hypre_ParVectorCopy(U_array[fine_grid],Xtilde);
         }
         /* additive path: restrict only, no smoothing here */
         alpha = 1.0;
         beta = 0.0;
         hypre_ParCSRMatrixMatvecT(alpha,R_array[fine_grid],Vtemp,
                                   beta,F_array[coarse_grid]);
      }

      HYPRE_ANNOTATE_MGLEVEL_END(level);
   }

   /* additive smoothing and solve coarse grid */
   HYPRE_ANNOTATE_MGLEVEL_BEGIN(num_levels - 1);
   if (addlvl < num_levels)
   {
      if (simple > -1)
      {
         /* "simple" mode: one diagonally-scaled correction x += D^{-1} r */
         x_global = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
         r_global = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));
         n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Xtilde));
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < n_global; i++)
            x_global[i] += D_inv[i]*r_global[i];
      }
      else
      {
         if (num_grid_sweeps[1] > 1)
         {
            /* two-sweep variant: r <- 2r - Atilde*Lambda*r before the
             * Lambda correction below */
            n_global = hypre_VectorSize(hypre_ParVectorLocalVector(Rtilde));
            hypre_ParVector *Tmptilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
            hypre_Vector *Tmptilde_local = hypre_SeqVectorCreate(n_global);
            hypre_SeqVectorInitialize(Tmptilde_local);
            hypre_ParVectorLocalVector(Tmptilde) = Tmptilde_local;
            hypre_ParVectorOwnsData(Tmptilde) = 1;
            hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 0.0, Tmptilde);
            hypre_ParVectorScale(2.0,Rtilde);
            hypre_ParCSRMatrixMatvec(-1.0, Atilde, Tmptilde, 1.0, Rtilde);
            hypre_ParVectorDestroy(Tmptilde);
         }
         hypre_ParCSRMatrixMatvec(1.0, Lambda, Rtilde, 1.0, Xtilde);
      }
      if (addlvl == 0) hypre_ParVectorCopy(Xtilde, U_array[0]);
   }
   if (add_end < num_levels -1)
   {
      /* coarsest level handled multiplicatively */
      fine_grid = num_levels -1;
      for (j=0; j < num_grid_sweeps[3]; j++)
         if (rlx_coarse == 18)
            hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                              1, 1,
                              l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                              1.0, 1.0 ,0,0,0,0,
                              U_array[fine_grid], Vtemp, Ztemp);
         else
            hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                   NULL, rlx_coarse,0,0,
                                   relax_weight[fine_grid],
                                   omega[fine_grid],
                                   l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                   U_array[fine_grid], Vtemp, Ztemp);
   }
   HYPRE_ANNOTATE_MGLEVEL_END(num_levels - 1);

   /* up cycle */
   for (level = num_levels-1; level > 0; level--)
   {
      HYPRE_ANNOTATE_MGLEVEL_BEGIN(level);

      fine_grid = level - 1;
      coarse_grid = level;

      if (level <= addlvl || level > add_end+1) /* multiplicative version */
      {
         /* interpolate the coarse correction, then post-smooth */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                                  U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
         if (rlx_up != 18)
         {
            /*hypre_BoomerAMGRelax(A_array[fine_grid],F_array[fine_grid],NULL,rlx_up,0,*/
            CF_marker = hypre_IntArrayData(CF_marker_array[fine_grid]);
            for (j=0; j < num_grid_sweeps[2]; j++)
            {
               hypre_BoomerAMGRelaxIF(A_array[fine_grid],F_array[fine_grid],
                                      CF_marker,
                                      rlx_up,rlx_order,2,
                                      relax_weight[fine_grid],
                                      omega[fine_grid],
                                      l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                      U_array[fine_grid], Vtemp, Ztemp);
            }
         }
         else if (rlx_order)
         {
            /* l1-Jacobi in C/F order: F points (-1) first, then C points (1) */
            CF_marker = hypre_IntArrayData(CF_marker_array[fine_grid]);
            HYPRE_Int loc_relax_points[2];
            loc_relax_points[0] = -1;
            loc_relax_points[1] = 1;
            for (j=0; j < num_grid_sweeps[2]; j++)
            {
               for (i=0; i < 2; i++)
               {
                  hypre_ParCSRRelax_L1_Jacobi(A_array[fine_grid],F_array[fine_grid],
                                              CF_marker,
                                              loc_relax_points[i],
                                              1.0,
                                              l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                              U_array[fine_grid], Vtemp);
               }
            }
         }
         else
            for (j=0; j < num_grid_sweeps[2]; j++)
               hypre_ParCSRRelax(A_array[fine_grid], F_array[fine_grid],
                                 1, 1,
                                 l1_norms[fine_grid] ? hypre_VectorData(l1_norms[fine_grid]) : NULL,
                                 1.0, 1.0 ,0,0,0,0,
                                 U_array[fine_grid], Vtemp, Ztemp);
      }
      else /* additive version */
      {
         /* interpolate only; the additive correction was applied above */
         alpha = 1.0;
         beta = 1.0;
         hypre_ParCSRMatrixMatvec(alpha, P_array[fine_grid],
                                  U_array[coarse_grid],
                                  beta, U_array[fine_grid]);
      }

      HYPRE_ANNOTATE_MGLEVEL_END(level);
   }

   HYPRE_ANNOTATE_FUNC_END;

   return(Solve_err_flag);
}

/* hypre_CreateLambda: builds the composite additive operator.  Only the
 * declaration head is visible in this chunk; the definition continues
 * below. */
HYPRE_Int hypre_CreateLambda(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   MPI_Comm comm;
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;

   hypre_ParCSRMatrix *A_tmp;
   hypre_ParCSRMatrix *Lambda;
   hypre_CSRMatrix *L_diag;
   hypre_CSRMatrix *L_offd;
   hypre_ParCSRMatrix *Atilde;
   hypre_CSRMatrix *Atilde_diag;
   hypre_CSRMatrix *Atilde_offd;
   HYPRE_Real *Atilde_diag_data;
   HYPRE_Real *Atilde_offd_data;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_CSRMatrix *A_tmp_offd;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector *Rtilde_local;
   hypre_ParCSRCommPkg *comm_pkg;
   hypre_ParCSRCommPkg *L_comm_pkg = NULL;
   hypre_ParCSRCommHandle *comm_handle;
   HYPRE_Real *L_diag_data;
   HYPRE_Real *L_offd_data;
   HYPRE_Real *buf_data = NULL;
   HYPRE_Real *tmp_data;
   HYPRE_Real *x_data;
   HYPRE_Real *r_data;
   hypre_Vector *l1_norms;
   HYPRE_Real *A_tmp_diag_data;
   HYPRE_Real *A_tmp_offd_data;
   HYPRE_Real *D_data = NULL;
   HYPRE_Real *D_data_offd = NULL;
   HYPRE_Int *L_diag_i;
   HYPRE_Int *L_diag_j;
   HYPRE_Int *L_offd_i;
   HYPRE_Int *L_offd_j;
   HYPRE_Int *Atilde_diag_i;
   HYPRE_Int *Atilde_diag_j;
   HYPRE_Int *Atilde_offd_i;
   HYPRE_Int *Atilde_offd_j;
   HYPRE_Int *A_tmp_diag_i;
   HYPRE_Int *A_tmp_offd_i;
   HYPRE_Int *A_tmp_diag_j;
   HYPRE_Int *A_tmp_offd_j;
   HYPRE_Int *L_recv_ptr = NULL;
   HYPRE_Int *L_send_ptr = NULL;
   HYPRE_Int *L_recv_procs = NULL;
   HYPRE_Int
*L_send_procs = NULL; HYPRE_Int *L_send_map_elmts = NULL; HYPRE_Int *recv_procs; HYPRE_Int *send_procs; HYPRE_Int *send_map_elmts; HYPRE_Int *send_map_starts; HYPRE_Int *recv_vec_starts; HYPRE_Int *all_send_procs = NULL; HYPRE_Int *all_recv_procs = NULL; HYPRE_Int *remap = NULL; HYPRE_Int *level_start; HYPRE_Int addlvl; HYPRE_Int additive; HYPRE_Int mult_additive; HYPRE_Int num_levels; HYPRE_Int num_add_lvls; HYPRE_Int num_procs; HYPRE_Int num_sends, num_recvs; HYPRE_Int num_sends_L = 0; HYPRE_Int num_recvs_L = 0; HYPRE_Int send_data_L = 0; HYPRE_Int num_rows_L = 0; HYPRE_Int num_rows_tmp = 0; HYPRE_Int num_cols_offd_L = 0; HYPRE_Int num_cols_offd = 0; HYPRE_Int level, i, j, k; HYPRE_Int this_proc, cnt, cnt_diag, cnt_offd; HYPRE_Int A_cnt_diag, A_cnt_offd; HYPRE_Int cnt_recv, cnt_send, cnt_row, row_start; HYPRE_Int start_diag, start_offd, indx, cnt_map; HYPRE_Int start, j_indx, index, cnt_level; HYPRE_Int max_sends, max_recvs; HYPRE_Int ns; /* Local variables */ HYPRE_Int Solve_err_flag = 0; HYPRE_Int num_nonzeros_diag; HYPRE_Int num_nonzeros_offd; hypre_Vector **l1_norms_ptr = NULL; /*HYPRE_Real *relax_weight = NULL; HYPRE_Int relax_type; */ HYPRE_Int add_rlx; HYPRE_Int add_last_lvl, add_end; HYPRE_Real add_rlx_wt; /* Acquire data and allocate storage */ A_array = hypre_ParAMGDataAArray(amg_data); F_array = hypre_ParAMGDataFArray(amg_data); U_array = hypre_ParAMGDataUArray(amg_data); additive = hypre_ParAMGDataAdditive(amg_data); mult_additive = hypre_ParAMGDataMultAdditive(amg_data); add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data); num_levels = hypre_ParAMGDataNumLevels(amg_data); /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data); relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/ comm = hypre_ParCSRMatrixComm(A_array[0]); add_rlx = hypre_ParAMGDataAddRelaxType(amg_data); add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data); ns = hypre_ParAMGDataNumGridSweeps(amg_data)[1]; hypre_MPI_Comm_size(comm,&num_procs); l1_norms_ptr = 
hypre_ParAMGDataL1Norms(amg_data); addlvl = hypre_max(additive, mult_additive); if (add_last_lvl != -1) add_end = add_last_lvl+1; else add_end = num_levels; num_add_lvls = add_end+1-addlvl; level_start = hypre_CTAlloc(HYPRE_Int, num_add_lvls+1, HYPRE_MEMORY_HOST); send_data_L = 0; num_rows_L = 0; num_cols_offd_L = 0; num_nonzeros_diag = 0; num_nonzeros_offd = 0; level_start[0] = 0; cnt = 1; max_sends = 0; max_recvs = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); num_cols_offd = hypre_CSRMatrixNumCols(A_tmp_offd); num_rows_L += num_rows_tmp; level_start[cnt] = level_start[cnt-1] + num_rows_tmp; cnt++; num_cols_offd_L += num_cols_offd; num_nonzeros_diag += A_tmp_diag_i[num_rows_tmp]; num_nonzeros_offd += A_tmp_offd_i[num_rows_tmp]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); max_sends += num_sends; if (num_sends) send_data_L += hypre_ParCSRCommPkgSendMapStart(comm_pkg,num_sends); max_recvs += hypre_ParCSRCommPkgNumRecvs(comm_pkg); } } if (max_sends >= num_procs ||max_recvs >= num_procs) { max_sends = num_procs; max_recvs = num_procs; } if (max_sends) all_send_procs = hypre_CTAlloc(HYPRE_Int, max_sends, HYPRE_MEMORY_HOST); if (max_recvs) all_recv_procs = hypre_CTAlloc(HYPRE_Int, max_recvs, HYPRE_MEMORY_HOST); cnt_send = 0; cnt_recv = 0; if (max_sends || max_recvs) { if (max_sends < num_procs && max_recvs < num_procs) { for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); for (j = 0; j < num_sends; 
j++) all_send_procs[cnt_send++] = send_procs[j]; for (j = 0; j < num_recvs; j++) all_recv_procs[cnt_recv++] = recv_procs[j]; } } if (max_sends) { hypre_qsort0(all_send_procs, 0, max_sends-1); num_sends_L = 1; this_proc = all_send_procs[0]; for (i=1; i < max_sends; i++) { if (all_send_procs[i] > this_proc) { this_proc = all_send_procs[i]; all_send_procs[num_sends_L++] = this_proc; } } L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); for (j=0; j < num_sends_L; j++) L_send_procs[j] = all_send_procs[j]; hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST); } if (max_recvs) { hypre_qsort0(all_recv_procs, 0, max_recvs-1); num_recvs_L = 1; this_proc = all_recv_procs[0]; for (i=1; i < max_recvs; i++) { if (all_recv_procs[i] > this_proc) { this_proc = all_recv_procs[i]; all_recv_procs[num_recvs_L++] = this_proc; } } L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); for (j=0; j < num_recvs_L; j++) L_recv_procs[j] = all_recv_procs[j]; hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST); } L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST); for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } for (k = 0; k < num_sends; k++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[k],num_sends_L); L_send_ptr[this_proc+1] += send_map_starts[k+1]-send_map_starts[k]; } for (k = 0; k < num_recvs; k++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[k],num_recvs_L); L_recv_ptr[this_proc+1] += 
recv_vec_starts[k+1]-recv_vec_starts[k]; } } L_recv_ptr[0] = 0; for (i=1; i < num_recvs_L; i++) L_recv_ptr[i+1] += L_recv_ptr[i]; L_send_ptr[0] = 0; for (i=1; i < num_sends_L; i++) L_send_ptr[i+1] += L_send_ptr[i]; } else { num_recvs_L = 0; num_sends_L = 0; for (i=addlvl; i < add_end; i++) { A_tmp = A_array[i]; comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = hypre_ParCSRCommPkgSendMapStarts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); for (j = 0; j < num_sends; j++) { this_proc = send_procs[j]; if (all_send_procs[this_proc] == 0) num_sends_L++; all_send_procs[this_proc] += send_map_starts[j+1]-send_map_starts[j]; } for (j = 0; j < num_recvs; j++) { this_proc = recv_procs[j]; if (all_recv_procs[this_proc] == 0) num_recvs_L++; all_recv_procs[this_proc] += recv_vec_starts[j+1]-recv_vec_starts[j]; } } } if (max_sends) { L_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); L_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends_L+1, HYPRE_MEMORY_HOST); num_sends_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_send_procs[j]; if (this_proc) { L_send_procs[num_sends_L++] = j; L_send_ptr[num_sends_L] = this_proc + L_send_ptr[num_sends_L-1]; } } } if (max_recvs) { L_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); L_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs_L+1, HYPRE_MEMORY_HOST); num_recvs_L = 0; for (j=0; j < num_procs; j++) { this_proc = all_recv_procs[j]; if (this_proc) { L_recv_procs[num_recvs_L++] = j; L_recv_ptr[num_recvs_L] = this_proc + L_recv_ptr[num_recvs_L-1]; } } } } } if (max_sends) hypre_TFree(all_send_procs, HYPRE_MEMORY_HOST); if (max_recvs) hypre_TFree(all_recv_procs, HYPRE_MEMORY_HOST); L_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, 
num_nonzeros_diag); L_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(L_diag); hypre_CSRMatrixInitialize(L_offd); if (num_nonzeros_diag) { L_diag_data = hypre_CSRMatrixData(L_diag); L_diag_j = hypre_CSRMatrixJ(L_diag); } L_diag_i = hypre_CSRMatrixI(L_diag); if (num_nonzeros_offd) { L_offd_data = hypre_CSRMatrixData(L_offd); L_offd_j = hypre_CSRMatrixJ(L_offd); } L_offd_i = hypre_CSRMatrixI(L_offd); if (ns > 1) { Atilde_diag = hypre_CSRMatrixCreate(num_rows_L, num_rows_L, num_nonzeros_diag); Atilde_offd = hypre_CSRMatrixCreate(num_rows_L, num_cols_offd_L, num_nonzeros_offd); hypre_CSRMatrixInitialize(Atilde_diag); hypre_CSRMatrixInitialize(Atilde_offd); if (num_nonzeros_diag) { Atilde_diag_data = hypre_CSRMatrixData(Atilde_diag); Atilde_diag_j = hypre_CSRMatrixJ(Atilde_diag); } Atilde_diag_i = hypre_CSRMatrixI(Atilde_diag); if (num_nonzeros_offd) { Atilde_offd_data = hypre_CSRMatrixData(Atilde_offd); Atilde_offd_j = hypre_CSRMatrixJ(Atilde_offd); } Atilde_offd_i = hypre_CSRMatrixI(Atilde_offd); } if (num_rows_L) D_data = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST); if (send_data_L) { L_send_map_elmts = hypre_CTAlloc(HYPRE_Int, send_data_L, HYPRE_MEMORY_HOST); buf_data = hypre_CTAlloc(HYPRE_Real, send_data_L, HYPRE_MEMORY_HOST); } if (num_cols_offd_L) { D_data_offd = hypre_CTAlloc(HYPRE_Real, num_cols_offd_L, HYPRE_MEMORY_HOST); /*L_col_map_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L);*/ remap = hypre_CTAlloc(HYPRE_Int, num_cols_offd_L, HYPRE_MEMORY_HOST); } Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Rtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Rtilde_local); hypre_ParVectorLocalVector(Rtilde) = Rtilde_local; hypre_ParVectorOwnsData(Rtilde) = 1; Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST); Xtilde_local = hypre_SeqVectorCreate(num_rows_L); hypre_SeqVectorInitialize(Xtilde_local); hypre_ParVectorLocalVector(Xtilde) = 
Xtilde_local; hypre_ParVectorOwnsData(Xtilde) = 1; x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde)); r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde)); cnt = 0; cnt_level = 0; cnt_diag = 0; cnt_offd = 0; cnt_row = 1; L_diag_i[0] = 0; L_offd_i[0] = 0; if (ns > 1) { A_cnt_diag = 0; A_cnt_offd = 0; Atilde_diag_i[0] = 0; Atilde_offd_i[0] = 0; } for (level=addlvl; level < add_end; level++) { row_start = level_start[cnt_level]; if (level != 0) { tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])); if (tmp_data) { hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level]))); } hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[row_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0; tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])); if (tmp_data) { hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level]))); } hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[row_start]; hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0; } cnt_level++; start_diag = L_diag_i[cnt_row-1]; start_offd = L_offd_i[cnt_row-1]; A_tmp = A_array[level]; A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp); A_tmp_offd = hypre_ParCSRMatrixOffd(A_tmp); comm_pkg = hypre_ParCSRMatrixCommPkg(A_tmp); A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag); A_tmp_offd_i = hypre_CSRMatrixI(A_tmp_offd); A_tmp_diag_j = hypre_CSRMatrixJ(A_tmp_diag); A_tmp_offd_j = hypre_CSRMatrixJ(A_tmp_offd); A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag); A_tmp_offd_data = hypre_CSRMatrixData(A_tmp_offd); num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag); if (comm_pkg) { num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg); num_recvs = hypre_ParCSRCommPkgNumRecvs(comm_pkg); send_procs = hypre_ParCSRCommPkgSendProcs(comm_pkg); recv_procs = hypre_ParCSRCommPkgRecvProcs(comm_pkg); send_map_starts = 
hypre_ParCSRCommPkgSendMapStarts(comm_pkg); send_map_elmts = hypre_ParCSRCommPkgSendMapElmts(comm_pkg); recv_vec_starts = hypre_ParCSRCommPkgRecvVecStarts(comm_pkg); } else { num_sends = 0; num_recvs = 0; } /* Compute new combined communication package */ for (i=0; i < num_sends; i++) { this_proc = hypre_BinarySearch(L_send_procs,send_procs[i],num_sends_L); indx = L_send_ptr[this_proc]; for (j=send_map_starts[i]; j < send_map_starts[i+1]; j++) { L_send_map_elmts[indx++] = row_start + send_map_elmts[j]; } L_send_ptr[this_proc] = indx; } cnt_map = 0; for (i = 0; i < num_recvs; i++) { this_proc = hypre_BinarySearch(L_recv_procs,recv_procs[i],num_recvs_L); indx = L_recv_ptr[this_proc]; for (j=recv_vec_starts[i]; j < recv_vec_starts[i+1]; j++) { remap[cnt_map++] = indx++; } L_recv_ptr[this_proc] = indx; } /* Compute Lambda */ if (add_rlx == 0) { /*HYPRE_Real rlx_wt = relax_weight[level];*/ #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]]; L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } else { l1_norms = l1_norms_ptr[level]; #ifdef HYPRE_USING_OPENMP #pragma omp for private(i) HYPRE_SMP_SCHEDULE #endif for (i=0; i < num_rows_tmp; i++) { D_data[i] = 1.0 / hypre_VectorData(l1_norms)[i]; L_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; L_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } if (ns > 1) { for (i=0; i < num_rows_tmp; i++) { Atilde_diag_i[cnt_row+i] = start_diag + A_tmp_diag_i[i+1]; Atilde_offd_i[cnt_row+i] = start_offd + A_tmp_offd_i[i+1]; } } } if (num_procs > 1) { index = 0; for (i=0; i < num_sends; i++) { start = send_map_starts[i]; for (j=start; j < send_map_starts[i+1]; j++) buf_data[index++] = 
D_data[send_map_elmts[j]]; } comm_handle = hypre_ParCSRCommHandleCreate(1, comm_pkg, buf_data, D_data_offd); hypre_ParCSRCommHandleDestroy(comm_handle); } for (i = 0; i < num_rows_tmp; i++) { j_indx = A_tmp_diag_i[i]; if (ns > 1) { Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j_indx]; Atilde_diag_j[A_cnt_diag++] = i+row_start; } L_diag_data[cnt_diag] = (2.0 - A_tmp_diag_data[j_indx]*D_data[i])*D_data[i]; L_diag_j[cnt_diag++] = i+row_start; for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; L_diag_data[cnt_diag] = (- A_tmp_diag_data[j]*D_data[j_indx])*D_data[i]; L_diag_j[cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { j_indx = A_tmp_offd_j[j]; L_offd_data[cnt_offd] = (- A_tmp_offd_data[j]*D_data_offd[j_indx])*D_data[i]; L_offd_j[cnt_offd++] = remap[j_indx]; } if (ns > 1) { for (j=A_tmp_diag_i[i]+1; j < A_tmp_diag_i[i+1]; j++) { j_indx = A_tmp_diag_j[j]; Atilde_diag_data[A_cnt_diag] = A_tmp_diag_data[j]; Atilde_diag_j[A_cnt_diag++] = j_indx+row_start; } for (j=A_tmp_offd_i[i]; j < A_tmp_offd_i[i+1]; j++) { j_indx = A_tmp_offd_j[j]; Atilde_offd_data[A_cnt_offd] = A_tmp_offd_data[j]; Atilde_offd_j[A_cnt_offd++] = remap[j_indx]; } } } cnt_row += num_rows_tmp; } if (L_send_ptr) { for (i=num_sends_L-1; i > 0; i--) L_send_ptr[i] = L_send_ptr[i-1]; L_send_ptr[0] = 0; } else L_send_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); if (L_recv_ptr) { for (i=num_recvs_L-1; i > 0; i--) L_recv_ptr[i] = L_recv_ptr[i-1]; L_recv_ptr[0] = 0; } else L_recv_ptr = hypre_CTAlloc(HYPRE_Int, 1, HYPRE_MEMORY_HOST); L_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); hypre_ParCSRCommPkgNumRecvs(L_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(L_comm_pkg) = num_sends_L; hypre_ParCSRCommPkgRecvProcs(L_comm_pkg) = L_recv_procs; hypre_ParCSRCommPkgSendProcs(L_comm_pkg) = L_send_procs; hypre_ParCSRCommPkgRecvVecStarts(L_comm_pkg) = L_recv_ptr; hypre_ParCSRCommPkgSendMapStarts(L_comm_pkg) = 
L_send_ptr; hypre_ParCSRCommPkgSendMapElmts(L_comm_pkg) = L_send_map_elmts; hypre_ParCSRCommPkgComm(L_comm_pkg) = comm; Lambda = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDiag(Lambda) = L_diag; hypre_ParCSRMatrixOffd(Lambda) = L_offd; hypre_ParCSRMatrixCommPkg(Lambda) = L_comm_pkg; hypre_ParCSRMatrixComm(Lambda) = comm; hypre_ParCSRMatrixOwnsData(Lambda) = 1; if (ns > 1) { /*hypre_ParCSRCommPkg *A_comm_pkg = NULL; HYPRE_Int *A_recv_ptr = NULL; HYPRE_Int *A_send_ptr = NULL; HYPRE_Int *A_recv_procs = NULL; HYPRE_Int *A_send_procs = NULL; HYPRE_Int *A_send_map_elmts = NULL; A_comm_pkg = hypre_CTAlloc(hypre_ParCSRCommPkg, 1, HYPRE_MEMORY_HOST); A_recv_ptr = hypre_CTAlloc(HYPRE_Int, num_recvs+1, HYPRE_MEMORY_HOST); A_send_ptr = hypre_CTAlloc(HYPRE_Int, num_sends+1, HYPRE_MEMORY_HOST); A_recv_procs = hypre_CTAlloc(HYPRE_Int, num_recvs_L, HYPRE_MEMORY_HOST); A_send_procs = hypre_CTAlloc(HYPRE_Int, num_sends_L, HYPRE_MEMORY_HOST); A_send_map_elmts = hypre_CTAlloc(HYPRE_Int, L_send_ptr[num_sends_L], HYPRE_MEMORY_HOST); for (i=0; i<num_recvs_L+1; i++) A_recv_ptr[i] = L_recv_ptr[i]; for (i=0; i<num_sends_L+1; i++) A_send_ptr[i] = L_send_ptr[i]; for (i=0; i<num_recvs_L; i++) A_recv_procs[i] = L_recv_procs[i]; for (i=0; i<num_sends_L; i++) A_send_procs[i] = L_send_procs[i]; for (i=0; i < L_send_ptr[num_sends_L]; i++) A_send_map_elmts[i] = L_send_map_elmts[i]; hypre_ParCSRCommPkgNumRecvs(A_comm_pkg) = num_recvs_L; hypre_ParCSRCommPkgNumSends(A_comm_pkg) = num_sends_L; hypre_ParCSRCommPkgRecvProcs(A_comm_pkg) = A_recv_procs; hypre_ParCSRCommPkgSendProcs(A_comm_pkg) = A_send_procs; hypre_ParCSRCommPkgRecvVecStarts(A_comm_pkg) = A_recv_ptr; hypre_ParCSRCommPkgSendMapStarts(A_comm_pkg) = A_send_ptr; hypre_ParCSRCommPkgSendMapElmts(A_comm_pkg) = A_send_map_elmts; hypre_ParCSRCommPkgComm(A_comm_pkg) = comm; */ Atilde = hypre_CTAlloc(hypre_ParCSRMatrix, 1, HYPRE_MEMORY_HOST); hypre_ParCSRMatrixDiag(Atilde) = Atilde_diag; hypre_ParCSRMatrixOffd(Atilde) = 
/* --- tail of hypre_CreateLambda (the function begins on an earlier line):
   attach the combined communication package to Atilde, publish Lambda,
   Rtilde and Xtilde in the AMG data structure, and free assembly
   workspace --- */
Atilde_offd;
hypre_ParCSRMatrixCommPkg(Atilde) = L_comm_pkg;
hypre_ParCSRMatrixComm(Atilde) = comm;
hypre_ParCSRMatrixOwnsData(Atilde) = 1;
hypre_ParAMGDataAtilde(amg_data) = Atilde;
}
hypre_ParAMGDataLambda(amg_data) = Lambda;
hypre_ParAMGDataRtilde(amg_data) = Rtilde;
hypre_ParAMGDataXtilde(amg_data) = Xtilde;
/* release work arrays used only while assembling Lambda */
hypre_TFree(D_data_offd, HYPRE_MEMORY_HOST);
hypre_TFree(D_data, HYPRE_MEMORY_HOST);
if (num_procs > 1) hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(remap, HYPRE_MEMORY_HOST);
/* NOTE(review): buf_data was already freed above when num_procs > 1; this
   second call is a double free unless hypre_TFree NULLs its argument --
   TODO confirm against the hypre_TFree definition in hypre's memory API */
hypre_TFree(buf_data, HYPRE_MEMORY_HOST);
hypre_TFree(level_start, HYPRE_MEMORY_HOST);
return Solve_err_flag;
}

/*--------------------------------------------------------------------------
 * hypre_CreateDinv
 *
 * Builds the diagonal scaling D_inv used by the "simple" additive AMG
 * variant.  For the additive levels [addlvl, add_end) it:
 *   - sums the local row counts into num_rows_L,
 *   - creates composite vectors Rtilde and Xtilde of that total size,
 *   - redirects each level's F_array/U_array local data pointers into
 *     slices of Rtilde/Xtilde (levels > 0 only), freeing any data those
 *     vectors previously owned and clearing their ownership flags so the
 *     shared storage is not freed twice,
 *   - fills D_inv per row with either add_rlx_wt / (first stored diag
 *     entry of the row) when add_rlx == 0, or 1 / l1-norm otherwise.
 * The results (D_inv, Rtilde, Xtilde) are stored in amg_data.
 *
 * Parameter: amg_vdata - opaque pointer to a hypre_ParAMGData object.
 * Returns:   Solve_err_flag (always 0 in this function).
 *--------------------------------------------------------------------------*/
HYPRE_Int hypre_CreateDinv(void *amg_vdata)
{
   hypre_ParAMGData *amg_data = (hypre_ParAMGData*) amg_vdata;

   /* Data Structure variables */
   hypre_ParCSRMatrix **A_array;
   hypre_ParVector **F_array;
   hypre_ParVector **U_array;
   hypre_ParCSRMatrix *A_tmp;
   hypre_CSRMatrix *A_tmp_diag;
   hypre_ParVector *Xtilde;
   hypre_ParVector *Rtilde;
   hypre_Vector *Xtilde_local;
   hypre_Vector *Rtilde_local;
   HYPRE_Real *x_data;
   HYPRE_Real *r_data;
   HYPRE_Real *tmp_data;
   HYPRE_Real *D_inv = NULL;
   /*HYPRE_Real *relax_weight = NULL;
   HYPRE_Real relax_type;*/
   HYPRE_Int addlvl;
   HYPRE_Int num_levels;
   HYPRE_Int num_rows_L;
   HYPRE_Int num_rows_tmp;
   HYPRE_Int level, i;
   HYPRE_Int add_rlx;
   HYPRE_Real add_rlx_wt;
   HYPRE_Int add_last_lvl, add_end;

   /* Local variables */
   HYPRE_Int Solve_err_flag = 0;
   hypre_Vector **l1_norms_ptr = NULL;
   hypre_Vector *l1_norms;
   HYPRE_Int l1_start;

   /* Acquire data and allocate storage */
   A_array = hypre_ParAMGDataAArray(amg_data);
   F_array = hypre_ParAMGDataFArray(amg_data);
   U_array = hypre_ParAMGDataUArray(amg_data);
   addlvl = hypre_ParAMGDataSimple(amg_data);
   num_levels = hypre_ParAMGDataNumLevels(amg_data);
   add_rlx_wt = hypre_ParAMGDataAddRelaxWt(amg_data);
   add_rlx = hypre_ParAMGDataAddRelaxType(amg_data);
   add_last_lvl = hypre_ParAMGDataAddLastLvl(amg_data);
   /*relax_weight = hypre_ParAMGDataRelaxWeight(amg_data);
   relax_type = hypre_ParAMGDataGridRelaxType(amg_data)[1];*/
   l1_norms_ptr = hypre_ParAMGDataL1Norms(amg_data);
   /* smooth_option = hypre_ParAMGDataSmoothOption(amg_data); */

   /* last additive level: -1 means "through the coarsest level" */
   if (add_last_lvl == -1 ) add_end = num_levels;
   else add_end = add_last_lvl;

   /* total local rows over all additive levels */
   num_rows_L = 0;
   for (i=addlvl; i < add_end; i++)
   {
      A_tmp = A_array[i];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      num_rows_L += num_rows_tmp;
   }

   /* composite residual vector spanning all additive levels */
   Rtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
   Rtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Rtilde_local);
   hypre_ParVectorLocalVector(Rtilde) = Rtilde_local;
   hypre_ParVectorOwnsData(Rtilde) = 1;

   /* composite solution vector spanning all additive levels */
   Xtilde = hypre_CTAlloc(hypre_ParVector, 1, HYPRE_MEMORY_HOST);
   Xtilde_local = hypre_SeqVectorCreate(num_rows_L);
   hypre_SeqVectorInitialize(Xtilde_local);
   hypre_ParVectorLocalVector(Xtilde) = Xtilde_local;
   hypre_ParVectorOwnsData(Xtilde) = 1;

   x_data = hypre_VectorData(hypre_ParVectorLocalVector(Xtilde));
   r_data = hypre_VectorData(hypre_ParVectorLocalVector(Rtilde));

   D_inv = hypre_CTAlloc(HYPRE_Real, num_rows_L, HYPRE_MEMORY_HOST);

   l1_start = 0;
   for (level=addlvl; level < add_end; level++)
   {
      if (level != 0)
      {
         /* alias this level's rhs/solution local vectors into slices of the
            composite storage; free any data they owned and clear the
            ownership flags so the shared arrays are not freed per level */
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(F_array[level]));
         if (tmp_data)
         {
            hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(F_array[level])));
         }
         hypre_VectorData(hypre_ParVectorLocalVector(F_array[level])) = &r_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(F_array[level])) = 0;
         tmp_data = hypre_VectorData(hypre_ParVectorLocalVector(U_array[level]));
         if (tmp_data)
         {
            hypre_TFree(tmp_data, hypre_VectorMemoryLocation(hypre_ParVectorLocalVector(U_array[level])));
         }
         hypre_VectorData(hypre_ParVectorLocalVector(U_array[level])) = &x_data[l1_start];
         hypre_VectorOwnsData(hypre_ParVectorLocalVector(U_array[level])) = 0;
      }
      A_tmp = A_array[level];
      A_tmp_diag = hypre_ParCSRMatrixDiag(A_tmp);
      num_rows_tmp = hypre_CSRMatrixNumRows(A_tmp_diag);
      if (add_rlx == 0)
      {
         /* weighted-Jacobi scaling: D_inv[row] = add_rlx_wt / first stored
            entry of the row (assumes the diagonal is stored first --
            hypre's convention for the diag part; TODO confirm) */
         /*HYPRE_Real rlx_wt = relax_weight[level];*/
         HYPRE_Int *A_tmp_diag_i = hypre_CSRMatrixI(A_tmp_diag);
         HYPRE_Real *A_tmp_diag_data = hypre_CSRMatrixData(A_tmp_diag);
         /* NOTE(review): "omp for" outside a parallel region executes
            sequentially; presumably "omp parallel for" was intended --
            TODO confirm against upstream hypre */
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
         {
            D_inv[l1_start+i] = add_rlx_wt/A_tmp_diag_data[A_tmp_diag_i[i]];
         }
      }
      else
      {
         /* l1-Jacobi scaling: D_inv[row] = 1 / l1 norm of the row */
         l1_norms = l1_norms_ptr[level];
#ifdef HYPRE_USING_OPENMP
#pragma omp for private(i) HYPRE_SMP_SCHEDULE
#endif
         for (i=0; i < num_rows_tmp; i++)
         {
            D_inv[l1_start+i] = 1.0 / hypre_VectorData(l1_norms)[i];
         }
      }
      l1_start += num_rows_tmp;
   }
   hypre_ParAMGDataDinv(amg_data) = D_inv;
   hypre_ParAMGDataRtilde(amg_data) = Rtilde;
   hypre_ParAMGDataXtilde(amg_data) = Xtilde;
   return Solve_err_flag;
}
GB_binop__land_uint32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__land_uint32) // A.*B function (eWiseMult): GB (_AemultB) // A.*B function (eWiseMult): GB (_AemultB_02__land_uint32) // A.*B function (eWiseMult): GB (_AemultB_03__land_uint32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__land_uint32) // A*D function (colscale): GB (_AxD__land_uint32) // D*A function (rowscale): GB (_DxB__land_uint32) // C+=B function (dense accum): GB (_Cdense_accumB__land_uint32) // C+=b function (dense accum): GB (_Cdense_accumb__land_uint32) // C+=A+B function (dense ewise3): GB ((none)) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__land_uint32) // C=scalar+B GB (_bind1st__land_uint32) // C=scalar+B' GB (_bind1st_tran__land_uint32) // C=A+scalar GB (_bind2nd__land_uint32) // C=A'+scalar GB (_bind2nd_tran__land_uint32) // C type: uint32_t // A type: uint32_t // B,b type: uint32_t // BinaryOp: cij = ((aij != 0) && (bij != 0)) #define GB_ATYPE \ uint32_t #define GB_BTYPE \ uint32_t #define GB_CTYPE \ uint32_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical 
#define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ uint32_t aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ uint32_t bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint32_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y, i, j) \ z = ((x != 0) && (y != 0)) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_LAND || GxB_NO_UINT32 || GxB_NO_LAND_UINT32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_ewise3_noaccum__land_uint32) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__land_uint32) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__land_uint32) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint32_t uint32_t bwork = (*((uint32_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__land_uint32) ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const 
//------------------------------------------------------------------------------
// NOTE(review): factory-generated GraphBLAS kernels specialized for the LAND
// (logical AND) binary operator on uint32_t data: z = ((x != 0) && (y != 0)).
// Each function body pulls its loop from a shared #include'd template file,
// so behavior depends on the macro state (GB_DISABLE, GB_FLIPPED, GB_ATYPE)
// in effect at the include point.  Code tokens below are unchanged; only
// comments and formatting were added.
//------------------------------------------------------------------------------

// (fragment) tail of the C=A*D column-scale kernel: its name and leading
// parameters begin before this view; only the trailing parameters and the
// body are visible here.
    int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    // kernel compiled out: report no value so the caller can take another path
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

// Row-scale kernel: combines the diagonal of D with each row of B via the
// LAND operator.  Returns GrB_NO_VALUE when compiled out via GB_DISABLE.
GrB_Info GB (_DxB__land_uint32)
(
    GrB_Matrix C,                               // output matrix
    const GrB_Matrix D, bool D_is_pattern,      // diagonal scaling matrix
    const GrB_Matrix B, bool B_is_pattern,      // right-hand input matrix
    int nthreads                                // # threads for the template loop
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *restrict Cx = (uint32_t *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

// Element-wise "add" (set union) with the LAND operator, optionally under
// mask M (structural and/or complemented).  Work is pre-sliced into TaskList.
GrB_Info GB (_AaddB__land_uint32)
(
    GrB_Matrix C, const int C_sparsity,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // per-matrix slicing workspaces; presumably released by GB_FREE_WORK below
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

// Element-wise multiply (set intersection) with the LAND operator; the full
// algorithm lives in the included meta template.
GrB_Info GB (_AemultB_01__land_uint32)
(
    GrB_Matrix C, const int C_sparsity, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks, const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_01_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

// Specialized eWiseMult for mixed sparsities.  GB_FLIPPED selects between
// fmult(x,y) and fmult(y,x) inside the template; LAND is commutative, so the
// GB_BINOP_FLIP branch is expected to be compiled out for this operator.
GrB_Info GB (_AemultB_02__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_03__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix M, const bool Mask_struct,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_03_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__land_uint32)
(
    GrB_Matrix C, const int ewise_method,
    const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp,
    const GrB_Matrix A, const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Computes Cx [p] = (x LAND Bx [p]) over anz entries, skipping entries not
// present per the bitmap Bb.  Result is the 0/1 truth value stored as uint32.
GrB_Info GB (_bind1st__land_uint32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,     // bound scalar (first operand)
    const GB_void *Bx_input,    // matrix values (second operand)
    const int8_t *restrict Bb,  // bitmap of B, or NULL
    int64_t anz,                // # of entries to process
    int nthreads                // # of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t x = (*((uint32_t *) x_input)) ;
    uint32_t *Bx = (uint32_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Bb, p)) continue ;    // entry absent in bitmap: skip
        uint32_t bij = Bx [p] ;
        Cx [p] = ((x != 0) && (bij != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Mirror of bind1st: Cx [p] = (Ax [p] LAND y) over anz entries, honoring
// the bitmap Ab.
GrB_Info GB (_bind2nd__land_uint32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,    // matrix values (first operand)
    const GB_void *y_input,     // bound scalar (second operand)
    const int8_t *restrict Ab,  // bitmap of A, or NULL
    int64_t anz,                // # of entries to process
    int nthreads                // # of OpenMP threads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    uint32_t *Cx = (uint32_t *) Cx_output ;
    uint32_t *Ax = (uint32_t *) Ax_input ;
    uint32_t y = (*((uint32_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;    // entry absent in bitmap: skip
        uint32_t aij = Ax [p] ;
        Cx [p] = ((aij != 0) && (y != 0)) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = Ax [pA] ; \
    Cx [pC] = ((x != 0) && (aij != 0)) ; \
}

GrB_Info GB (_bind1st_tran__land_uint32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t x = (*((const uint32_t *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // NOTE(review): generated code re-issues the same GB_ATYPE definition
    // after the return paths; tokens preserved as emitted.
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint32_t
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)  \
{ \
    uint32_t aij = Ax [pA] ; \
    Cx [pC] = ((aij != 0) && (y != 0)) ; \
}

GrB_Info GB (_bind2nd_tran__land_uint32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint32_t y = (*((const uint32_t *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

// closes a conditional-compilation guard opened before this view
#endif
ncra.c
/* $Header$ */

/* This single source file compiles into one executable that behaves as three different commands depending on invocation name:
   ncra -- netCDF record averager
   nces -- netCDF ensemble statistics
   ncrcat -- netCDF record concatenator */

/* Purpose: Compute averages or extract series of specified hyperslabs of specified variables of multiple input netCDF files and output them to a single file. */

/* Copyright (C) 1995--present Charlie Zender
   This file is part of NCO, the netCDF Operators. NCO is free software.
   You may redistribute and/or modify NCO under the terms of the
   3-Clause BSD License.
   You are permitted to link NCO with the HDF, netCDF, OPeNDAP, and UDUnits
   libraries and to distribute the resulting executables under the terms
   of the BSD, but in addition obeying the extra stipulations of the
   HDF, netCDF, OPeNDAP, and UDUnits licenses.

   This program is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
   See the 3-Clause BSD License for more details.

   The original author of this software, Charlie Zender, seeks to improve
   it with your suggestions, contributions, bug-reports, and patches.
Please contact the NCO project at http://nco.sf.net or write to Charlie Zender Department of Earth System Science University of California, Irvine Irvine, CA 92697-3100 */ /* URL: https://github.com/nco/nco/tree/master/src/nco/ncra.c Usage: ncra -O -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc ncra -O -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc ncra -O -n 3,4,1 -p /ZENDER/tmp -l ${HOME}/nco/data h0001.nc ~/foo.nc ncrcat -O -C -d time,0,5,4,2 -v time -p ~/nco/data in.nc ~/foo.nc ncra -O -C -d time,0,5,4,2 -v time -p ~/nco/data in.nc ~/foo.nc ncra -O -C --mro -d time,0,5,4,2 -v time -p ~/nco/data in.nc ~/foo.nc ncra -O -w 1,2,3 -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc ncra -O -w one_dmn_rec_var -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc scp ~/nco/src/nco/ncra.c esmf.ess.uci.edu:nco/src/nco nces in.nc in.nc ~/foo.nc nces -O -n 3,4,1 -p ${HOME}/nco/data h0001.nc ~/foo.nc nces -O -n 3,4,1 -p ${HOME}/nco/data -l ${HOME} h0001.nc ~/foo.nc nces -O -n 3,4,1 -p /ZENDER/tmp -l ${HOME} h0001.nc ~/foo.nc ncra -Y ncge -O -p ~/nco/data mdl_1.nc ~/foo.nc ncra -Y ncge -O --nsm_sfx=_avg -p ~/nco/data mdl_1.nc ~/foo.nc */ #ifdef HAVE_CONFIG_H # include <config.h> /* Autotools tokens */ #endif /* !HAVE_CONFIG_H */ /* Standard C headers */ #include <math.h> /* sin cos cos sin 3.14159 */ #include <stdio.h> /* stderr, FILE, NULL, etc. 
*/
#include <stdlib.h> /* abs, getopt, malloc, strtol */
#include <string.h> /* strcmp() */
#include <sys/stat.h> /* stat() */
#include <time.h> /* machine time */
#ifndef _MSC_VER
# include <unistd.h> /* POSIX stuff */
#endif
#ifndef HAVE_GETOPT_LONG
# include "nco_getopt.h"
#else /* HAVE_GETOPT_LONG */
# ifdef HAVE_GETOPT_H
#  include <getopt.h>
# endif /* !HAVE_GETOPT_H */
#endif /* HAVE_GETOPT_LONG */

#ifdef I18N
# include <langinfo.h> /* nl_langinfo() */
# include <libintl.h> /* Internationalization i18n */
# include <locale.h> /* Locale setlocale() */
# define _(sng) gettext (sng)
# define gettext_noop(sng) (sng)
# define N_(sng) gettext_noop(sng)
#endif /* I18N */
/* Supply stub gettext() function in case i18n failed */
#ifndef _LIBINTL_H
# define gettext(foo) foo
#endif /* _LIBINTL_H */

/* 3rd party vendors */
#include <netcdf.h> /* netCDF definitions and C library */
#ifdef ENABLE_MPI
# include <mpi.h> /* MPI definitions */
# include <netcdf_par.h> /* Parallel netCDF definitions */
# include "nco_mpi.h" /* MPI utilities */
#endif /* !ENABLE_MPI */

/* Personal headers */
/* #define MAIN_PROGRAM_FILE MUST precede #include libnco.h */
#define MAIN_PROGRAM_FILE
#include "nco.h" /* netCDF Operator (NCO) definitions */
#include "libnco.h" /* netCDF Operator (NCO) library */

/* Define inline'd functions in header so source is visible to calling files
   C99 only: Declare prototype in exactly one header
   http://www.drdobbs.com/the-new-c-inline-functions/184401540 */
/* Tiny min/max helpers for int and long; the extern declarations give each
   inline definition exactly one external linkage point per the C99 model */
extern int min_int(int a, int b);
extern int max_int(int a, int b);
inline int min_int(int a, int b){return (a < b) ? a : b;}
inline int max_int(int a, int b){return (a > b) ? a : b;}
extern long min_lng(long a, long b);
extern long max_lng(long a, long b);
inline long min_lng(long a, long b){return (a < b) ? a : b;}
inline long max_lng(long a, long b){return (a > b) ?
a : b;} int main(int argc,char **argv) { char **fl_lst_abb=NULL; /* Option n */ char **fl_lst_in; char **gaa_arg=NULL; /* [sng] Global attribute arguments */ char **grp_lst_in=NULL_CEWI; char **var_lst_in=NULL_CEWI; char **wgt_lst_in=NULL_CEWI; char *aux_arg[NC_MAX_DIMS]; char *cmd_ln; char *clm_nfo_sng=NULL; /* [sng] Climatology information string */ char *cnk_arg[NC_MAX_DIMS]; char *cnk_map_sng=NULL_CEWI; /* [sng] Chunking map */ char *cnk_plc_sng=NULL_CEWI; /* [sng] Chunking policy */ char *fl_in=NULL; char *fl_out=NULL; /* Option o */ char *fl_out_tmp=NULL_CEWI; char *fl_pth=NULL; /* Option p */ char *fl_pth_lcl=NULL; /* Option l */ char *grp_out_fll=NULL; /* [sng] Group name */ char *lmt_arg[NC_MAX_DIMS]; char *nco_op_typ_sng=NULL_CEWI; /* [sng] Operation type Option y */ char *nco_pck_plc_sng=NULL_CEWI; /* [sng] Packing policy Option P */ char *nsm_sfx=NULL; /* [sng] Ensemble suffix */ char *opt_crr=NULL; /* [sng] String representation of current long-option name */ char *optarg_lcl=NULL; /* [sng] Local copy of system optarg */ char *ppc_arg[NC_MAX_VARS]; /* [sng] PPC arguments */ char *sng_cnv_rcd=NULL_CEWI; /* [sng] strtol()/strtoul() return code */ char *wgt_nm=NULL_CEWI; /* [sng] Weight variable */ char trv_pth[]="/"; /* [sng] Root path of traversal tree */ const char * const CVS_Id="$Id$"; const char * const CVS_Revision="$Revision$"; const char * const opt_sht_lst="34567ACcD:d:FG:g:HhL:l:Nn:Oo:p:P:rRt:v:w:X:xY:y:-:"; clm_bnd_sct *cb=NULL; cnk_sct cnk; /* [sct] Chunking structure */ cnv_sct *cnv; /* [sct] Convention structure */ #if defined(__cplusplus) || defined(PGI_CC) ddra_info_sct ddra_info; ddra_info.flg_ddra=False; #else /* !__cplusplus */ ddra_info_sct ddra_info={.flg_ddra=False}; #endif /* !__cplusplus */ dmn_sct **dim=NULL; /* CEWI */ dmn_sct **dmn_out=NULL; /* CEWI */ double *wgt_arr=NULL; /* Option w */ double wgt_avg_scl=0.0; /* [frc] Scalar version of wgt_avg */ extern char *optarg; extern int optind; /* Using naked stdin/stdout/stderr in 
parallel region generates warning Copy appropriate filehandle to variable scoped shared in parallel clause */ FILE * const fp_stderr=stderr; /* [fl] stderr filehandle CEWI */ FILE * const fp_stdout=stdout; /* [fl] stdout filehandle CEWI */ gpe_sct *gpe=NULL; /* [sng] Group Path Editing (GPE) structure */ int *in_id_arr; const int rec_dmn_idx=0; /* [idx] Assumed index of current record dimension where zero assumes record is leading dimension */ int abb_arg_nbr=0; int aux_nbr=0; /* [nbr] Number of auxiliary coordinate hyperslabs specified */ int cnk_map=nco_cnk_map_nil; /* [enm] Chunking map */ int cnk_nbr=0; /* [nbr] Number of chunk sizes */ int cnk_plc=nco_cnk_plc_nil; /* [enm] Chunking policy */ int dfl_lvl=NCO_DFL_LVL_UNDEFINED; /* [enm] Deflate level */ int dmn_rec_fl; int fl_idx; int fl_in_fmt; /* [enm] Input file format */ int fl_nbr=0; int fl_out_fmt=NCO_FORMAT_UNDEFINED; /* [enm] Output file format */ int flg_input_complete_nbr=0; /* [nbr] Number of record dimensions completed */ int fll_md_old; /* [enm] Old fill mode */ int gaa_nbr=0; /* [nbr] Number of global attributes to add */ int grp_id; /* [ID] Group ID */ int grp_lst_in_nbr=0; /* [nbr] Number of groups explicitly specified by user */ int grp_out_id; /* [ID] Group ID (output) */ int idx=int_CEWI; int idx_rec=0; /* [idx] Index that iterates over number of record dimensions */ int in_id; int lmt_nbr=0; /* Option d. 
NB: lmt_nbr gets incremented */ int log_lvl=0; /* [enm] netCDF library debugging verbosity [0..5] */ int md_open; /* [enm] Mode flag for nc_open() call */ int nbr_dmn_fl; int nbr_dmn_xtr=0; int nbr_rec; /* [nbr] (ncra) Number of record dimensions */ int nbr_var_fix; /* nbr_var_fix gets incremented */ int nbr_var_fl; int nbr_var_prc; /* nbr_var_prc gets incremented */ int nco_op_typ=nco_op_avg; /* [enm] Default operation is averaging */ int nco_pck_plc=nco_pck_plc_nil; /* [enm] Default packing is none */ int opt; int out_id; int ppc_nbr=0; /* [nbr] Number of PPC arguments */ int rcd=NC_NOERR; /* [rcd] Return code */ int thr_idx; /* [idx] Index of current thread */ int thr_nbr=int_CEWI; /* [nbr] Thread number Option t */ int var_lst_in_nbr=0; int var_out_id; /* [ID] Variable ID (output) */ int wgt_nbr=0; int xtr_nbr=0; /* xtr_nbr won't otherwise be set for -c with no -v */ lmt_sct **lmt_rec=NULL; /* [lst] (ncra) Record dimensions */ long idx_rec_crr_in; /* [idx] Index of current record in current input file */ long *idx_rec_out=NULL; /* [idx] Index of current record in output file (0 is first, ...) 
*/ long ilv_srd; /* [idx] Interleave stride */ long *rec_in_cml=NULL; /* [nbr] Number of records, read or not, in all processed files */ long *rec_usd_cml=NULL; /* [nbr] Cumulative number of input records used (catenated by ncrcat or operated on by ncra) */ long rec_dmn_sz=0L; /* [idx] Size of record dimension, if any, in current file (increments by srd) */ long rec_rmn_prv_ssc=0L; /* [idx] Records remaining to be read in current subcycle group */ long rec_rmn_prv_ilv=0L; /* [idx] Records remaining to be read in current interleaved index */ md5_sct *md5=NULL; /* [sct] MD5 configuration */ nco_bool *REC_LST_DSR=NULL; /* [flg] Record is last desired from all input files */ nco_bool *flg_input_complete=NULL; /* [flg] All requested records in record dimension have been read */ nco_bool CNV_ARM; nco_bool EXCLUDE_INPUT_LIST=False; /* Option c */ nco_bool EXTRACT_ALL_COORDINATES=False; /* Option c */ nco_bool EXTRACT_ASSOCIATED_COORDINATES=True; /* Option C */ nco_bool EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ nco_bool EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ nco_bool FLG_BFR_NRM=False; /* [flg] Current output buffers need normalization */ nco_bool FLG_ILV=False; /* [flg] Interleave Output */ nco_bool FLG_MRO=False; /* [flg] Multi-Record Output */ nco_bool FLG_MSO=False; /* [flg] Multi-Subcycle Output */ nco_bool FL_LST_IN_APPEND=True; /* Option H */ nco_bool FL_LST_IN_FROM_STDIN=False; /* [flg] fl_lst_in comes from stdin */ nco_bool FL_RTR_RMT_LCN; nco_bool FORCE_APPEND=False; /* Option A */ nco_bool FORCE_OVERWRITE=False; /* Option O */ nco_bool FORTRAN_IDX_CNV=False; /* Option F */ nco_bool GRP_VAR_UNN=False; /* [flg] Select union of specified groups and variables */ nco_bool HISTORY_APPEND=True; /* Option h */ nco_bool HPSS_TRY=False; /* [flg] Search HPSS for unfound files */ nco_bool MSA_USR_RDR=False; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ nco_bool NORMALIZE_BY_WEIGHT=True; /* 
[flg] Normalize by command-line weight */ nco_bool NRM_BY_DNM=True; /* [flg] Normalize by denominator */ nco_bool PROMOTE_INTS=False; /* [flg] Promote integers to floating point in output */ nco_bool RAM_CREATE=False; /* [flg] Create file in RAM */ nco_bool RAM_OPEN=False; /* [flg] Open (netCDF3-only) file(s) in RAM */ nco_bool SHARE_CREATE=False; /* [flg] Create (netCDF3-only) file(s) with unbuffered I/O */ nco_bool SHARE_OPEN=False; /* [flg] Open (netCDF3-only) file(s) with unbuffered I/O */ nco_bool REC_APN=False; /* [flg] Append records directly to output file */ nco_bool REC_FRS_GRP=False; /* [flg] Record is first in current group */ nco_bool REC_LST_GRP=False; /* [flg] Record is last in current group */ nco_bool REC_SRD_LST=False; /* [flg] Record belongs to last stride of current file */ nco_bool RM_RMT_FL_PST_PRC=True; /* Option R */ nco_bool WRT_TMP_FL=True; /* [flg] Write output to temporary file */ nco_bool flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ nco_bool flg_cb=False; /* [flg] Climatology bounds */ nco_bool flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ nco_bool flg_skp1; /* [flg] Current record is not dimension of this variable */ nco_bool flg_skp2; /* [flg] Current record is not dimension of this variable */ nco_bool flg_wgt_by_rec_not_by_fl=False; /* [flg] Weight each record (not file) by command-line numeric weights, if any */ nco_dmn_dne_t *flg_dne=NULL; /* [lst] Flag to check if input dimension -d "does not exist" */ nco_int base_time_srt=nco_int_CEWI; nco_int base_time_crr=nco_int_CEWI; nc_type var_prc_typ_pre_prm=NC_NAT; /* [enm] Type of variable before promotion */ nc_type var_typ_out=NC_NAT; /* [enm] Type of variable in output file */ scv_sct wgt_scv; scv_sct wgt_avg_scv; size_t bfr_sz_hnt=NC_SIZEHINT_DEFAULT; /* [B] Buffer size hint */ size_t cnk_csh_byt=NCO_CNK_CSH_BYT_DFL; /* [B] Chunk cache size */ size_t cnk_min_byt=NCO_CNK_SZ_MIN_BYT_DFL; /* [B] Minimize size of variable to chunk */ size_t 
cnk_sz_byt=0UL; /* [B] Chunk size in bytes */ size_t cnk_sz_scl=0UL; /* [nbr] Chunk size scalar */ size_t hdr_pad=0UL; /* [B] Pad at end of header section */ trv_sct *var_trv; /* [sct] Variable GTT object */ trv_tbl_sct *trv_tbl; /* [lst] Traversal table */ var_sct **var; var_sct **var_fix; var_sct **var_fix_out; var_sct **var_out=NULL_CEWI; var_sct **var_prc; var_sct **var_prc_out; var_sct *wgt=NULL; /* [sct] Raw weight on disk in input file */ var_sct *wgt_out=NULL; /* [sct] Copy of wgt Tally and val members malloc'd & initialized IDs updated each new file by nco_var_mtd_refresh() in file loop Current record value obtained by nco_msa_var_get_rec_trv() in record loop One copy of wgt_out used for all variables */ var_sct *wgt_avg=NULL; /* [sct] Copy of wgt_out created to mimic var_prc_out processing Holds running total and tally of weight Acts as op2 for wgt_out averaging just before var_prc[nbr_var_prc-1] */ #ifdef ENABLE_MPI /* Declare all MPI-specific variables here */ MPI_Comm mpi_cmm=MPI_COMM_WORLD; /* [prc] Communicator */ int prc_rnk; /* [idx] Process rank */ int prc_nbr=0; /* [nbr] Number of MPI processes */ #endif /* !ENABLE_MPI */ static struct option opt_lng[]={ /* Structure ordered by short option key if possible */ /* Long options with no argument, no short option counterpart */ {"cll_msr",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"cell_measures",no_argument,0,0}, /* [flg] Extract cell_measures variables */ {"no_cll_msr",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"no_cell_measures",no_argument,0,0}, /* [flg] Do not extract cell_measures variables */ {"frm_trm",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"formula_terms",no_argument,0,0}, /* [flg] Extract formula_terms variables */ {"no_frm_trm",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"no_formula_terms",no_argument,0,0}, /* [flg] Do not extract formula_terms variables */ {"cll_mth",no_argument,0,0}, /* [flg] 
Add/modify cell_methods attributes */ {"cell_methods",no_argument,0,0}, /* [flg] Add/modify cell_methods attributes */ {"no_cll_mth",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"no_cell_methods",no_argument,0,0}, /* [flg] Do not add/modify cell_methods attributes */ {"clean",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"mmr_cln",no_argument,0,0}, /* [flg] Clean memory prior to exit */ {"drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dirty",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"mmr_drt",no_argument,0,0}, /* [flg] Allow dirty memory on exit */ {"dbl",no_argument,0,0}, /* [flg] Arithmetic convention: promote float to double */ {"flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"rth_dbl",no_argument,0,0}, /* [flg] Arithmetic convention: promote float to double */ {"rth_flt",no_argument,0,0}, /* [flg] Arithmetic convention: keep single-precision */ {"hdf4",no_argument,0,0}, /* [flg] Treat file as HDF4 */ {"hdf_upk",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"hdf_unpack",no_argument,0,0}, /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ {"help",no_argument,0,0}, {"hlp",no_argument,0,0}, {"hpss_try",no_argument,0,0}, /* [flg] Search HPSS for unfound files */ {"md5_dgs",no_argument,0,0}, /* [flg] Perform MD5 digests */ {"md5_digest",no_argument,0,0}, /* [flg] Perform MD5 digests */ {"mro",no_argument,0,0}, /* [flg] Multi-Record Output */ {"mso",no_argument,0,0}, /* [flg] Multi-Subcycle Output */ {"multi_record_output",no_argument,0,0}, /* [flg] Multi-Record Output */ {"multi_subcycle_output",no_argument,0,0}, /* [flg] Multi-Subcycle Output */ {"msa_usr_rdr",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ {"msa_user_order",no_argument,0,0}, /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ 
{"nsm_fl",no_argument,0,0}, {"nsm_grp",no_argument,0,0}, {"ram_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"create_ram",no_argument,0,0}, /* [flg] Create file in RAM */ {"open_ram",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) in RAM */ {"diskless_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) in RAM */ {"per_record_weights",no_argument,0,0}, /* [flg] Weight each record (not file) by command-line numeric weights, if any */ {"prm_ints",no_argument,0,0}, /* [flg] Promote integers to floating point in output */ {"prm_ntg",no_argument,0,0}, /* [flg] Promote integers to floating point in output */ {"promote_integers",no_argument,0,0}, /* [flg] Promote integers to floating point in output */ {"prw",no_argument,0,0}, /* [flg] Weight each record (not file) by command-line numeric weights, if any */ {"share_all",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"create_share",no_argument,0,0}, /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ {"open_share",no_argument,0,0}, /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ {"unbuffered_io",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"uio",no_argument,0,0}, /* [flg] Open and create (netCDF3) file(s) with unbuffered I/O */ {"rec_apn",no_argument,0,0}, /* [flg] Append records directly to output file */ {"record_append",no_argument,0,0}, /* [flg] Append records directly to output file */ {"wrt_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"write_tmp_fl",no_argument,0,0}, /* [flg] Write output to temporary file */ {"no_tmp_fl",no_argument,0,0}, /* [flg] Do not write output to temporary file */ {"version",no_argument,0,0}, {"vrs",no_argument,0,0}, /* Long options with argument, no short option counterpart */ {"bfr_sz_hnt",required_argument,0,0}, /* [B] Buffer size hint */ {"buffer_size_hint",required_argument,0,0}, /* [B] Buffer size hint */ 
{"cb",required_argument,0,0}, /* [sct] Climatology and bounds information */ {"clm_bnd",required_argument,0,0}, /* [sct] Climatology and bounds information */ {"clm_nfo",required_argument,0,0}, /* [sct] Climatology and bounds information */ {"cnk_byt",required_argument,0,0}, /* [B] Chunk size in bytes */ {"chunk_byte",required_argument,0,0}, /* [B] Chunk size in bytes */ {"cnk_csh",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"chunk_cache",required_argument,0,0}, /* [B] Chunk cache size in bytes */ {"cnk_dmn",required_argument,0,0}, /* [nbr] Chunk size */ {"chunk_dimension",required_argument,0,0}, /* [nbr] Chunk size */ {"cnk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"chunk_map",required_argument,0,0}, /* [nbr] Chunking map */ {"cnk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"chunk_min",required_argument,0,0}, /* [B] Minimize size of variable to chunk */ {"cnk_plc",required_argument,0,0}, /* [nbr] Chunking policy */ {"chunk_policy",required_argument,0,0}, /* [nbr] Chunking policy */ {"cnk_scl",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"chunk_scalar",required_argument,0,0}, /* [nbr] Chunk size scalar */ {"fl_fmt",required_argument,0,0}, {"file_format",required_argument,0,0}, {"gaa",required_argument,0,0}, /* [sng] Global attribute add */ {"glb_att_add",required_argument,0,0}, /* [sng] Global attribute add */ {"hdr_pad",required_argument,0,0}, {"header_pad",required_argument,0,0}, {"ilv_srd",required_argument,0,0}, /* [flg] Interleave stride */ {"interleave_srd",required_argument,0,0}, /* [flg] Interleave stride */ {"log_lvl",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"log_level",required_argument,0,0}, /* [enm] netCDF library debugging verbosity [0..5] */ {"ppc",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"precision_preserving_compression",required_argument,0,0}, /* [nbr] 
Precision-preserving compression, i.e., number of total or decimal significant digits */ {"quantize",required_argument,0,0}, /* [nbr] Precision-preserving compression, i.e., number of total or decimal significant digits */ {"nsm_sfx",required_argument,0,0}, {"ensemble_suffix",required_argument,0,0}, /* Long options with short counterparts */ {"3",no_argument,0,'3'}, {"4",no_argument,0,'4'}, {"netcdf4",no_argument,0,'4'}, {"5",no_argument,0,'5'}, {"64bit_data",no_argument,0,'5'}, {"cdf5",no_argument,0,'5'}, {"pnetcdf",no_argument,0,'5'}, {"64bit_offset",no_argument,0,'6'}, {"7",no_argument,0,'7'}, {"append",no_argument,0,'A'}, {"coords",no_argument,0,'c'}, {"crd",no_argument,0,'c'}, {"xtr_ass_var",no_argument,0,'c'}, {"xcl_ass_var",no_argument,0,'C'}, {"no_coords",no_argument,0,'C'}, {"no_crd",no_argument,0,'C'}, {"dbg_lvl",required_argument,0,'D'}, {"debug",required_argument,0,'D'}, {"nco_dbg_lvl",required_argument,0,'D'}, {"dimension",required_argument,0,'d'}, {"dmn",required_argument,0,'d'}, {"fortran",no_argument,0,'F'}, {"ftn",no_argument,0,'F'}, {"fl_lst_in",no_argument,0,'H'}, {"file_list",no_argument,0,'H'}, {"history",no_argument,0,'h'}, {"hst",no_argument,0,'h'}, {"dfl_lvl",required_argument,0,'L'}, /* [enm] Deflate level */ {"deflate",required_argument,0,'L'}, /* [enm] Deflate level */ {"local",required_argument,0,'l'}, {"lcl",required_argument,0,'l'}, {"no-normalize-by-weight",no_argument,0,'N',}, {"no_nrm_by_wgt",no_argument,0,'N',}, {"nintap",required_argument,0,'n'}, {"overwrite",no_argument,0,'O'}, {"ovr",no_argument,0,'O'}, {"output",required_argument,0,'o'}, {"fl_out",required_argument,0,'o'}, {"path",required_argument,0,'p'}, {"pack",required_argument,0,'P'}, {"retain",no_argument,0,'R'}, {"rtn",no_argument,0,'R'}, {"revision",no_argument,0,'r'}, {"thr_nbr",required_argument,0,'t'}, {"threads",required_argument,0,'t'}, {"omp_num_threads",required_argument,0,'t'}, {"variable",required_argument,0,'v'}, {"wgt",required_argument,0,'w'}, 
{"weight",required_argument,0,'w'}, {"auxiliary",required_argument,0,'X'}, {"exclude",no_argument,0,'x'}, {"xcl",no_argument,0,'x'}, {"pseudonym",required_argument,0,'Y'}, {"program",required_argument,0,'Y'}, {"prg_nm",required_argument,0,'Y'}, {"math",required_argument,0,'y'}, {"operation",required_argument,0,'y'}, {"op_typ",required_argument,0,'y'}, {0,0,0,0} }; /* end opt_lng */ int opt_idx=0; /* Index of current long option into opt_lng array */ #ifdef _LIBINTL_H setlocale(LC_ALL,""); /* LC_ALL sets all localization tokens to same value */ bindtextdomain("nco","/home/zender/share/locale"); /* ${LOCALEDIR} is e.g., /usr/share/locale */ /* MO files should be in ${LOCALEDIR}/es/LC_MESSAGES */ textdomain("nco"); /* PACKAGE is name of program or library */ #endif /* not _LIBINTL_H */ /* Start timer and save command line */ ddra_info.tmr_flg=nco_tmr_srt; rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_mtd; cmd_ln=nco_cmd_ln_sng(argc,argv); /* Get program name and set program enum (e.g., nco_prg_id=ncra) */ nco_prg_nm=nco_prg_prs(argv[0],&nco_prg_id); #ifdef ENABLE_MPI /* MPI Initialization */ if(False) (void)fprintf(stdout,gettext("%s: WARNING Compiled with MPI\n"),nco_prg_nm); MPI_Init(&argc,&argv); MPI_Comm_size(mpi_cmm,&prc_nbr); MPI_Comm_rank(mpi_cmm,&prc_rnk); #endif /* !ENABLE_MPI */ /* Parse command line arguments */ while(1){ /* getopt_long_only() allows one dash to prefix long options */ opt=getopt_long(argc,argv,opt_sht_lst,opt_lng,&opt_idx); /* NB: access to opt_crr is only valid when long_opt is detected */ if(opt == EOF) break; /* Parse positional arguments once getopt_long() returns EOF */ opt_crr=(char *)strdup(opt_lng[opt_idx].name); /* Process long options without short option counterparts */ if(opt == 0){ if(!strcmp(opt_crr,"baa") || !strcmp(opt_crr,"bit_alg")){ nco_baa_cnv=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif baa 
*/ if(!strcmp(opt_crr,"bfr_sz_hnt") || !strcmp(opt_crr,"buffer_size_hint")){ bfr_sz_hnt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_byt") || !strcmp(opt_crr,"chunk_byte")){ cnk_sz_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_byt */ if(!strcmp(opt_crr,"cnk_csh") || !strcmp(opt_crr,"chunk_cache")){ cnk_csh_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_csh_byt */ if(!strcmp(opt_crr,"cnk_min") || !strcmp(opt_crr,"chunk_min")){ cnk_min_byt=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk_min */ if(!strcmp(opt_crr,"cnk_dmn") || !strcmp(opt_crr,"chunk_dimension")){ /* Copy limit argument for later processing */ cnk_arg[cnk_nbr]=(char *)strdup(optarg); cnk_nbr++; } /* endif cnk */ if(!strcmp(opt_crr,"cnk_scl") || !strcmp(opt_crr,"chunk_scalar")){ cnk_sz_scl=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_map") || !strcmp(opt_crr,"chunk_map")){ /* Chunking map */ cnk_map_sng=(char *)strdup(optarg); cnk_map=nco_cnk_map_get(cnk_map_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cnk_plc") || !strcmp(opt_crr,"chunk_policy")){ /* Chunking policy */ cnk_plc_sng=(char *)strdup(optarg); cnk_plc=nco_cnk_plc_get(cnk_plc_sng); } /* endif cnk */ if(!strcmp(opt_crr,"cll_msr") || !strcmp(opt_crr,"cell_measures")) EXTRACT_CLL_MSR=True; /* [flg] Extract cell_measures variables */ if(!strcmp(opt_crr,"no_cll_msr") || !strcmp(opt_crr,"no_cell_measures")) EXTRACT_CLL_MSR=False; /* [flg] Do not extract cell_measures variables */ if(!strcmp(opt_crr,"cb") || !strcmp(opt_crr,"clm_bnd") || !strcmp(opt_crr,"clm_nfo") || 
!strcmp(opt_crr,"climatology_information")){ clm_nfo_sng=(char *)strdup(optarg); flg_cb=True; /* [sct] Process climatology and bounds information */ } /* !clm_nfo */ if(!strcmp(opt_crr,"frm_trm") || !strcmp(opt_crr,"formula_terms")) EXTRACT_FRM_TRM=True; /* [flg] Extract formula_terms variables */ if(!strcmp(opt_crr,"no_frm_trm") || !strcmp(opt_crr,"no_formula_terms")) EXTRACT_FRM_TRM=False; /* [flg] Do not extract formula_terms variables */ if(!strcmp(opt_crr,"cll_mth") || !strcmp(opt_crr,"cell_methods")) flg_cll_mth=True; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"no_cll_mth") || !strcmp(opt_crr,"no_cell_methods")) flg_cll_mth=False; /* [flg] Add/modify cell_methods attributes */ if(!strcmp(opt_crr,"mmr_cln") || !strcmp(opt_crr,"clean")) flg_mmr_cln=True; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"drt") || !strcmp(opt_crr,"mmr_drt") || !strcmp(opt_crr,"dirty")) flg_mmr_cln=False; /* [flg] Clean memory prior to exit */ if(!strcmp(opt_crr,"fl_fmt") || !strcmp(opt_crr,"file_format")) rcd=nco_create_mode_prs(optarg,&fl_out_fmt); if(!strcmp(opt_crr,"dbl") || !strcmp(opt_crr,"rth_dbl")) nco_rth_cnv=nco_rth_flt_dbl; /* [flg] Arithmetic convention: promote float to double */ if(!strcmp(opt_crr,"flt") || !strcmp(opt_crr,"rth_flt")) nco_rth_cnv=nco_rth_flt_flt; /* [flg] Arithmetic convention: keep single-precision */ if(!strcmp(opt_crr,"gaa") || !strcmp(opt_crr,"glb_att_add")){ gaa_arg=(char **)nco_realloc(gaa_arg,(gaa_nbr+1)*sizeof(char *)); gaa_arg[gaa_nbr++]=(char *)strdup(optarg); } /* endif gaa */ if(!strcmp(opt_crr,"hdf4")) nco_fmt_xtn=nco_fmt_xtn_hdf4; /* [enm] Treat file as HDF4 */ if(!strcmp(opt_crr,"hdf_upk") || !strcmp(opt_crr,"hdf_unpack")) nco_upk_cnv=nco_upk_HDF_MOD10; /* [flg] HDF unpack convention: unpacked=scale_factor*(packed-add_offset) */ if(!strcmp(opt_crr,"hdr_pad") || !strcmp(opt_crr,"header_pad")){ hdr_pad=strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) 
nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); } /* endif "hdr_pad" */ if(!strcmp(opt_crr,"help") || !strcmp(opt_crr,"hlp")){ (void)nco_usg_prn(); nco_exit(EXIT_SUCCESS); } /* endif "help" */ if(!strcmp(opt_crr,"hpss_try")) HPSS_TRY=True; /* [flg] Search HPSS for unfound files */ if(!strcmp(opt_crr,"ilv_srd") || !strcmp(opt_crr,"interleave_stride")){ ilv_srd=strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); if(ilv_srd < 1L){ (void)fprintf(stdout,"%s: ERROR Interleave stride argument is %li but must be > 0\n",nco_prg_nm_get(),ilv_srd); nco_exit(EXIT_FAILURE); } /* end if */ FLG_ILV=FLG_MRO=True; /* [flg] Interleave stride */ } /* !ilv_srd */ if(!strcmp(opt_crr,"log_lvl") || !strcmp(opt_crr,"log_level")){ log_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); nc_set_log_level(log_lvl); } /* !log_lvl */ if(!strcmp(opt_crr,"md5_dgs") || !strcmp(opt_crr,"md5_digest")){ if(!md5) md5=nco_md5_ini(); md5->dgs=True; if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: INFO Will perform MD5 digests of input and output hyperslabs\n",nco_prg_nm_get()); } /* endif "md5_dgs" */ if(!strcmp(opt_crr,"mro") || !strcmp(opt_crr,"multi_record_output")) FLG_MRO=True; /* [flg] Multi-Record Output */ if(!strcmp(opt_crr,"mso") || !strcmp(opt_crr,"multi_subcycle_output")) FLG_MSO=True; /* [flg] Multi-Subcycle Output */ if(!strcmp(opt_crr,"msa_usr_rdr") || !strcmp(opt_crr,"msa_user_order")) MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ if(!strcmp(opt_crr,"nsm_fl") || !strcmp(opt_crr,"nsm_file") || !strcmp(opt_crr,"ensemble_file")){ if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm); nco_prg_nm=nco_prg_prs("ncfe",&nco_prg_id); } /* endif nsm_fl */ if(!strcmp(opt_crr,"nsm_grp") || !strcmp(opt_crr,"nsm_group") || !strcmp(opt_crr,"ensemble_group")){ if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm); 
nco_prg_nm=nco_prg_prs("ncge",&nco_prg_id); } /* endif nsm_grp */ if(!strcmp(opt_crr,"nsm_sfx") || !strcmp(opt_crr,"ensemble_suffix")) nsm_sfx=(char *)strdup(optarg); if(!strcmp(opt_crr,"per_record_weights") || !strcmp(opt_crr,"prw")) flg_wgt_by_rec_not_by_fl=True; /* [flg] Weight each record (not file) by command-line numeric weights, if any */ if(!strcmp(opt_crr,"ppc") || !strcmp(opt_crr,"precision_preserving_compression") || !strcmp(opt_crr,"quantize")){ ppc_arg[ppc_nbr]=(char *)strdup(optarg); ppc_nbr++; } /* endif "ppc" */ if(!strcmp(opt_crr,"prm_ints") || !strcmp(opt_crr,"prm_ntg") || !strcmp(opt_crr,"promote_integers")){ PROMOTE_INTS=True; /* [flg] Promote integers to floating point in output */ if(nco_prg_id_get() != ncra){ (void)fprintf(stdout,"%s: ERROR Option --promote_integers to archive arithmetically processed integer-valued variables as floating point values is only supported with ncra\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* end if */ } /* !prm_int */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"create_ram") || !strcmp(opt_crr,"diskless_all")) RAM_CREATE=True; /* [flg] Create (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"ram_all") || !strcmp(opt_crr,"open_ram") || !strcmp(opt_crr,"diskless_all")) RAM_OPEN=True; /* [flg] Open (netCDF3) file(s) in RAM */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"create_share")) SHARE_CREATE=True; /* [flg] Create (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"share_all") || !strcmp(opt_crr,"unbuffered_io") || !strcmp(opt_crr,"uio") || !strcmp(opt_crr,"open_share")) SHARE_OPEN=True; /* [flg] Open (netCDF3) file(s) with unbuffered I/O */ if(!strcmp(opt_crr,"rec_apn") || !strcmp(opt_crr,"record_append")){ REC_APN=True; /* [flg] Append records directly to output file */ FORCE_APPEND=True; } /* endif "rec_apn" */ if(!strcmp(opt_crr,"vrs") || !strcmp(opt_crr,"version")){ (void)nco_vrs_prn(CVS_Id,CVS_Revision); 
nco_exit(EXIT_SUCCESS); } /* endif "vrs" */ if(!strcmp(opt_crr,"wrt_tmp_fl") || !strcmp(opt_crr,"write_tmp_fl")) WRT_TMP_FL=True; if(!strcmp(opt_crr,"no_tmp_fl")) WRT_TMP_FL=False; } /* opt != 0 */ /* Process short options */ switch(opt){ case 0: /* Long options have already been processed, return */ break; case '3': /* Request netCDF3 output storage format */ fl_out_fmt=NC_FORMAT_CLASSIC; break; case '4': /* Request netCDF4 output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4; break; case '5': /* Request netCDF3 64-bit offset+data storage (i.e., pnetCDF) format */ fl_out_fmt=NC_FORMAT_CDF5; break; case '6': /* Request netCDF3 64-bit offset output storage format */ fl_out_fmt=NC_FORMAT_64BIT_OFFSET; break; case '7': /* Request netCDF4-classic output storage format */ fl_out_fmt=NC_FORMAT_NETCDF4_CLASSIC; break; case 'A': /* Toggle FORCE_APPEND */ FORCE_APPEND=True; break; case 'C': /* Extract all coordinates associated with extracted variables? */ EXTRACT_ASSOCIATED_COORDINATES=False; break; case 'c': EXTRACT_ALL_COORDINATES=True; break; case 'D': /* Debugging level. Default is 0. */ nco_dbg_lvl=(unsigned short int)strtoul(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtoul",sng_cnv_rcd); break; case 'd': /* Copy limit argument for later processing */ lmt_arg[lmt_nbr]=(char *)strdup(optarg); lmt_nbr++; break; case 'F': /* Toggle index convention. Default is 0-based arrays (C-style). 
*/ FORTRAN_IDX_CNV=!FORTRAN_IDX_CNV; break; case 'G': /* Apply Group Path Editing (GPE) to output group */ /* NB: GNU getopt() optional argument syntax is ugly (requires "=" sign) so avoid it http://stackoverflow.com/questions/1052746/getopt-does-not-parse-optional-arguments-to-parameters */ gpe=nco_gpe_prs_arg(optarg); fl_out_fmt=NC_FORMAT_NETCDF4; break; case 'g': /* Copy group argument for later processing */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); grp_lst_in=nco_lst_prs_2D(optarg_lcl,",",&grp_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); break; case 'H': /* Toggle writing input file list attribute */ FL_LST_IN_APPEND=!FL_LST_IN_APPEND; break; case 'h': /* Toggle appending to history global attribute */ HISTORY_APPEND=!HISTORY_APPEND; break; case 'L': /* [enm] Deflate level. Default is 0. */ dfl_lvl=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'l': /* Local path prefix for files retrieved from remote file system */ fl_pth_lcl=(char *)strdup(optarg); break; case 'N': NRM_BY_DNM=False; NORMALIZE_BY_WEIGHT=False; break; case 'n': /* NINTAP-style abbreviation of files to average */ fl_lst_abb=nco_lst_prs_2D(optarg,",",&abb_arg_nbr); if(abb_arg_nbr < 1 || abb_arg_nbr > 6){ (void)fprintf(stdout,gettext("%s: ERROR Incorrect abbreviation for file list\n"),nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); } /* end if */ break; case 'O': /* Toggle FORCE_OVERWRITE */ FORCE_OVERWRITE=!FORCE_OVERWRITE; break; case 'o': /* Name of output file */ fl_out=(char *)strdup(optarg); break; case 'p': /* Common file path */ fl_pth=(char *)strdup(optarg); break; case 'P': /* Packing policy */ nco_pck_plc_sng=(char *)strdup(optarg); nco_pck_plc=nco_pck_plc_get(nco_pck_plc_sng); break; case 'R': /* Toggle removal of remotely-retrieved-files. Default is True. 
*/ RM_RMT_FL_PST_PRC=!RM_RMT_FL_PST_PRC; break; case 'r': /* Print CVS program information and copyright notice */ (void)nco_vrs_prn(CVS_Id,CVS_Revision); (void)nco_lbr_vrs_prn(); (void)nco_cpy_prn(); (void)nco_cnf_prn(); nco_exit(EXIT_SUCCESS); break; case 't': /* Thread number */ thr_nbr=(int)strtol(optarg,&sng_cnv_rcd,NCO_SNG_CNV_BASE10); if(*sng_cnv_rcd) nco_sng_cnv_err(optarg,"strtol",sng_cnv_rcd); break; case 'v': /* Variables to extract/exclude */ /* Replace commas with hashes when within braces (convert back later) */ optarg_lcl=(char *)strdup(optarg); (void)nco_rx_comma2hash(optarg_lcl); var_lst_in=nco_lst_prs_2D(optarg_lcl,",",&var_lst_in_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); xtr_nbr=var_lst_in_nbr; break; case 'w': /* Per-file and per-record weights */ if(isalpha(optarg[0]) || optarg[0] == '/'){ wgt_nm=(char *)strdup(optarg); }else{ /* !wgt_nm */ optarg_lcl=(char *)strdup(optarg); wgt_lst_in=nco_lst_prs_2D(optarg_lcl,",",&wgt_nbr); optarg_lcl=(char *)nco_free(optarg_lcl); wgt_arr=(double *)nco_malloc(wgt_nbr*sizeof(double)); for(idx=0L;idx<wgt_nbr;idx++){ wgt_arr[idx]=strtod(wgt_lst_in[idx],&sng_cnv_rcd); if(*sng_cnv_rcd) nco_sng_cnv_err(wgt_lst_in[idx],"strtod",sng_cnv_rcd); wgt_avg_scl+=wgt_arr[idx]; } /* end loop over elements */ if(NORMALIZE_BY_WEIGHT) wgt_avg_scl/=wgt_nbr; else wgt_avg_scl=1.0/wgt_nbr; assert(wgt_avg_scl != 0.0); if(NORMALIZE_BY_WEIGHT) for(idx=0L;idx<wgt_nbr;idx++) wgt_arr[idx]/=wgt_avg_scl; if(nco_dbg_lvl >= nco_dbg_std){ (void)fprintf(stderr,"%s: INFO per-file or (with --prw) per-record weights: ",nco_prg_nm_get()); for(idx=0L;idx<wgt_nbr;idx++) (void)fprintf(stderr,"wgt_arr[%d]=%g%s",idx,wgt_arr[idx],idx < wgt_nbr-1 ? 
", " : "\n"); } /* !dbg */ } /* !wgt_nm */ break; case 'X': /* Copy auxiliary coordinate argument for later processing */ aux_arg[aux_nbr]=(char *)strdup(optarg); aux_nbr++; MSA_USR_RDR=True; /* [flg] Multi-Slab Algorithm returns hyperslabs in user-specified order */ break; case 'x': /* Exclude rather than extract variables specified with -v */ EXCLUDE_INPUT_LIST=True; break; case 'Y': /* Pseudonym */ /* Call nco_prg_prs() to reset pseudonym */ optarg_lcl=(char *)strdup(optarg); if(nco_prg_nm) nco_prg_nm=(char *)nco_free(nco_prg_nm); nco_prg_nm=nco_prg_prs(optarg_lcl,&nco_prg_id); optarg_lcl=(char *)nco_free(optarg_lcl); break; case 'y': /* Operation type */ nco_op_typ_sng=(char *)strdup(optarg); if(nco_prg_id == ncra || nco_prg_id == ncfe) nco_op_typ=nco_op_typ_get(nco_op_typ_sng); break; case '?': /* Question mark means unrecognized option, print proper usage then EXIT_FAILURE */ (void)fprintf(stdout,"%s: ERROR in command-line syntax/options. Missing or unrecognized option. Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; case '-': /* Long options are not allowed */ (void)fprintf(stderr,"%s: ERROR Long options are not available in this build. Use single letter options instead.\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); break; default: /* Print proper usage */ (void)fprintf(stdout,"%s ERROR in command-line syntax/options. 
Please reformulate command accordingly.\n",nco_prg_nm_get()); (void)nco_usg_prn(); nco_exit(EXIT_FAILURE); break; } /* end switch */ if(opt_crr) opt_crr=(char *)nco_free(opt_crr); } /* end while loop */ /* Set/report global chunk cache */ rcd+=nco_cnk_csh_ini(cnk_csh_byt); /* Process positional arguments and fill-in filenames */ fl_lst_in=nco_fl_lst_mk(argv,argc,optind,&fl_nbr,&fl_out,&FL_LST_IN_FROM_STDIN,FORCE_OVERWRITE); if(flg_wgt_by_rec_not_by_fl && nco_prg_id_get() != ncra){ (void)fprintf(fp_stdout,"%s: ERROR Illegal invocation of flag --per_record_weights (or --prw)\nHINT: Per-record weighting by command-line numeric weights is only available with ncra\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* flg_wgt_by_rec_not_by_fl */ if(wgt_arr){ if(wgt_nbr != fl_nbr && !flg_wgt_by_rec_not_by_fl){ (void)fprintf(fp_stdout,"%s: ERROR User-specified per-file weight array has %d elements but there are %d input files.\nHINT: Specify one weight per input file, or toggle the default behavior by invoking with --per_record_weights (or synonym --prw) which causes command-line weights to be applied per-record not per-file.\n",nco_prg_nm_get(),wgt_nbr,fl_nbr); nco_exit(EXIT_FAILURE); } /* !wgt_nbr */ } /* !wgt_arr */ /* Initialize thread information */ thr_nbr=nco_openmp_ini(thr_nbr); in_id_arr=(int *)nco_malloc(thr_nbr*sizeof(int)); /* Parse filename */ fl_in=nco_fl_nm_prs(fl_in,0,&fl_nbr,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); /* Make sure file is on local system and is readable or die trying */ fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); /* Open file using appropriate buffer size hints and verbosity */ if(RAM_OPEN) md_open=NC_NOWRITE|NC_DISKLESS; else md_open=NC_NOWRITE; if(SHARE_OPEN) md_open=md_open|NC_SHARE; rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,&in_id); (void)nco_inq_format(in_id,&fl_in_fmt); /* Initialize traversal table */ trv_tbl_init(&trv_tbl); /* Construct GTT, Group Traversal Table (groups,variables,dimensions, limits) */ 
(void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,var_lst_in_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl); /* Were all user-specified dimensions found? */ (void)nco_chk_dmn(lmt_nbr,flg_dne); /* Store ncge ensemble suffix in table */ if(nco_prg_id == ncge && nsm_sfx) trv_tbl->nsm_sfx=nsm_sfx; /* Get number of variables, dimensions, and global attributes in file, file format */ (void)trv_tbl_inq((int *)NULL,(int *)NULL,(int *)NULL,&nbr_dmn_fl,&dmn_rec_fl,(int *)NULL,(int *)NULL,(int *)NULL,&nbr_var_fl,trv_tbl); /* Record handling operators only */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* Build record dimensions array */ (void)nco_bld_rec_dmn(in_id,FORTRAN_IDX_CNV,&lmt_rec,&nbr_rec,trv_tbl); /* Allocate arrays for multi-records cases */ flg_input_complete=(nco_bool *)nco_malloc(nbr_rec*sizeof(nco_bool)); idx_rec_out=(long *)nco_malloc(nbr_rec*sizeof(long)); rec_in_cml=(long *)nco_malloc(nbr_rec*sizeof(long)); rec_usd_cml=(long *)nco_malloc(nbr_rec*sizeof(long)); REC_LST_DSR=(nco_bool *)nco_malloc(nbr_rec*sizeof(nco_bool)); /* Initialize arrays for multi-records cases */ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ flg_input_complete[idx_rec]=False; idx_rec_out[idx_rec]=0L; rec_in_cml[idx_rec]=0L; rec_usd_cml[idx_rec]=0L; REC_LST_DSR[idx_rec]=False; } /* Initialize arrays */ } /* Record handling operators only */ /* Is this an ARM-format data file? 
*/ CNV_ARM=nco_cnv_arm_inq(in_id); /* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */ if(CNV_ARM) base_time_srt=nco_cnv_arm_base_time_get(in_id); /* Fill-in variable structure list for all extracted variables */ var=nco_fll_var_trv(in_id,&xtr_nbr,trv_tbl); /* Duplicate to output array */ var_out=(var_sct **)nco_malloc(xtr_nbr*sizeof(var_sct *)); for(idx=0;idx<xtr_nbr;idx++){ var_out[idx]=nco_var_dpl(var[idx]); (void)nco_xrf_var(var[idx],var_out[idx]); (void)nco_xrf_dmn(var_out[idx]); } /* end loop over xtr */ /* Refresh var_out with dim_out data */ (void)nco_var_dmn_refresh(var_out,xtr_nbr); /* Determine conventions (ARM/CCM/CCSM/CF/MPAS) for treating file */ cnv=nco_cnv_ini(in_id); /* Divide variable lists into lists of fixed variables and variables to be processed */ (void)nco_var_lst_dvd(var,var_out,xtr_nbr,cnv,True,nco_pck_plc_nil,nco_pck_map_nil,(dmn_sct **)NULL,0,&var_fix,&var_fix_out,&nbr_var_fix,&var_prc,&var_prc_out,&nbr_var_prc,trv_tbl); /* Store processed and fixed variables info into GTT */ (void)nco_var_prc_fix_trv(nbr_var_prc,var_prc,nbr_var_fix,var_fix,trv_tbl); /* Make output and input files consanguinous */ if(fl_out_fmt == NCO_FORMAT_UNDEFINED) fl_out_fmt=fl_in_fmt; /* Initialize, decode, and set PPC information */ if(ppc_nbr > 0) nco_ppc_ini(in_id,&dfl_lvl,fl_out_fmt,ppc_arg,ppc_nbr,trv_tbl); /* Verify output file format supports requested actions */ (void)nco_fl_fmt_vet(fl_out_fmt,cnk_nbr,dfl_lvl); /* Open output file */ fl_out_tmp=nco_fl_out_open(fl_out,&FORCE_APPEND,FORCE_OVERWRITE,fl_out_fmt,&bfr_sz_hnt,RAM_CREATE,RAM_OPEN,SHARE_CREATE,SHARE_OPEN,WRT_TMP_FL,&out_id); /* Initialize chunking from user-specified inputs */ if(fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC) rcd+=nco_cnk_ini(in_id,fl_out,cnk_arg,cnk_nbr,cnk_map,cnk_plc,cnk_csh_byt,cnk_min_byt,cnk_sz_byt,cnk_sz_scl,&cnk); /* Keep integers promoted to double-precision on output */ // if(PROMOTE_INTS) 
(void)nco_set_prm_typ_out(xtr_nbr,var,trv_tbl); // (void)nco_set_prm_typ_out(xtr_nbr,var,trv_tbl); if(nco_prg_id_get() == ncra) (void)nco_set_prm_typ_out(PROMOTE_INTS,xtr_nbr,var,trv_tbl); /* Define dimensions, extracted groups, variables, and attributes in output file */ (void)nco_xtr_dfn(in_id,out_id,&cnk,dfl_lvl,gpe,md5,!FORCE_APPEND,!REC_APN,False,nco_pck_plc_nil,(char *)NULL,trv_tbl); /* Define ensemble fixed variables (True parameter) */ if(nco_prg_id_get() == ncge) (void)nco_nsm_dfn_wrt(in_id,out_id,&cnk,dfl_lvl,gpe,True,trv_tbl); /* Catenate time-stamped command line to "history" global attribute */ if(HISTORY_APPEND) (void)nco_hst_att_cat(out_id,cmd_ln); if(HISTORY_APPEND && FORCE_APPEND) (void)nco_prv_att_cat(fl_in,in_id,out_id); if(gaa_nbr > 0) (void)nco_glb_att_add(out_id,gaa_arg,gaa_nbr); if(HISTORY_APPEND) (void)nco_vrs_att_cat(out_id); if(thr_nbr > 1 && HISTORY_APPEND) (void)nco_thr_att_cat(out_id,thr_nbr); /* Add input file list global attribute */ if(FL_LST_IN_APPEND && HISTORY_APPEND && FL_LST_IN_FROM_STDIN) (void)nco_fl_lst_att_cat(out_id,fl_lst_in,fl_nbr); /* Turn-off default filling behavior to enhance efficiency */ (void)nco_set_fill(out_id,NC_NOFILL,&fll_md_old); /* Add climatology_bounds attribute to output file (before cell_methods) */ if(flg_cb && (nco_prg_id == ncra || nco_prg_id == ncrcat || nco_prg_id == ncfe)){ char bnd_sng[]="bounds"; /* CF-standard time-bounds attribute name */ char clm_sng[]="climatology"; /* CF-standard climatology bounds attribute name */ char cln_sng[]="calendar"; /* CF-standard calendar attribute name */ char unt_sng[]="units"; /* NUG-standard units attribute name */ long att_sz; nc_type att_typ; cb=(clm_bnd_sct *)nco_malloc(sizeof(clm_bnd_sct)); cb->bnd2clm=False; /* [flg] Convert time-bounds to climatology bounds */ cb->bnd_val=NULL; /* [frc] Time coordinate variable values */ cb->clm2bnd=False; /* [flg] Convert climatology bounds to time-bounds */ cb->clm2clm=False; /* [flg] Convert climatology bounds to 
climatology bounds */ cb->clm_bnd_id_in=NC_MIN_INT; /* [id] Climatology bounds ID */ cb->clm_bnd_id_out=NC_MIN_INT; /* [id] Climatology bounds ID */ cb->clm_bnd_in=False; /* [flg] Climatology bounds appear in input */ cb->clm_bnd_nm=NULL; /* [sng] Climatology bounds name */ cb->cln_val=NULL; /* [sng] Bounds calendar value */ cb->dmn_srt_end[0]=0L;cb->dmn_srt_end[1]=1L; cb->dmn_srt_srt[0]=0L;cb->dmn_srt_srt[1]=0L; cb->mth_end=NC_MIN_INT; /* [mth] Month at climo end [1..12] format */ cb->mth_srt=NC_MIN_INT; /* [mth] Month at climo start [1..12] format */ cb->tm_bnd_id_in=NC_MIN_INT; /* [id] Time-bounds ID */ cb->tm_bnd_in=False; /* [flg] Time-bounds appear in input */ cb->tm_bnd_nm=NULL; /* [sng] Time-bounds name */ cb->tm_crd_id_in=NC_MIN_INT; /* [id] Time coordinate ID */ cb->tm_crd_nm=NULL; /* [sng] Time coordinate name */ cb->tm_val=NULL; /* [frc] Time (or climatology) bounds variable values */ cb->tpd=NC_MIN_INT; /* [nbr] Timesteps per day [0=none, 1, 2, 3, 4, 6, 8, 12, 24, ...]*/ cb->type=NC_NAT; /* [enm] Time coordinate type */ cb->unt_val=NULL; /* [sng] Bounds units value */ cb->yr_end=NC_MIN_INT; /* [yr] Year at climo start */ cb->yr_srt=NC_MIN_INT; /* [yr] Year at climo start */ if((rcd=nco_inq_varid_flg(in_id,"time",&cb->tm_crd_id_in)) == NC_NOERR) cb->tm_crd_nm=strdup("time"); else if((rcd=nco_inq_varid_flg(in_id,"Time",&cb->tm_crd_id_in)) == NC_NOERR) cb->tm_crd_nm=strdup("Time"); if(cb->tm_crd_id_in != NC_MIN_INT){ rcd=nco_inq_vartype(in_id,cb->tm_crd_id_in,&cb->type); rcd=nco_inq_att_flg(in_id,cb->tm_crd_id_in,clm_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ cb->clm_bnd_nm=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,cb->tm_crd_id_in,clm_sng,cb->clm_bnd_nm,att_typ); /* NUL-terminate attribute before using strstr() */ cb->clm_bnd_nm[att_sz]='\0'; cb->clm_bnd_in=True; }else{ cb->clm_bnd_nm=strdup("climatology_bounds"); rcd=NC_NOERR; } /* !rcd && att_typ */ 
rcd=nco_inq_att_flg(in_id,cb->tm_crd_id_in,bnd_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ cb->tm_bnd_nm=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(in_id,cb->tm_crd_id_in,bnd_sng,cb->tm_bnd_nm,att_typ); /* NUL-terminate attribute before using strstr() */ cb->tm_bnd_nm[att_sz]='\0'; cb->tm_bnd_in=True; }else{ cb->tm_bnd_nm=strdup("time_bnds"); rcd=NC_NOERR; } /* !rcd && att_typ */ /* Input file must have either (but not both) time bounds or climatology bounds */ if(cb->tm_bnd_in && cb->clm_bnd_in){ (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on time coordinate with both time bounds attribute \"%s\" (value = \"%s\") and climatology bounds attribute \"%s\" (value = \"%s\"). Results would be ambiguous. Turning-off climatology bounds mode.\n",nco_prg_nm_get(),bnd_sng,cb->tm_bnd_nm,clm_sng,cb->clm_bnd_nm); flg_cb=False; goto skp_cb; } /* !(cb->tm_bnd_in && cb->clm_bnd_in) */ if(!cb->tm_bnd_in && !cb->clm_bnd_in){ (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on time coordinate with neither time bounds attribute \"%s\" nor climatology bounds attribute \"%s\". No way to obtain bounding time values. Turning-off climatology bounds mode.\n",nco_prg_nm_get(),bnd_sng,clm_sng); flg_cb=False; goto skp_cb; } /* !cb->tm_bnd_in && !cb->clm_bnd_in */ }else{ /* !tm_crd_id_in */ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on dataset with unknown time coordinate. Turning-off climatology bounds mode.\n",nco_prg_nm_get()); flg_cb=False; rcd=NC_NOERR; goto skp_cb; } /* !tm_crd_in */ if(cb->tm_bnd_in){ rcd=nco_inq_varid_flg(in_id,cb->tm_bnd_nm,&cb->tm_bnd_id_in); if(cb->tm_bnd_id_in == NC_MIN_INT){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on dataset with missing time bounds variable \"%s\". 
Turning-off climatology bounds mode.\n",nco_prg_nm_get(),cb->tm_bnd_nm); flg_cb=False; rcd=NC_NOERR; goto skp_cb; } /* !tm_bnd_id_in */ } /* !tm_bnd_in */ if(cb->clm_bnd_in){ rcd=nco_inq_varid_flg(in_id,cb->clm_bnd_nm,&cb->clm_bnd_id_in); if(cb->clm_bnd_id_in == NC_MIN_INT){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: WARNING Climatology bounds invoked on dataset with missing climatology bounds variable \"%s\". Turning-off climatology bounds mode.\n",nco_prg_nm_get(),cb->tm_bnd_nm); flg_cb=False; rcd=NC_NOERR; goto skp_cb; } /* !tm_bnd_id_in */ } /* !clm_bnd_in */ rcd=nco_inq_varid_flg(out_id,cb->tm_crd_nm,&cb->tm_crd_id_out); if(rcd != NC_NOERR){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: ERROR Climatology bounds did not find time coordinate in output file\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !tm_crd_id_out */ /* Populate cb structure with information from clm_nfo_sng */ if(clm_nfo_sng) rcd=nco_clm_nfo_get(clm_nfo_sng,cb); if(cb->tpd == 0){ /* Monthly mean input */ if(cb->mth_srt == 1 && cb->mth_end == 12){ /* Climatological monthly or seasonal means will be reduced to climatological annual means */ /* DJF seasonal climos in SCD mode present as Y1,Y2,12,2 where DJF seasonal climos in SDD mode present as Y1,Y2,1,12 which is the same as ANN Thus determining clm2clm not clm2bnd for SDD DJF presents special difficulty Hardcode this case as DJF/clm2clm unless fl_nbr = 4 or 12 in which case ANN/clm2bnd */ if(fl_nbr == 3 && cb->clm_bnd_in) cb->clm2clm=True; else if((fl_nbr == 4 || fl_nbr == 12) && cb->clm_bnd_in) cb->clm2bnd=True; else{ (void)fprintf(stderr,"%s: INFO Combination of months and clm_nfo lead to ambiguous determination of clm2bnd or clm2clm. 
Turning-off climatology bounds mode.\n",nco_prg_nm_get()); flg_cb=False; goto skp_cb; } }else{ /* Climatological monthly or seasonal means will be processed to non-annual means */ if(cb->tm_bnd_in) cb->bnd2clm=True; if(cb->clm_bnd_in) cb->clm2clm=True; } /* !cb->mth */ // }else if(cb->tpd == 1){ /* Daily mean input is currently not handled */ //assert(cb->tpd != 1); }else if(cb->tpd >= 1){ /* Diurnally resolved input */ if(cb->tm_bnd_in) cb->bnd2clm=True; if(cb->clm_bnd_in) cb->clm2clm=True; } /* !cb->tpd */ cb->tm_val=(double *)nco_malloc(max_int(1,cb->tpd)*sizeof(double)); /* [frc] Time coordinate variable values */ cb->bnd_val=(double *)nco_malloc(max_int(1,cb->tpd)*2*sizeof(double)); /* [frc] Time (or climatology) bounds variable values */ if(cb->bnd2clm){ rcd=nco_inq_varid_flg(out_id,cb->tm_bnd_nm,&cb->tm_bnd_id_out); if(rcd != NC_NOERR){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: ERROR Time-bounds variable %s was not copied to output file\n",nco_prg_nm_get(),cb->tm_bnd_nm); nco_exit(EXIT_FAILURE); } /* !tm_bnd_id_out */ /* Write climatology bounds to time-bounds then rename */ cb->clm_bnd_id_out=cb->tm_bnd_id_out; } /* !bnd2clm */ if(cb->clm2clm || cb->clm2bnd){ rcd=nco_inq_varid_flg(out_id,cb->clm_bnd_nm,&cb->clm_bnd_id_out); if(rcd != NC_NOERR){ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: ERROR Climatology bounds variable %s was not copied to output file\n",nco_prg_nm_get(),cb->clm_bnd_nm); nco_exit(EXIT_FAILURE); } /* !clm_bnd_id_out */ /* clm2bnd writes time-bounds to climatology bounds then renames, and clm2clm uses tm_bnd_id_out */ cb->tm_bnd_id_out=cb->clm_bnd_id_out; } /* !clm2clm */ /* Begin attribute manipulation */ aed_sct aed_mtd; char *att_nm; char *att_val; if(cb->bnd2clm || cb->clm2bnd){ /* Add new bounds attribute */ att_nm = (cb->bnd2clm) ? strdup(clm_sng) : strdup(bnd_sng); att_val= (cb->bnd2clm) ? 
strdup(cb->clm_bnd_nm) : strdup(cb->tm_bnd_nm); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=cb->tm_crd_nm; aed_mtd.id=cb->tm_crd_id_out; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; (void)nco_aed_prc(out_id,cb->tm_crd_id_out,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(att_val) att_val=(char *)nco_free(att_val); /* Delete old bounds attribute */ att_nm= (cb->bnd2clm) ? strdup(bnd_sng) : strdup(clm_sng); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=cb->tm_crd_nm; aed_mtd.id=cb->tm_crd_id_out; aed_mtd.mode=aed_delete; (void)nco_aed_prc(out_id,cb->tm_crd_id_out,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); } /* !bnd2clm !clm2bnd */ /* Obtain units string */ rcd=nco_inq_att_flg(out_id,cb->tm_crd_id_out,unt_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ cb->unt_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(out_id,cb->tm_crd_id_out,unt_sng,cb->unt_val,att_typ); /* NUL-terminate attribute before using strstr() */ cb->unt_val[att_sz]='\0'; } /* !rcd */ /* Copy units attribute from coordinate to new bounds if necessary */ if(cb->tm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->tm_bnd_id_out,unt_sng,&att_typ,&att_sz); if(cb->clm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->clm_bnd_id_out,unt_sng,&att_typ,&att_sz); if(rcd != NC_NOERR){ if(cb->bnd2clm || cb->clm2bnd){ /* Add units attribute */ att_nm=strdup(unt_sng); att_val=strdup(cb->unt_val); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=(cb->bnd2clm) ? cb->tm_bnd_nm : cb->clm_bnd_nm; aed_mtd.id=(cb->bnd2clm) ? 
cb->tm_bnd_id_out : cb->clm_bnd_id_out; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; if(cb->bnd2clm) (void)nco_aed_prc(out_id,cb->tm_bnd_id_out,aed_mtd); else (void)nco_aed_prc(out_id,cb->clm_bnd_id_out,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(att_val) att_val=(char *)nco_free(att_val); } /* !bnd2clm !clm2bnd */ rcd=NC_NOERR; } /* !rcd */ /* Obtain calendar string */ rcd=nco_inq_att_flg(out_id,cb->tm_crd_id_out,cln_sng,&att_typ,&att_sz); if(rcd == NC_NOERR && att_typ == NC_CHAR){ cb->cln_val=(char *)nco_malloc((att_sz+1L)*nco_typ_lng(att_typ)); rcd+=nco_get_att(out_id,cb->tm_crd_id_out,cln_sng,cb->cln_val,att_typ); /* NUL-terminate attribute before using strstr() */ cb->cln_val[att_sz]='\0'; } /* !rcd */ /* Copy calendar attribute from coordinate to new bounds if necessary */ if(cb->tm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->tm_bnd_id_out,cln_sng,&att_typ,&att_sz); if(cb->clm_bnd_in) rcd=nco_inq_att_flg(out_id,cb->clm_bnd_id_out,cln_sng,&att_typ,&att_sz); if(rcd != NC_NOERR){ if(cb->bnd2clm || cb->clm2bnd){ /* Add calendar attribute */ att_nm=strdup(cln_sng); att_val=strdup(cb->cln_val); aed_mtd.att_nm=att_nm; aed_mtd.var_nm=(cb->bnd2clm) ? cb->tm_bnd_nm : cb->clm_bnd_nm; aed_mtd.id=(cb->bnd2clm) ? 
cb->tm_bnd_id_out : cb->clm_bnd_id_out; aed_mtd.sz=strlen(att_val); aed_mtd.type=NC_CHAR; aed_mtd.val.cp=att_val; aed_mtd.mode=aed_create; if(cb->bnd2clm) (void)nco_aed_prc(out_id,cb->tm_bnd_id_out,aed_mtd); else (void)nco_aed_prc(out_id,cb->clm_bnd_id_out,aed_mtd); if(att_nm) att_nm=(char *)nco_free(att_nm); if(att_val) att_val=(char *)nco_free(att_val); } /* !bnd2clm !clm2bnd */ rcd=NC_NOERR; } /* !rcd */ /* Combine calendar and units strings with clm_nfo_sng to create climatological time and bounds arrays */ if(clm_nfo_sng) rcd=nco_clm_nfo_to_tm_bnds(cb->yr_srt,cb->yr_end,cb->mth_srt,cb->mth_end,cb->tpd,cb->unt_val,cb->cln_val,cb->bnd_val,cb->tm_val); //assert(rcd != NCO_NOERR); } /* !flg_cb */ /* goto skp_cb */ skp_cb: /* free() any abandoned cb structure now or it will be inadvertently used in nco_cnv_cf_cll_mth_add() */ if(!flg_cb) if(cb) cb=(clm_bnd_sct *)nco_free(cb); /* Add cell_methods attributes (before exiting define mode) */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ dmn_sct **dmn=NULL_CEWI; int nbr_dmn=nbr_rec; dmn=(dmn_sct **)nco_malloc(nbr_dmn*sizeof(dmn_sct *)); /* Make dimension array from limit records array */ (void)nco_dmn_lmt(lmt_rec,nbr_dmn,&dmn); /* Add cell_methods attributes (pass as dimension argument a records-only array) */ if(flg_cll_mth) rcd+=nco_cnv_cf_cll_mth_add(out_id,var_prc_out,nbr_var_prc,dmn,nbr_dmn,nco_op_typ,gpe,cb,trv_tbl); if(nbr_dmn > 0) dmn=nco_dmn_lst_free(dmn,nbr_dmn); } /* !ncra */ /* Take output file out of define mode */ if(hdr_pad == 0UL){ (void)nco_enddef(out_id); }else{ (void)nco__enddef(out_id,hdr_pad); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"%s: INFO Padding header with %lu extra bytes\n",nco_prg_nm_get(),(unsigned long)hdr_pad); } /* hdr_pad */ /* Zero start and stride vectors for all output variables */ (void)nco_var_srd_srt_set(var_out,xtr_nbr); /* Copy variable data for non-processed variables */ (void)nco_cpy_fix_var_trv(in_id,out_id,gpe,trv_tbl); /* Write ensemble fixed variables (False 
parameter) */ if(nco_prg_id_get() == ncge) (void)nco_nsm_dfn_wrt(in_id,out_id,&cnk,dfl_lvl,gpe,False,trv_tbl); /* Allocate and, if necesssary, initialize accumulation space for processed variables */ for(idx=0;idx<nbr_var_prc;idx++){ /* Record operators only need space for one record, not entire variable */ if(nco_prg_id == ncra || nco_prg_id == ncrcat) var_prc[idx]->sz=var_prc[idx]->sz_rec=var_prc_out[idx]->sz=var_prc_out[idx]->sz_rec; if(nco_prg_id == ncra || nco_prg_id == ncfe || nco_prg_id == ncge){ /* 20200701: Iff has_mss_val then need wgt_sum to track running sum of time-varying (per-record or per-file) weights applied at each grid point in variables that may have spatio-temporally varying missing values */ if((wgt_arr || wgt_nm) && var_prc[idx]->has_mss_val) var_prc_out[idx]->wgt_sum=var_prc[idx]->wgt_sum=(double *)nco_calloc(var_prc_out[idx]->sz,sizeof(double)); else var_prc_out[idx]->wgt_sum=NULL; var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_calloc(var_prc_out[idx]->sz,sizeof(long)); var_prc_out[idx]->val.vp=(void *)nco_calloc(var_prc_out[idx]->sz,nco_typ_lng(var_prc_out[idx]->type)); } /* end if */ } /* end loop over idx */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){ /* Find weight variable that matches current variable */ wgt=nco_var_get_wgt_trv(in_id,lmt_nbr,lmt_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,wgt_nm,var_prc[0],trv_tbl); /* ncra can handle scalar, 1-D, and degenerate 1-D weights, nces requires scalar weights */ if(nco_prg_id == ncra) assert(wgt->nbr_dim < 2); if(nco_prg_id == ncfe || nco_prg_id == ncge){ if(wgt->nbr_dim == 1) assert(wgt->sz_rec == 1L); else assert(wgt->nbr_dim == 0); } /* !ncfe */ /* Change wgt from a normal full (scalar or 1-D) variable to a scalar variable This permits us to weight with scalar arithmetic later, rather than broadcasting the weight This differs from ncwa wgt treatment (where wgt can be N-D and is always broadcast to match variable) 20150708: Unsure why nco_var_dpl() calls below 
generate valgrind invalid read errors */ /* 20200701: Verified that sz_rec == 1 when wgt is scalar */ // if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(stderr,"%s: DEBUG wgt_nm=%s, wgt->sz_rec=%li\n",nco_prg_nm_get(),wgt_nm,wgt->sz_rec); wgt->val.vp=(void *)nco_realloc(wgt->val.vp,wgt->sz_rec*nco_typ_lng(wgt->type)); wgt->tally=(long *)nco_realloc(wgt->tally,wgt->sz_rec*sizeof(long)); (void)nco_var_zero(wgt->type,wgt->sz_rec,wgt->val); (void)nco_zero_long(wgt->sz_rec,wgt->tally); wgt_out=nco_var_dpl(wgt); wgt_avg=nco_var_dpl(wgt_out); } /* !wgt_nm */ /* Close first input netCDF file */ nco_close(in_id); /* Timestamp end of metadata setup and disk layout */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); ddra_info.tmr_flg=nco_tmr_rgl; /* Loop over input files */ for(fl_idx=0;fl_idx<fl_nbr;fl_idx++){ /* Parse filename */ if(fl_idx != 0) fl_in=nco_fl_nm_prs(fl_in,fl_idx,(int *)NULL,fl_lst_in,abb_arg_nbr,fl_lst_abb,fl_pth); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,gettext("%s: INFO Input file %d is %s"),nco_prg_nm_get(),fl_idx,fl_in); /* Make sure file is on local system and is readable or die trying */ if(fl_idx != 0) fl_in=nco_fl_mk_lcl(fl_in,fl_pth_lcl,HPSS_TRY,&FL_RTR_RMT_LCN); if(nco_dbg_lvl >= nco_dbg_fl && FL_RTR_RMT_LCN) (void)fprintf(stderr,gettext(", local file is %s"),fl_in); if(nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(stderr,"\n"); /* Open file once per thread to improve caching */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) rcd+=nco_fl_open(fl_in,md_open,&bfr_sz_hnt,in_id_arr+thr_idx); in_id=in_id_arr[0]; /* Do ncge ensemble refresh */ if(nco_prg_id == ncge){ /* Refresh ensembles */ if(fl_idx > 0) (void)nco_nsm_ncr(in_id,trv_tbl); /* Check if ensembles are valid */ (void)nco_chk_nsm(in_id,fl_idx,trv_tbl); }else{ /* ! 
ncge */ /* Variables may have different ID, missing_value, type, in each file */ for(idx=0;idx<nbr_var_prc;idx++){ /* Obtain variable GTT object using full variable name */ trv_sct *trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,trv->grp_nm_fll,&grp_id); (void)nco_var_mtd_refresh(grp_id,var_prc[idx]); } /* end loop over variables */ } /* ! ncge */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){ /* Get weight ID in this file */ trv_sct *trv=trv_tbl_var_nm_fll(wgt_out->nm_fll,trv_tbl); (void)nco_inq_grp_full_ncid(in_id,trv->grp_nm_fll,&grp_id); (void)nco_var_mtd_refresh(grp_id,wgt_out); } /* !wgt_nm */ if(FLG_ILV && (nco_prg_id == ncfe || nco_prg_id == ncge)){ (void)fprintf(fp_stderr,"%s: ERROR Interleaving requested for operator %s\nHINT: Interleaving is only valid for ncra and ncrcat\n",nco_prg_nm_get(),nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* ! FLG_ILV */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* ncfe and ncge jump to else branch */ /* Loop over all record dimensions in file */ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ char *fl_udu_sng=NULL_CEWI; char ***rgd_arr_bnds_lst=NULL_CEWI; char ***rgd_arr_climo_lst=NULL_CEWI; int rgd_arr_bnds_nbr=0; int rgd_arr_climo_nbr=0; int ilv_per_ssc; /* [nbr] Number of interleaves per sub-cycle */ /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,lmt_rec[idx_rec]->grp_nm_fll,&grp_id); /* Fill record array */ //if(FLG_ILV){ //lmt_rec[idx_rec]->flg_ilv=True; //lmt_rec[idx_rec]->ilv=ilv_srd; //} /* !FLG_ILV */ (void)nco_lmt_evl(grp_id,lmt_rec[idx_rec],rec_usd_cml[idx_rec],FORTRAN_IDX_CNV); /* ILV and MRO may be set in nco_lmt_evl(), and MRO may also be set on command-line */ FLG_ILV=lmt_rec[idx_rec]->flg_ilv; if(FLG_ILV) FLG_MRO=lmt_rec[idx_rec]->flg_mro; if(FLG_MRO) lmt_rec[idx_rec]->flg_mro=True; if(FLG_MSO) lmt_rec[idx_rec]->flg_mso=True; ilv_per_ssc=lmt_rec[idx_rec]->ssc/lmt_rec[idx_rec]->ilv; /* Sub-cycles never cross file 
boundaries in interleave-compliant files */ if(lmt_rec[idx_rec]->is_rec_dmn){ int crd_id; if(nco_inq_varid_flg(grp_id,lmt_rec[idx_rec]->nm,&crd_id) == NC_NOERR){ fl_udu_sng=nco_lmt_get_udu_att(grp_id,crd_id,"units"); rgd_arr_bnds_lst=nco_lst_cf_att(grp_id,"bounds",&rgd_arr_bnds_nbr); rgd_arr_climo_lst=nco_lst_cf_att(grp_id,"climatology",&rgd_arr_climo_nbr); } /* !crd_id */ } /* !is_rec_dmn */ if(REC_APN){ int rec_var_out_id; /* Append records directly to output file */ int rec_dmn_out_id=NCO_REC_DMN_UNDEFINED; /* Get group ID using record group full name */ (void)nco_inq_grp_full_ncid(out_id,lmt_rec[idx_rec]->grp_nm_fll,&grp_out_id); /* Get dimension ID (rec_dmn_out_id) of current record from its name */ (void)nco_inq_dimid(grp_out_id,lmt_rec[idx_rec]->nm,&rec_dmn_out_id); /* Get current size of record dimension */ (void)nco_inq_dimlen(grp_out_id,rec_dmn_out_id,&idx_rec_out[idx_rec]); /* 20181212: Re-base relative to calendar units in output file, not first input file */ if(nco_inq_varid_flg(grp_out_id,lmt_rec[idx_rec]->nm,&rec_var_out_id) == NC_NOERR){ if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(fp_stderr,"%s: DEBUG REC_APN mode changing re-base units string of variable \"%s\" from input units \"%s\" ",nco_prg_nm_get(),lmt_rec[idx_rec]->nm,lmt_rec[idx_rec]->rbs_sng); lmt_rec[idx_rec]->rbs_sng=nco_lmt_get_udu_att(grp_out_id,rec_var_out_id,"units"); if(nco_dbg_lvl_get() >= nco_dbg_fl) (void)fprintf(fp_stderr,"to output units \"%s\"\n",lmt_rec[idx_rec]->rbs_sng); } /* endif record coordinate exists in output file */ } /* !REC_APN */ if(nco_dbg_lvl_get() >= nco_dbg_crr) (void)fprintf(fp_stdout,"%s: DEBUG record %d id %d name %s rec_dmn_sz %ld units=\"%s\"\n",nco_prg_nm_get(),idx_rec,lmt_rec[idx_rec]->id,lmt_rec[idx_rec]->nm_fll,lmt_rec[idx_rec]->rec_dmn_sz,fl_udu_sng); /* NB: nco_cnv_arm_base_time_get() with same nc_id contains OpenMP critical region */ if(CNV_ARM) base_time_crr=nco_cnv_arm_base_time_get(in_id); /* Perform various error-checks on input file */ 
if(False) (void)nco_fl_cmp_err_chk(); /* This file could be superfluous even though desired data may be found in upcoming files */ if(nco_dbg_lvl >= nco_dbg_std) if((lmt_rec[idx_rec]->srt > lmt_rec[idx_rec]->end) && (lmt_rec[idx_rec]->rec_rmn_prv_ssc == 0L)) (void)fprintf(fp_stdout,"%s: INFO %s (input file %d) is superfluous\n",nco_prg_nm_get(),fl_in,fl_idx); rec_dmn_sz=lmt_rec[idx_rec]->rec_dmn_sz; rec_rmn_prv_ssc=lmt_rec[idx_rec]->rec_rmn_prv_ssc; /* Local copy may be decremented later */ idx_rec_crr_in= (rec_rmn_prv_ssc > 0L) ? 0L : lmt_rec[idx_rec]->srt; if(FLG_ILV && nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stdout,"%s: DEBUG After lmt_evl() for fl_idx=%d ILV=%s MRO=%s, MSO=%s, srt=%ld, end=%ld, srd=%ld, ssc=%ld, ilv=%ld, rec_idx=%ld, rec_rmn_prv_ssc=%ld, rec_rmn_prv_ilv=%ld, idx_rec_out=%ld\n",nco_prg_nm_get(),fl_idx,FLG_ILV ? "YES" : "NO",FLG_MRO ? "YES" : "NO",FLG_MSO ? "YES" : "NO",lmt_rec[idx_rec]->srt,lmt_rec[idx_rec]->end,lmt_rec[idx_rec]->srd,lmt_rec[idx_rec]->ssc,lmt_rec[idx_rec]->ilv,idx_rec_crr_in,rec_rmn_prv_ssc,rec_rmn_prv_ilv,idx_rec_out[idx_rec]); /* Sub-cycles not allowed to cross file boundaries in interleave mode */ if(FLG_ILV && lmt_rec[0]->ilv > 1 && rec_rmn_prv_ilv > 0L){ (void)fprintf(fp_stdout,"%s: ERROR interleaved sub-cycle crosses file boundary between %s (input file %d) and previous file. 
Diagnostic counters: rec_rmn_prv_ssc = %ld, rec_rmn_prv_ilv = %ld\n",nco_prg_nm_get(),fl_in,fl_idx,rec_rmn_prv_ssc,rec_rmn_prv_ilv); nco_exit(EXIT_FAILURE); } /* !rec_rmn_prv_ilv */ /* Master while loop over records in current file */ while(idx_rec_crr_in >= 0L && idx_rec_crr_in < rec_dmn_sz){ /* Following logic/assumptions built-in to this loop: idx_rec_crr_in points to valid record before loop is entered Loop is never entered if this file has no valid (i.e., desired) records Much conditional logic needed to prescribe group position and next record Index juggling: idx_rec_crr_in: Index of current record in current input file (increments by 1 for ssc then srd-ssc ...) idx_rec_out: Index of record in output file lmt_rec->rec_rmn_prv_ssc: Structure member, at start of this while loop, contains records remaining-to-be-read to complete subcycle group from previous file. Structure member remains constant until next file is read. rec_in_cml: Cumulative number of records, read or not, in all files opened so far. Similar to lmt_rec->rec_in_cml but augmented at end of record loop, rather than prior to record loop. rec_rmn_prv_ssc: Local copy initialized from lmt_rec structure member begins with above, and then is set to and tracks number of records remaining remaining in current group. This means it is decremented from ssc_nbr->0 for each group contained in current file. rec_rmn_prv_ilv: Tracks number of records remaining remaining in current interleaved index. This means it is decremented from ssc/ilv->0 a total of ssc_nbr/ilv_nbr times for each ssc in current file. 
rec_usd_cml: Cumulative number of input records used (catenated by ncrcat or operated on by ncra) Flag juggling: Groups are the vernacular for a collection of records to output (ncrcat) or reduce (ncra) When introduced in NCO 4.2.1 in 2012, "groups" and sub-cycles (née drn) were synonymous NCO 4.9.4 in 2020 introduced interleaving, which alters the meaning of groups A "group" is now a set of records that ncra reduces/normalizes/outputs as a single record Thus groups and sub-cycles are still synonomous except in ncra in interleave mode In interleave mode, ncra reduces/normalizes/outputs ilv records per ssc (i.e., one output per ssc/ilv records) A non-interleaved group has ssc records, while an interleaved group has ssc/ilv records The relevant group flags REC_FRS_GRP and REC_LST_GRP are now context-sensitive: ncra re-initializes memory at the beginning, and reduces/normalizes/outputs data at the end, respectively, of each group. In normal (non-interleave) mode, groups are sub-cycles of ssc records In interleave mode, the ilv groups per sub-cycle each contain ssc/ilv records In both normal and interleaved mode, REC_FRS_GRP/REC_LST_GRP are true for first/last records in a group, respectively, and false otherwise 20200731 To disambiguate the meanings of REC_FRS_GRP and REC_LST_GRP we introduce: REC_FRS_SSC and REC_LST_SSC for the first and last records in a sub-cycle REC_FRS_CRR_GRP_OUT and REC_LST_CRR_GRP_OUT for the first and last records in the current (ncra output group (if any) REC_LST_DSR is "sloppy"---it is only set in last input file. If last file(s) is/are superfluous, REC_LST_DSR is never set and final normalization is done outside file and record loops (along with nces normalization). FLG_BFR_NRM indicates these situations and allow us to be "sloppy" in setting REC_LST_DSR. 20200719: REC_LST_DSR is not used for FLG_ILV, since complete sub-cycles are assumed to be within a single file, and normalization always occurs at a group ending. 
*/ if(FLG_ILV){ /* Even intra-ssc strides commence group beginnings */ if(rec_rmn_prv_ilv == 0L) REC_FRS_GRP=True; else REC_FRS_GRP=False; //if(FLG_MSO && rec_usd_cml[idx_rec]) REC_FRS_GRP=False; }else{ /* Even inter-ssc strides commence group beginnings */ if(rec_rmn_prv_ssc == 0L) REC_FRS_GRP=True; else REC_FRS_GRP=False; } /* !FLG_ILV */ /* Reset interleaved group counter to ssc/ilv records */ if(FLG_ILV && rec_rmn_prv_ilv == 0L) rec_rmn_prv_ilv=ilv_per_ssc; /* Reset sub-cycle counter to ssc records */ if(rec_rmn_prv_ssc == 0L) rec_rmn_prv_ssc=lmt_rec[idx_rec]->ssc; /* Final record triggers normalization regardless of its location within group */ if(fl_idx == fl_nbr-1 && idx_rec_crr_in == min_int(lmt_rec[idx_rec]->end+lmt_rec[idx_rec]->ssc-1L,rec_dmn_sz-1L)) REC_LST_DSR[idx_rec]=True; /* ncra reduction/normalization/writing code must know last record in current group (LRCG) for both MRO and non-MRO */ if(FLG_ILV){ if(rec_rmn_prv_ilv == 1L) REC_LST_GRP=True; else REC_LST_GRP=False; //if(FLG_MSO && !REC_LST_DSR[idx_rec]) REC_LST_GRP=False; }else{ if(rec_rmn_prv_ssc == 1L) REC_LST_GRP=True; else REC_LST_GRP=False; } /* !FLG_ILV */ /* Last stride in file has distinct index-augmenting behavior */ if(idx_rec_crr_in >= lmt_rec[idx_rec]->end) REC_SRD_LST=True; else REC_SRD_LST=False; if(FLG_ILV && nco_dbg_lvl >= nco_dbg_fl) (void)fprintf(fp_stdout,"%s: DEBUG rec_idx=%ld, rec_rmn_prv_ssc=%ld, rec_rmn_prv_ilv=%ld, REC_FRS_GRP=%s, REC_LST_GRP=%s, REC_SRD_LST=%s, REC_LST_DSR=%s, idx_rec_out=%ld\n",nco_prg_nm_get(),idx_rec_crr_in,rec_rmn_prv_ssc,rec_rmn_prv_ilv,REC_FRS_GRP ? "YES" : "NO",REC_LST_GRP ? "YES" : "NO",REC_SRD_LST ? "YES" : "NO",REC_LST_DSR[idx_rec] ? 
"YES" : "NO",idx_rec_out[idx_rec]); /* Retrieve this record of weight variable, if any */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)) (void)nco_msa_var_get_rec_trv(in_id,wgt_out,lmt_rec[idx_rec]->nm_fll,idx_rec_crr_in,trv_tbl); /* Process all variables in current record */ if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(fp_stdout,"%s: INFO Record %ld of %s contributes to output record %ld\n",nco_prg_nm_get(),idx_rec_crr_in,fl_in,idx_rec_out[idx_rec]); #ifdef _OPENMP #pragma omp parallel for private(idx,in_id) shared(CNV_ARM,FLG_BFR_NRM,FLG_ILV,FLG_MRO,FLG_MSO,NORMALIZE_BY_WEIGHT,NRM_BY_DNM,REC_FRS_GRP,REC_LST_DSR,base_time_crr,base_time_srt,fl_idx,fl_in,fl_nbr,fl_out,fl_udu_sng,flg_skp1,flg_skp2,gpe,grp_id,grp_out_fll,grp_out_id,idx_rec,idx_rec_crr_in,idx_rec_out,in_id_arr,lmt_rec,md5,nbr_dmn_fl,nbr_rec,nbr_var_prc,nco_dbg_lvl,nco_op_typ,nco_prg_id,out_id,rcd,rec_usd_cml,rgd_arr_bnds_lst,rgd_arr_bnds_nbr,rgd_arr_climo_lst,rgd_arr_climo_nbr,thr_nbr,trv_tbl,var_out_id,var_prc,var_prc_out,var_prc_typ_pre_prm,var_trv,wgt_arr,wgt_avg,wgt_nbr,wgt_nm,wgt_out,wgt_scv) #endif /* !_OPENMP */ for(idx=0;idx<nbr_var_prc;idx++){ /* Skip variable if does not relate to current record */ flg_skp1=nco_skp_var(var_prc[idx],lmt_rec[idx_rec]->nm_fll,trv_tbl); if(flg_skp1) continue; if(thr_nbr > 1) in_id=in_id_arr[omp_get_thread_num()]; else in_id=in_id_arr[0]; if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm_fll); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,var_trv->grp_nm_fll,&grp_id); /* Edit group name for output */ grp_out_fll=NULL; if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=var_trv->grp_nm_fll; /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Get variable ID */ 
(void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Memory management after current extracted group */ if(gpe && grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Store output variable ID */ var_prc_out[idx]->id=var_out_id; /* Retrieve this record of this variable. NB: Updates hyperslab start indices to idx_rec_crr_in */ (void)nco_msa_var_get_rec_trv(in_id,var_prc[idx],lmt_rec[idx_rec]->nm_fll,idx_rec_crr_in,trv_tbl); if(nco_prg_id == ncra) FLG_BFR_NRM=True; /* [flg] Current output buffers need normalization */ /* Re-base record coordinate and bounds if necessary (e.g., time, time_bnds) */ /* if(var_prc[idx]->is_crd_var|| nco_is_spc_in_cf_att(grp_id,"bounds",var_prc[idx]->id) || nco_is_spc_in_cf_att(grp_id,"climatology",var_prc[idx]->id)) */ /* Re-base coordinate variable to units of coordinate in the first input file If record hyperslab indice(s) are double or strings then coordinate variable and limits are (re)-read earlier by nco_lmt_evl() and if units between files are incompatible then ncra will die in that call and not in nco_cln_clc_dbl_var_dff() below */ if(var_prc[idx]->is_crd_var){ nco_bool do_rebase=False; if(!strcmp(var_prc[idx]->nm,lmt_rec[idx_rec]->nm) || nco_rgd_arr_lst_chk(rgd_arr_bnds_lst,rgd_arr_bnds_nbr,lmt_rec[idx_rec]->nm,var_prc[idx]->nm) || nco_rgd_arr_lst_chk(rgd_arr_climo_lst,rgd_arr_climo_nbr,lmt_rec[idx_rec]->nm,var_prc[idx]->nm)) do_rebase=True; if(do_rebase && fl_udu_sng && lmt_rec[idx_rec]->rbs_sng){ if(nco_cln_clc_dbl_var_dff(fl_udu_sng,lmt_rec[idx_rec]->rbs_sng,lmt_rec[idx_rec]->cln_typ,(double*)NULL,var_prc[idx]) != NCO_NOERR){ (void)fprintf(fp_stderr,"%s: ERROR in nco_cln_clc_dbl_var_dff() when attempting to re-base variable \"%s\" from units \"%s\" to \"%s\"\n",nco_prg_nm_get(),var_prc[idx]->nm,fl_udu_sng,lmt_rec[idx_rec]->rbs_sng); nco_exit(EXIT_FAILURE); } /* !nco_cln_clc_dbl_var_dff() */ //nco_free(fl_udu_sng); } /* end !do_rebase */ } /* !crd_var */ if(nco_prg_id == ncra){ nco_bool flg_rth_ntl; 
if(!rec_usd_cml[idx_rec] || (FLG_MRO && REC_FRS_GRP)) flg_rth_ntl=True; else flg_rth_ntl=False; /* Initialize tally and accumulation arrays when appropriate */ if(flg_rth_ntl){ (void)nco_zero_long(var_prc_out[idx]->sz,var_prc_out[idx]->tally); (void)nco_var_zero(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->val); if(var_prc_out[idx]->wgt_sum) (void)memset(var_prc_out[idx]->wgt_sum,0,var_prc_out[idx]->sz*sizeof(double)); } /* end if flg_rth_ntl */ if(var_prc[idx]->type == NC_CHAR || var_prc[idx]->type == NC_STRING){ /* Do not promote un-averagable types (NC_CHAR, NC_STRING) Stuff their first record into output buffer regardless of nco_op_typ, and ignore later records (rec_usd_cml > 1) Temporarily fixes TODO nco941 */ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_min,var_prc[idx],var_prc_out[idx]); }else{ /* Convert char, short, long, int, and float types to doubles before arithmetic Output variable type is "sticky" so only convert on first record in group */ if(flg_rth_ntl) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ); var_prc_typ_pre_prm=var_prc[idx]->type; /* [enm] Type of variable before promotion */ var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]); /* Weight current record */ if((wgt_arr || wgt_nm) && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs) && !var_prc[idx]->is_crd_var){ if(wgt_arr){ wgt_scv.type=NC_DOUBLE; if(flg_wgt_by_rec_not_by_fl) wgt_scv.val.d=wgt_arr[idx_rec_crr_in % wgt_nbr]; else wgt_scv.val.d=wgt_arr[fl_idx]; } /* !wgt_arr */ if(wgt_nm){ wgt_scv.type=wgt_out->type; wgt_scv.val.d=wgt_out->val.dp[0]; /* Per-record weight */ } /* !wgt_nm */ if(var_prc[idx]->wgt_sum) var_prc[idx]->wgt_crr=wgt_scv.val.d; nco_scv_cnf_typ(var_prc[idx]->type,&wgt_scv); if(nco_dbg_lvl >= nco_dbg_grp && (wgt_nm || wgt_arr)) (void)fprintf(fp_stdout,"wgt_nm = %s, var_nm = %s, idx = %li, typ = %s, wgt_val = %g, wgt_crr = %g, var_val = %g, ttl = %g, tally = %ld\n",wgt_nm ? 
wgt_out->nm_fll : "NULL",var_prc[idx]->nm,idx_rec_crr_in,nco_typ_sng(wgt_scv.type),wgt_scv.val.d,var_prc[idx]->wgt_crr,var_prc[idx]->val.dp[0],var_prc_out[idx]->val.dp[0],var_prc_out[idx]->tally[0]); (void)nco_var_scv_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val,&wgt_scv); if(wgt_nm && var_prc[idx]->has_mss_val){ (void)fprintf(fp_stdout,"%s: ERROR %s -w wgt_nm does not yet work on variables that contain missing values and variable %s contains a missing value attribute. This is TODO nco1124. %s will now quit rather than compute possibly erroneous values. HINT: Restrict the %s -w wgt_nm operation to variables with no missing value attributes.\n",nco_prg_nm_get(),nco_prg_nm_get(),nco_prg_nm_get(),var_prc[idx]->nm,nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !wgt_nm */ /* Increment running total of wgt_out after its application to last processed variable for this record */ if(wgt_nm && (idx == nbr_var_prc-1)){ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_typ,wgt_out,wgt_avg); else nco_opr_drv((long)1L,nco_op_typ,wgt_out,wgt_avg); } /* !wgt_nm */ } /* !wgt */ /* Perform arithmetic operations: avg, min, max, ttl, ... 
*/ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_typ,var_prc[idx],var_prc_out[idx]); else nco_opr_drv((long)1L,nco_op_typ,var_prc[idx],var_prc_out[idx]); } /* end else */ } /* end if ncra */ /* All processed variables contain record dimension and both ncrcat and ncra write records singly */ var_prc_out[idx]->srt[rec_dmn_idx]=var_prc_out[idx]->end[rec_dmn_idx]=idx_rec_out[idx_rec]; var_prc_out[idx]->cnt[rec_dmn_idx]=1L; /* Append current record to output file */ if(nco_prg_id == ncrcat){ /* Replace this time_offset value with time_offset from initial file base_time */ if(CNV_ARM && !strcmp(var_prc[idx]->nm,"time_offset")) var_prc[idx]->val.dp[0]+=(base_time_crr-base_time_srt); if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc[idx]->val); } /* endif ppc */ if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc[idx]->val.vp); #ifdef _OPENMP #pragma omp critical #endif /* _OPENMP */ if(var_prc_out[idx]->sz_rec > 1L) (void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp,var_prc_out[idx]->type); else (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc[idx]->val.vp,var_prc_out[idx]->type); /* Perform MD5 digest of input and output data if requested */ if(md5) (void)nco_md5_chk(md5,var_prc_out[idx]->nm,var_prc_out[idx]->sz*nco_typ_lng(var_prc_out[idx]->type),grp_out_id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc[idx]->val.vp); } /* end if ncrcat */ /* Warn if record coordinate, if any, is not monotonic (unless interleaved) */ if(!FLG_ILV && nco_prg_id == ncrcat && var_prc[idx]->is_crd_var) 
(void)rec_crd_chk(var_prc[idx],fl_in,fl_out,idx_rec_crr_in,idx_rec_out[idx_rec]); /* Convert missing_value, if any, back to unpacked or unpromoted type Otherwise missing_value will be double-promoted when next record read in nco_msa_var_get_trv() Do not convert after last record otherwise normalization fails due to wrong missing_value type (needs promoted type, not unpacked type) 20140930: This is (too?) confusing and hard-to-follow, a better solution is to add a field mss_val_typ to var_sct and then separately and explicitly track types of both val and mss_val members. */ if(var_prc[idx]->has_mss_val && /* If there is a missing value and... */ !REC_LST_DSR[idx_rec] && /* ...More records will be read (more calls to nco_msa_var_get_trv()) and... */ !(var_prc[idx]->pck_dsk && var_prc_typ_pre_prm != var_prc_out[idx]->type) && /* Exclude conversion on situations like regression test ncra #32 */ var_prc[idx]->type != var_prc[idx]->typ_upk) /* ...variable was auto-promoted (e.g., --dbl) then */ var_prc[idx]=nco_cnv_mss_val_typ(var_prc[idx],var_prc[idx]->typ_upk); /* Demote missing value */ /* Free current input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); } /* end (OpenMP parallel for) loop over variables */ if(nco_prg_id == ncra && ((FLG_MRO && REC_LST_GRP) || REC_LST_DSR[idx_rec])){ /* Normalize, multiply, etc where necessary: ncra and nces normalization blocks are identical, except ncra normalizes after every ssc records, while nces normalizes once, after files loop. 20131210: nco_cnv_mss_val_typ() can cause type of var_prc to be out-of-sync with var_prc_out nco_cnv_mss_val_typ() above works correctly for case of packing/unpacking, not for rth_dbl Options: 1. Avoid nco_cnv_mss_val_typ() above if rth_dbl is invoked. Keep it for packing. 2. 
In nco_opr_nrm() below, use mss_val from var_prc_out not var_prc Problem is var_prc[idx]->mss_val is typ_upk while var_prc_out is type, so normalization sets missing var_prc_out value to var_prc[idx]->mss_val read as type */ /* First, divide accumulated (not yet weighted) values by tally to obtain (non-weighted) time-means */ if(NRM_BY_DNM) (void)nco_opr_nrm(nco_op_typ,nbr_var_prc,var_prc,var_prc_out,lmt_rec[idx_rec]->nm_fll,trv_tbl); FLG_BFR_NRM=False; /* [flg] Current output buffers need normalization */ /* Second, multiply unweighted time-mean values by time-mean weights */ for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc[idx]->wgt_sum){ // 20201002: fxm Condition this on if(NORMALIZE_BY_WEIGHT) as is done for ncea below? //if(NORMALIZE_BY_WEIGHT) (void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); (void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); } /* !wgt_sum */ } /* !idx */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){ /* Third, and only if the weight comes from a record variable in the file ... Compute mean of per-record weight, by normalizing running sum of weight by tally Then normalize all numerical record variables by mean of per-record weight Still ill-defined when MRO is invoked with --wgt Same logic applies in two locations in this code: 1. During SSC normalization inside record loop when REC_LST_DSR is true 2. 
After file loop for nces, and for ncra with superfluous trailing files */ wgt_avg_scv.type=NC_DOUBLE; wgt_avg->val.dp[0]/=wgt_out->tally[0]; /* NB: wgt_avg tally is kept in wgt_out */ wgt_avg_scv.val.d=wgt_avg->val.dp[0]; for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc_out[idx]->is_crd_var || var_prc[idx]->type == NC_CHAR || var_prc[idx]->type == NC_STRING) continue; nco_scv_cnf_typ(var_prc_out[idx]->type,&wgt_avg_scv); if(NORMALIZE_BY_WEIGHT) (void)nco_var_scv_dvd(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,&wgt_avg_scv); } /* end loop over var */ } /* !wgt_nm */ /* Copy averages to output file */ for(idx=0;idx<nbr_var_prc;idx++){ /* Skip variables that do not contain current record dimension */ flg_skp2=nco_skp_var(var_prc[idx],lmt_rec[idx_rec]->nm_fll,trv_tbl); if(flg_skp2) continue; /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc_out[idx]->nm_fll,trv_tbl); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); // 20200831: var_typ_out may differ from typ_upk when PROMOTE_INTS is invoked var_typ_out= PROMOTE_INTS ? 
var_trv->var_typ_out : var_prc_out[idx]->typ_upk; var_prc_out[idx]=nco_var_cnf_typ(var_typ_out,var_prc_out[idx]); /* Packing/Unpacking */ if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(grp_out_id,var_prc_out[idx],nco_pck_plc); if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); } /* endif ppc */ if(nco_is_xcp(var_trv->nm)) nco_xcp_prc(var_trv->nm,var_prc_out[idx]->type,var_prc_out[idx]->sz,(char *)var_prc_out[idx]->val.vp); if(var_prc_out[idx]->nbr_dim == 0) (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_typ_out); else (void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_typ_out); } /* end loop over idx */ idx_rec_out[idx_rec]++; /* [idx] Index of current record in output file (0 is first, ...) */ } /* end if normalize and write */ /* Prepare indices and flags for next iteration */ if(nco_prg_id == ncrcat) idx_rec_out[idx_rec]++; /* [idx] Index of current record in output file (0 is first, ...) 
*/ rec_usd_cml[idx_rec]++; /* [nbr] Cumulative number of input records used (catenated by ncrcat or operated on by ncra) */ if(nco_dbg_lvl >= nco_dbg_var) (void)fprintf(fp_stderr,"\n"); /* Finally, set index for next record or get outta' Dodge */ /* Decrement both counters for next record */ rec_rmn_prv_ssc--; if(FLG_ILV) rec_rmn_prv_ilv--; if(REC_SRD_LST){ /* Next stride or sub-cycle is not within current file */ if(FLG_ILV){ if(rec_rmn_prv_ssc > 0L){ /* Next record is within current sub-cycle */ if(rec_rmn_prv_ilv > 0L){ /* Next record is within current interleave so augment record index by interleave stride */ idx_rec_crr_in+=lmt_rec[idx_rec]->ilv; }else{ /* Otherwise set record index to start next interleave */ idx_rec_crr_in+=1L-(ilv_per_ssc-1L)*lmt_rec[idx_rec]->ilv; } /* !rec_rmn_prv_ilv */ }else{ /* !rec_rmn_prv_ssc */ /* Finished current sub-cycle so break current while loop and jump to next file */ break; } /* !rec_rmn_prv_ssc */ }else{ /* !FLG_ILV */ /* Last index depends on whether user-specified end was exact, sloppy, or caused truncation */ long end_max_crr; end_max_crr=min_lng(lmt_rec[idx_rec]->idx_end_max_abs-rec_in_cml[idx_rec],min_lng(lmt_rec[idx_rec]->end+lmt_rec[idx_rec]->ssc-1L,rec_dmn_sz-1L)); if(rec_rmn_prv_ssc > 0L && idx_rec_crr_in < end_max_crr) idx_rec_crr_in++; else break; } /* !FLG_ILV */ }else{ /* !REC_SRD_LST */ /* Next stride or sub-cycle is within current file */ if(FLG_ILV){ if(rec_rmn_prv_ssc > 0L){ /* Next record is within current sub-cycle */ if(rec_rmn_prv_ilv > 0L){ /* Next record is within current interleave so augment record index by interleave stride */ idx_rec_crr_in+=lmt_rec[idx_rec]->ilv; }else{ /* Otherwise set record index to start next interleave */ idx_rec_crr_in+=1L-(ilv_per_ssc-1L)*lmt_rec[idx_rec]->ilv; } /* !rec_rmn_prv_ilv */ }else{ /* Finished current sub-cycle so hop to next sub-cycle within file */ idx_rec_crr_in+=lmt_rec[idx_rec]->srd-lmt_rec[idx_rec]->ssc+1L; } /* !rec_rmn_prv_ssc */ }else{ /* !FLG_ILV */ 
/* Augment index by one within sub-cycles or hop to next sub-cycle within file */ if(rec_rmn_prv_ssc > 0L) idx_rec_crr_in++; else idx_rec_crr_in+=lmt_rec[idx_rec]->srd-lmt_rec[idx_rec]->ssc+1L; } /* !FLG_ILV */ } /* !REC_SRD_LST */ } /* end idx_rec_crr_in master while loop over records in current file */ rec_in_cml[idx_rec]+=rec_dmn_sz; /* [nbr] Cumulative number of records in all files opened so far */ lmt_rec[idx_rec]->rec_rmn_prv_ssc=rec_rmn_prv_ssc; if(fl_idx == fl_nbr-1){ /* Warn if other than number of requested records were read */ if(lmt_rec[idx_rec]->lmt_typ == lmt_dmn_idx && lmt_rec[idx_rec]->is_usr_spc_min && lmt_rec[idx_rec]->is_usr_spc_max){ long ssc_grp_nbr_max; /* [nbr] Subcycle groups that start within range */ long rec_nbr_rqs; /* Number of records user requested */ long rec_nbr_rqs_max; /* [nbr] Records that would be used by ssc_grp_nbr_max groups */ long rec_nbr_spn_act; /* [nbr] Records available within user-specified range */ long rec_nbr_spn_max; /* [nbr] Minimum record number spanned by ssc_grp_nbr_max groups */ long rec_nbr_trn; /* [nbr] Records truncated in last group */ long srd_nbr_flr; /* [nbr] Whole strides that fit within specified range */ /* Number of whole strides that fit within specified range */ srd_nbr_flr=(lmt_rec[idx_rec]->max_idx-lmt_rec[idx_rec]->min_idx)/lmt_rec[idx_rec]->srd; ssc_grp_nbr_max=1L+srd_nbr_flr; /* Number of records that would be used by N groups */ rec_nbr_rqs_max=ssc_grp_nbr_max*lmt_rec[idx_rec]->ssc; /* Minimum record number spanned by N groups of size D is N-1 strides, plus D-1 trailing members of last group */ rec_nbr_spn_max=lmt_rec[idx_rec]->srd*(ssc_grp_nbr_max-1L)+lmt_rec[idx_rec]->ssc; /* Actual number of records available within range */ rec_nbr_spn_act=1L+lmt_rec[idx_rec]->max_idx-lmt_rec[idx_rec]->min_idx; /* Number truncated in last group */ rec_nbr_trn=max_int(rec_nbr_spn_max-rec_nbr_spn_act,0L); /* Records requested is maximum minus any truncated in last group */ 
rec_nbr_rqs=rec_nbr_rqs_max-rec_nbr_trn; if(rec_nbr_rqs != rec_usd_cml[idx_rec]) (void)fprintf(fp_stdout,"%s: WARNING User requested %li records but %s%li were found and used\n",nco_prg_nm_get(),rec_nbr_rqs,(rec_usd_cml[idx_rec] < rec_nbr_rqs) ? "only " : "",rec_usd_cml[idx_rec]); } /* end if */ /* ... and die if no records were read ... */ if(rec_usd_cml[idx_rec] <= 0){ (void)fprintf(fp_stdout,"%s: ERROR No records lay within specified hyperslab\n",nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* end if */ } /* end if */ if(fl_udu_sng) fl_udu_sng=(char*)nco_free(fl_udu_sng); nco_rgd_arr_lst_free(rgd_arr_bnds_lst,rgd_arr_bnds_nbr); nco_rgd_arr_lst_free(rgd_arr_climo_lst,rgd_arr_climo_nbr); } /* end idx_rec loop over different record variables to process */ if(!clm_nfo_sng && flg_cb && (nco_prg_id == ncra || nco_prg_id == ncrcat)){ /* Obtain climatology bounds from input file 20200822: Deprecate this original method to obtain bounds 20160824: Currently dmn_srt_srt and dmn_srt_end indices are 0 and 1, respectively This means values are always/only taken for first record in input file Thus climatology_bounds are only correct for input files with single timestep To fix this requires updating dmn_srt_srt and dmn_srt_end with correct indices Correct indices must account for multiple input records per file and hyperslabbing (e.g., -d time,3,5) */ int var_id_in; double val_dbl; var_id_in= cb->tm_bnd_in ? 
cb->tm_bnd_id_in : cb->clm_bnd_id_in; rcd=nco_get_var1(in_id,var_id_in,cb->dmn_srt_srt,&val_dbl,(nc_type)NC_DOUBLE); if(fl_idx == 0) cb->tm_val[0]=val_dbl; if(val_dbl < cb->bnd_val[0]) cb->bnd_val[0]=val_dbl; rcd=nco_get_var1(in_id,var_id_in,cb->dmn_srt_end,&val_dbl,(nc_type)NC_DOUBLE); if(val_dbl > cb->bnd_val[1]) cb->bnd_val[1]=val_dbl; } /* !flg_cb */ /* End ncra, ncrcat section */ }else if(nco_prg_id == ncfe){ /* ncfe */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)) (void)nco_msa_var_get_trv(in_id,wgt_out,trv_tbl); #ifdef _OPENMP #pragma omp parallel for private(idx,in_id) shared(FLG_BFR_NRM,fl_idx,gpe,grp_id,grp_out_fll,grp_out_id,in_id_arr,nbr_dmn_fl,nbr_var_prc,nco_dbg_lvl,nco_op_typ,out_id,rcd,thr_nbr,trv_tbl,var_out_id,var_prc,var_prc_out,var_trv,wgt_arr,wgt_avg,wgt_nbr,wgt_nm,wgt_out,wgt_scv) #endif /* !_OPENMP */ for(idx=0;idx<nbr_var_prc;idx++){ /* Process all variables in current file */ if(thr_nbr > 1) in_id=in_id_arr[omp_get_thread_num()]; else in_id=in_id_arr[0]; if(nco_dbg_lvl >= nco_dbg_var) rcd+=nco_var_prc_crr_prn(idx,var_prc[idx]->nm); if(nco_dbg_lvl >= nco_dbg_var) (void)fflush(fp_stderr); /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc[idx]->nm_fll,trv_tbl); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,var_trv->grp_nm_fll,&grp_id); /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Memory management after current extracted group */ if(grp_out_fll) grp_out_fll=(char *)nco_free(grp_out_fll); /* Get variable ID */ (void)nco_inq_varid(grp_out_id,var_trv->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; /* Retrieve variable from disk into memory */ (void)nco_msa_var_get_trv(in_id,var_prc[idx],trv_tbl); /* Convert char, short, long, int, and float 
types to doubles before arithmetic Output variable type is "sticky" so only convert on first record */ if(fl_idx == 0) var_prc_out[idx]=nco_typ_cnv_rth(var_prc_out[idx],nco_op_typ); var_prc[idx]=nco_var_cnf_typ(var_prc_out[idx]->type,var_prc[idx]); /* Weight current variable (modified from per-record weighting code above) */ nco_bool flg_rth_ntl; if(fl_idx == 0) flg_rth_ntl=True; else flg_rth_ntl=False; if((wgt_arr || wgt_nm) && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs) && !var_prc[idx]->is_crd_var){ if(wgt_arr){ wgt_scv.type=NC_DOUBLE; wgt_scv.val.d=wgt_arr[fl_idx]; /* Per-file weight */ } /* !wgt_arr */ if(wgt_nm){ wgt_scv.type=wgt_out->type; wgt_scv.val.d=wgt_out->val.dp[0]; /* Per-file weight */ } /* !wgt_nm */ if(var_prc[idx]->wgt_sum) var_prc[idx]->wgt_crr=wgt_scv.val.d; nco_scv_cnf_typ(var_prc[idx]->type,&wgt_scv); if(nco_dbg_lvl >= nco_dbg_std && (wgt_nm || wgt_arr)) (void)fprintf(fp_stdout,"wgt_nm = %s, var_nm = %s, fl_idx = %i, typ = %s, wgt_val = %g, wgt_crr = %g, var_val = %g\n",wgt_nm ? wgt_out->nm_fll : "NULL",var_prc[idx]->nm,fl_idx,nco_typ_sng(wgt_scv.type),wgt_scv.val.d,var_prc[idx]->wgt_crr,var_prc[idx]->val.dp[0]); (void)nco_var_scv_mlt(var_prc[idx]->type,var_prc[idx]->sz,var_prc[idx]->has_mss_val,var_prc[idx]->mss_val,var_prc[idx]->val,&wgt_scv); if(wgt_nm && var_prc[idx]->has_mss_val){ (void)fprintf(fp_stdout,"%s: ERROR %s -w wgt_nm does not yet work on variables that contain missing values and variable %s contains a missing value attribute. This is TODO nco1124. %s will now quit rather than compute possibly erroneous values. 
HINT: Restrict the %s -w wgt_nm operation to variables with no missing value attributes.\n",nco_prg_nm_get(),nco_prg_nm_get(),nco_prg_nm_get(),var_prc[idx]->nm,nco_prg_nm_get()); nco_exit(EXIT_FAILURE); } /* !wgt_nm */ /* Increment running total of wgt_out after its application to last processed variable for this record */ if(wgt_nm && (idx == nbr_var_prc-1)){ if(flg_rth_ntl) nco_opr_drv((long)0L,nco_op_typ,wgt_out,wgt_avg); else nco_opr_drv((long)1L,nco_op_typ,wgt_out,wgt_avg); } /* !wgt_nm */ } /* !wgt */ /* Perform arithmetic operations: avg, min, max, ttl, ... */ /* Note: fl_idx not rec_usd_cml! */ nco_opr_drv(fl_idx,nco_op_typ,var_prc[idx],var_prc_out[idx]); FLG_BFR_NRM=True; /* [flg] Current output buffers need normalization */ /* Free current input buffer */ var_prc[idx]->val.vp=nco_free(var_prc[idx]->val.vp); } /* end (OpenMP parallel for) loop over idx */ /* End ncfe section */ }else if(nco_prg_id == ncge){ /* ncge */ trv_tbl_sct *trv_tbl1; /* [lst] Traversal table (needed for multi-file cases) */ /* Initialize traversal table */ trv_tbl_init(&trv_tbl1); /* Construct GTT using current file ID */ (void)nco_bld_trv_tbl(in_id,trv_pth,lmt_nbr,lmt_arg,aux_nbr,aux_arg,MSA_USR_RDR,FORTRAN_IDX_CNV,grp_lst_in,grp_lst_in_nbr,var_lst_in,var_lst_in_nbr,EXTRACT_ALL_COORDINATES,GRP_VAR_UNN,False,EXCLUDE_INPUT_LIST,EXTRACT_ASSOCIATED_COORDINATES,EXTRACT_CLL_MSR,EXTRACT_FRM_TRM,nco_pck_plc_nil,&flg_dne,trv_tbl1); /* Were all user-specified dimensions found? 
*/ (void)nco_chk_dmn(lmt_nbr,flg_dne); /* Loop over ensembles in current file */ for(int idx_nsm=0;idx_nsm<trv_tbl->nsm_nbr;idx_nsm++){ if(nco_dbg_lvl > nco_dbg_std) (void)fprintf(stdout,"%s: ensemble %d: %s\n",nco_prg_nm_get(),idx_nsm,trv_tbl->nsm[idx_nsm].grp_nm_fll_prn); int mbr_srt=trv_tbl->nsm[idx_nsm].mbr_srt; int mbr_end=trv_tbl->nsm[idx_nsm].mbr_end; /* Loop over ensemble members in current file (use start and end members, multi-file cases) */ for(int idx_mbr=mbr_srt;idx_mbr<mbr_end;idx_mbr++){ /* Loop over all variables */ for(int idx_prc=0;idx_prc<nbr_var_prc;idx_prc++){ /* Obtain variable GTT object for member variable in ensemble */ var_trv=trv_tbl_var_nm_fll(var_prc[idx_prc]->nm_fll,trv_tbl); assert(var_trv); /* Skip if from different ensembles */ if(strcmp(var_trv->nsm_nm,trv_tbl->nsm[idx_nsm].grp_nm_fll_prn)) continue; /* Build new variable name */ char *grp_nm_fll=trv_tbl->nsm[idx_nsm].mbr[idx_mbr].mbr_nm_fll; char *var_nm_fll=nco_bld_nm_fll(grp_nm_fll,var_prc[idx_prc]->nm);; char *nm_fll=strdup(var_prc[idx_prc]->nm_fll); var_prc[idx_prc]->nm_fll=(char *)nco_free(var_prc[idx_prc]->nm_fll); var_prc[idx_prc]->nm_fll=nco_bld_nm_fll(grp_nm_fll,var_prc[idx_prc]->nm); if(nco_dbg_lvl > nco_dbg_std) (void)fprintf(fp_stdout,"%s:\t variable <%s>\n",nco_prg_nm_get(),var_prc[idx_prc]->nm_fll); /* Obtain group ID */ (void)nco_inq_grp_full_ncid(in_id,grp_nm_fll,&grp_id); (void)nco_var_mtd_refresh(grp_id,var_prc[idx_prc]); /* Retrieve variable from disk into memory. NB: Using table in file loop */ (void)nco_msa_var_get_trv(in_id,var_prc[idx_prc],trv_tbl1); /* Convert char, short, long, int, and float types to doubles before arithmetic Output variable type is "sticky" so only convert on first member */ if(fl_idx == 0 && idx_mbr == 0) var_prc_out[idx_prc]=nco_typ_cnv_rth(var_prc_out[idx_prc],nco_op_typ); var_prc[idx_prc]=nco_var_cnf_typ(var_prc_out[idx_prc]->type,var_prc[idx_prc]); /* Perform arithmetic operations: avg, min, max, ttl, ... 
*/ nco_opr_drv(fl_idx+idx_mbr,nco_op_typ,var_prc[idx_prc],var_prc_out[idx_prc]); FLG_BFR_NRM=True; /* [flg] Current output buffers need normalization */ /* Put old name back */ var_prc[idx_prc]->nm_fll=(char *)nco_free(var_prc[idx_prc]->nm_fll); var_prc[idx_prc]->nm_fll=strdup(nm_fll); /* Free current input buffer */ var_prc[idx_prc]->val.vp=nco_free(var_prc[idx_prc]->val.vp); /* Free built variable name */ var_nm_fll=(char *)nco_free(var_nm_fll); nm_fll=(char *)nco_free(nm_fll); } /* end loop over var_prc */ } /* end loop over mbr */ } /* !idx_mbr */ (void)trv_tbl_free(trv_tbl1); } /* End ncge section */ /* For ncge, save helpful metadata for later handling by ncbo */ if(nco_prg_id == ncge && fl_idx == 0) (void)nco_nsm_wrt_att(in_id,out_id,gpe,trv_tbl); if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(fp_stderr,"\n"); /* Close input netCDF file */ for(thr_idx=0;thr_idx<thr_nbr;thr_idx++) nco_close(in_id_arr[thr_idx]); /* Dispose local copy of file */ if(FL_RTR_RMT_LCN && RM_RMT_FL_PST_PRC) (void)nco_fl_rm(fl_in); /* Are all our data tanks already full? */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ if(!flg_input_complete[idx_rec]){ if((flg_input_complete[idx_rec]=lmt_rec[idx_rec]->flg_input_complete)){ /* NB: TODO nco1066 move input_complete break to precede record loop but remember to close open filehandles */ /* 20131209: Rewritten so file skipped only once all record dimensions have flg_input_complete Warnings about superfluous files printed only once per dimension fxm: use flg_input_complete[idx_rec] to skip completed entries in main record dimension loop above */ if(nco_dbg_lvl >= nco_dbg_std) (void)fprintf(fp_stderr,"%s: INFO All requested records for record dimension #%d (%s) were found within the first %d input file%s, next file was opened then skipped, and remaining %d input file%s need not be opened\n",nco_prg_nm_get(),idx_rec,lmt_rec[idx_rec]->nm_fll,fl_idx,(fl_idx == 1) ? 
"" : "s",fl_nbr-fl_idx-1,(fl_nbr-fl_idx-1 == 1) ? "" : "s"); flg_input_complete_nbr++; } /* endif superfluous */ } /* endif not already known to be complete */ } /* end loop over record dimensions */ /* Once all record dimensions are complete, break-out of file loop */ if(flg_input_complete_nbr == nbr_rec) break; } /* endif ncra || ncrcat */ } /* end loop over fl_idx */ if(FLG_ILV && lmt_rec[0]->ilv > 1 && rec_rmn_prv_ilv != 0) (void)fprintf(stderr,"%s: WARNING input ended while last interleaved sub-cycle was incomplete. This means the interleaved dimension in the last sub-cycle will contain a non-uniform number of records contributing to different indices. Consider re-defining hyperslab or input data length to ensure output all based on complete sub-cycles. Diagnostics: full sub-cycle length = %ld, stride between first elements of consecutive sub-cycles = %ld, records needed for completion of last sub-cycle and of last interleaved index are, respectively, rec_rmn_prv_ssc = %ld, and rec_rmn_prv_ilv = %ld\n",nco_prg_nm_get(),lmt_rec[0]->ssc,lmt_rec[0]->srd,rec_rmn_prv_ssc,rec_rmn_prv_ilv); /* Subcycle argument warning */ if(nco_prg_id == ncra || nco_prg_id == ncrcat){ /* fxm: Remove this or make DBG when crd_val SSC/MRO is predictable? */ for(idx_rec=0;idx_rec<nbr_rec;idx_rec++){ /* Check subcycle for each record */ if(lmt_rec[idx_rec]->ssc > 1L && (lmt_rec[idx_rec]->lmt_typ == lmt_crd_val || lmt_rec[idx_rec]->lmt_typ == lmt_udu_sng)){ if(nco_dbg_lvl >= nco_dbg_scl) (void)fprintf(stderr,"\n%s: INFO Subcycle argument SSC used in hyperslab specification for %s which will be determined based on coordinate values rather than dimension indices. 
The behavior of the subcycle hyperslab argument is ambiguous for coordinate-based hyperslabs---it could mean select the first SSC elements that are within the min and max coordinate values beginning with each strided point, or it could mean always select the first _consecutive_ SSC elements beginning with each strided point (regardless of their values relative to min and max). For such hyperslabs, NCO adopts the latter definition and always selects the group of SSC records beginning with each strided point. Strided points are themselves guaranteed to be within the min and max coordinates, though the subsequent members of each group are not. This is only the case when the record coordinate is not monotonic. The record coordinate is usually monotonic, so unpleasant surprises are only expected in corner cases unlikely to affect the majority of users.\n",nco_prg_nm_get(),lmt_rec[idx_rec]->nm); } /* Check subcycle for each record */ } /* !idx_rec */ } /* Subcycle argument warning */ /* Normalize, multiply, etc where necessary: ncra and nces normalization blocks are identical, except ncra normalizes after every SSC records, while nces normalizes once, after all files. 
Occassionally last input file(s) is/are superfluous so REC_LST_DSR never set In such cases FLG_BFR_NRM is still true, indicating ncra still needs normalization FLG_BFR_NRM is always true here for ncfe and ncge */ if(FLG_BFR_NRM){ /* First, divide accumulated (not yet weighted) values by tally to obtain (non-weighted) time-means */ if(NRM_BY_DNM) (void)nco_opr_nrm(nco_op_typ,nbr_var_prc,var_prc,var_prc_out,(char *)NULL,(trv_tbl_sct *)NULL); /* Second, multiply unweighted time-mean values by time-mean weights */ for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc[idx]->wgt_sum){ //(void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); if(NORMALIZE_BY_WEIGHT) (void)nco_var_nrm_wgt(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->tally,var_prc_out[idx]->wgt_sum,var_prc_out[idx]->val); // original code } /* !wgt_sum */ } /* !idx */ if(wgt_nm && (nco_op_typ == nco_op_avg || nco_op_typ == nco_op_mebs)){ /* Third, and only if the weight comes from a record variable in the file ... Compute mean of per-record weight, by normalizing running sum of weight by tally Then normalize all numerical record variables by mean of per-record weight Still ill-defined when MRO is invoked with --wgt Same logic applies in two locations in this code: 1. During SSC normalization inside record loop when REC_LST_DSR is true 2. 
After file loop for nces, and for ncra with superfluous trailing files */ wgt_avg_scv.type=NC_DOUBLE; wgt_avg->val.dp[0]/=wgt_out->tally[0]; /* NB: wgt_avg tally is kept in wgt_out */ wgt_avg_scv.val.d=wgt_avg->val.dp[0]; for(idx=0;idx<nbr_var_prc;idx++){ if(var_prc_out[idx]->is_crd_var || var_prc[idx]->type == NC_CHAR || var_prc[idx]->type == NC_STRING) continue; nco_scv_cnf_typ(var_prc_out[idx]->type,&wgt_avg_scv); if(NORMALIZE_BY_WEIGHT) (void)nco_var_scv_dvd(var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val,&wgt_avg_scv); } /* end loop over var */ } /* !wgt_nm */ } /* !FLG_BFR_NRM */ /* Manually fix YYMMDD date which was mangled by averaging */ if(cnv->CCM_CCSM_CF && nco_prg_id == ncra) (void)nco_cnv_ccm_ccsm_cf_date(grp_out_id,var_out,xtr_nbr); /* Add time variable to output file NB: nco_cnv_arm_time_install() contains OpenMP critical region */ if(CNV_ARM && nco_prg_id == ncrcat) (void)nco_cnv_arm_time_install(grp_out_id,base_time_srt,dfl_lvl); /* Copy averages to output file for ncfe and ncge always and for ncra when trailing file(s) was/were superfluous */ if(FLG_BFR_NRM){ for(idx=0;idx<nbr_var_prc;idx++){ /* Obtain variable GTT object using full variable name */ var_trv=trv_tbl_var_nm_fll(var_prc_out[idx]->nm_fll,trv_tbl); /* For ncge, group to save is ensemble parent group */ if(nco_prg_id == ncge){ /* Check if suffix needed. 
Appends to default name */ if(trv_tbl->nsm_sfx){ /* Define (append) then use and forget new name */ char *nm_fll_sfx=nco_bld_nsm_sfx(var_trv->grp_nm_fll_prn,trv_tbl); /* Use new name */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,nm_fll_sfx); else grp_out_fll=(char *)strdup(nm_fll_sfx); nm_fll_sfx=(char *)nco_free(nm_fll_sfx); }else{ /* Non suffix case */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->nsm_nm); else grp_out_fll=(char *)strdup(var_trv->nsm_nm); } /* !trv_tbl->nsm_sfx */ }else if(nco_prg_id == ncfe){ /* Edit group name for output */ if(gpe) grp_out_fll=nco_gpe_evl(gpe,var_trv->grp_nm_fll); else grp_out_fll=(char *)strdup(var_trv->grp_nm_fll); } /* end else */ /* Obtain output group ID */ (void)nco_inq_grp_full_ncid(out_id,grp_out_fll,&grp_out_id); /* Get output variable ID */ (void)nco_inq_varid(grp_out_id,var_prc_out[idx]->nm,&var_out_id); /* Store the output variable ID */ var_prc_out[idx]->id=var_out_id; var_prc_out[idx]=nco_var_cnf_typ(var_prc_out[idx]->typ_upk,var_prc_out[idx]); /* Packing/Unpacking */ if(nco_pck_plc == nco_pck_plc_all_new_att) var_prc_out[idx]=nco_put_var_pck(grp_out_id,var_prc_out[idx],nco_pck_plc); if(var_trv->ppc != NC_MAX_INT){ if(var_trv->flg_nsd) (void)nco_ppc_bitmask(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); else (void)nco_ppc_around(var_trv->ppc,var_prc_out[idx]->type,var_prc_out[idx]->sz,var_prc_out[idx]->has_mss_val,var_prc_out[idx]->mss_val,var_prc_out[idx]->val); } /* endif ppc */ if(var_prc_out[idx]->nbr_dim == 0) (void)nco_put_var1(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); else (void)nco_put_vara(grp_out_id,var_prc_out[idx]->id,var_prc_out[idx]->srt,var_prc_out[idx]->cnt,var_prc_out[idx]->val.vp,var_prc_out[idx]->type); } /* end loop over idx */ } /* end if ncfe and ncge */ /* Free averaging, tally, and weight buffers */ if(nco_prg_id == ncra || nco_prg_id == ncfe || 
nco_prg_id == ncge){ for(idx=0;idx<nbr_var_prc;idx++){ if((wgt_arr || wgt_nm) && var_prc[idx]->has_mss_val) var_prc_out[idx]->wgt_sum=var_prc[idx]->wgt_sum=(double *)nco_free(var_prc[idx]->wgt_sum); var_prc_out[idx]->tally=var_prc[idx]->tally=(long *)nco_free(var_prc[idx]->tally); var_prc_out[idx]->val.vp=nco_free(var_prc_out[idx]->val.vp); } /* end loop over idx */ } /* endif ncra || nces */ if(flg_cb && (nco_prg_id == ncra || nco_prg_id == ncrcat || nco_prg_id == ncfe)){ rcd=nco_put_var(out_id,cb->tm_crd_id_out,cb->tm_val,(nc_type)NC_DOUBLE); rcd=nco_put_var(out_id,cb->clm_bnd_id_out,cb->bnd_val,(nc_type)NC_DOUBLE); } /* !flg_cb */ if(flg_cb && (cb->bnd2clm || cb->clm2bnd)){ /* Rename time-bounds as climatology bounds, or visa-versa Otherwise wrong bounds will remain orphaned in output file Also, this ensures same dimensions are used Rename at end of procedure so that traversal table does not get out-of-sync Avoiding renaming would mean creating the new and deleting the old bounds variable That would entail significant modifications to traversal table logic Renaming seems simpler and less error prone */ rcd+=nco_redef(out_id); if(cb->bnd2clm) rcd+=nco_rename_var(out_id,cb->tm_bnd_id_out,cb->clm_bnd_nm); if(cb->clm2bnd) rcd+=nco_rename_var(out_id,cb->clm_bnd_id_out,cb->tm_bnd_nm); rcd+=nco_enddef(out_id); } /* !flg_cb */ /* Close output file and move it from temporary to permanent location */ (void)nco_fl_out_cls(fl_out,fl_out_tmp,out_id); /* Clean memory unless dirty memory allowed */ if(flg_mmr_cln){ /* NCO-generic clean-up */ /* Free individual strings/arrays */ //if(nco_dbg_lvl_get() >= nco_dbg_quiet) (void)fprintf(stdout,"%s: free quark3\n",nco_prg_nm_get()); if(cmd_ln) cmd_ln=(char *)nco_free(cmd_ln); if(clm_nfo_sng) clm_nfo_sng=(char *)nco_free(clm_nfo_sng); if(cnk_map_sng) cnk_map_sng=(char *)nco_free(cnk_map_sng); if(cnk_plc_sng) cnk_plc_sng=(char *)nco_free(cnk_plc_sng); if(fl_in) fl_in=(char *)nco_free(fl_in); if(fl_out) fl_out=(char *)nco_free(fl_out); 
if(fl_out_tmp) fl_out_tmp=(char *)nco_free(fl_out_tmp); if(fl_pth) fl_pth=(char *)nco_free(fl_pth); if(fl_pth_lcl) fl_pth_lcl=(char *)nco_free(fl_pth_lcl); if(in_id_arr) in_id_arr=(int *)nco_free(in_id_arr); if(wgt_arr) wgt_arr=(double *)nco_free(wgt_arr); if(wgt_nm) wgt_nm=(char *)nco_free(wgt_nm); /* Free lists of strings */ if(fl_lst_in && !fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,fl_nbr); if(fl_lst_in && fl_lst_abb) fl_lst_in=nco_sng_lst_free(fl_lst_in,1); if(fl_lst_abb) fl_lst_abb=nco_sng_lst_free(fl_lst_abb,abb_arg_nbr); if(gaa_nbr > 0) gaa_arg=nco_sng_lst_free(gaa_arg,gaa_nbr); if(var_lst_in_nbr > 0) var_lst_in=nco_sng_lst_free(var_lst_in,var_lst_in_nbr); if(wgt_nbr > 0) wgt_lst_in=nco_sng_lst_free(wgt_lst_in,wgt_nbr); /* Free limits */ for(idx=0;idx<aux_nbr;idx++) aux_arg[idx]=(char *)nco_free(aux_arg[idx]); for(idx=0;idx<lmt_nbr;idx++) lmt_arg[idx]=(char *)nco_free(lmt_arg[idx]); for(idx=0;idx<ppc_nbr;idx++) ppc_arg[idx]=(char *)nco_free(ppc_arg[idx]); /* Free chunking information */ for(idx=0;idx<cnk_nbr;idx++) cnk_arg[idx]=(char *)nco_free(cnk_arg[idx]); if(cnk_nbr > 0 && (fl_out_fmt == NC_FORMAT_NETCDF4 || fl_out_fmt == NC_FORMAT_NETCDF4_CLASSIC)) cnk.cnk_dmn=(cnk_dmn_sct **)nco_cnk_lst_free(cnk.cnk_dmn,cnk_nbr); /* Free dimension lists */ if(nbr_dmn_xtr > 0) dim=nco_dmn_lst_free(dim,nbr_dmn_xtr); if(nbr_dmn_xtr > 0) dmn_out=nco_dmn_lst_free(dmn_out,nbr_dmn_xtr); /* Free variable lists */ if(xtr_nbr > 0) var=nco_var_lst_free(var,xtr_nbr); if(xtr_nbr > 0) var_out=nco_var_lst_free(var_out,xtr_nbr); var_prc=(var_sct **)nco_free(var_prc); var_prc_out=(var_sct **)nco_free(var_prc_out); var_fix=(var_sct **)nco_free(var_fix); var_fix_out=(var_sct **)nco_free(var_fix_out); if(md5) md5=(md5_sct *)nco_md5_free(md5); if(wgt) wgt=(var_sct *)nco_var_free(wgt); if(wgt_out) wgt_out=(var_sct *)nco_var_free(wgt_out); if(wgt_avg) wgt_avg=(var_sct *)nco_var_free(wgt_avg); /* Free climatology bounds */ if(cb){ if(cb->bnd_val) cb->bnd_val=(double 
*)nco_free(cb->bnd_val); if(cb->clm_bnd_nm) cb->clm_bnd_nm=(char *)nco_free(cb->clm_bnd_nm); if(cb->cln_val) cb->cln_val=(char *)nco_free(cb->cln_val); if(cb->tm_bnd_nm) cb->tm_bnd_nm=(char *)nco_free(cb->tm_bnd_nm); if(cb->tm_crd_nm) cb->tm_crd_nm=(char *)nco_free(cb->tm_crd_nm); if(cb->tm_val) cb->tm_val=(double *)nco_free(cb->tm_val); if(cb->unt_val) cb->unt_val=(char *)nco_free(cb->unt_val); if(cb) cb=(clm_bnd_sct *)nco_free(cb); } /* !cb */ (void)trv_tbl_free(trv_tbl); for(idx=0;idx<lmt_nbr;idx++) flg_dne[idx].dim_nm=(char *)nco_free(flg_dne[idx].dim_nm); if(flg_dne) flg_dne=(nco_dmn_dne_t *)nco_free(flg_dne); if(flg_input_complete) flg_input_complete=(nco_bool *)nco_free(flg_input_complete); if(idx_rec_out) idx_rec_out=(long *)nco_free(idx_rec_out); if(rec_in_cml) rec_in_cml=(long *)nco_free(rec_in_cml); if(rec_usd_cml) rec_usd_cml=(long *)nco_free(rec_usd_cml); if(REC_LST_DSR) REC_LST_DSR=(nco_bool *)nco_free(REC_LST_DSR); } /* !flg_mmr_cln */ #ifdef ENABLE_MPI MPI_Finalize(); #endif /* !ENABLE_MPI */ /* End timer */ ddra_info.tmr_flg=nco_tmr_end; /* [enm] Timer flag */ rcd+=nco_ddra((char *)NULL,(char *)NULL,&ddra_info); if(rcd != NC_NOERR) nco_err_exit(rcd,"main"); nco_exit_gracefully(); return EXIT_SUCCESS; } /* end main() */
triangle_integral.c
/* "Program to compute the volume of a triangle pouch"
   "Unit: mm"
   "Author: Yitian Shao"
   "Created on 2021.06.04" */
#include <math.h>
#include <stdio.h>
#include <omp.h>

#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif

/* Triangle pouch is formed by cutting front, and symmetrically two sides, then top of a sphere */
/* 's' is the radius of top cutting circle, 'f' is the distance between the center of front cutting circle to the center of the sphere */
/* 'R' is the radius of the sphere being cutted, 'h' is the distance between the center of top cutting circle to the center of the sphere */
/* 'n' is half length of the triangle side, 'stepSize' controls the resolution of the integral */
/* Note: All input arguments must be nonnegative and 'stepSize' must be positive */

/* Compute the volume of a corner of a sphere cut by two perpendicular planes.
 *   s2       - squared radius of the top cutting circle
 *   f        - distance from sphere center to the front cutting plane
 *   R2       - squared sphere radius
 *   stepSize - integration step (must be > 0)
 * Returns the corner volume in mm^3.
 *
 * BUG FIX: the original used "#pragma omp parallel shared(cornerV) private(Vi)"
 * where Vi was initialized only OUTSIDE the parallel region.  OpenMP 'private'
 * copies are uninitialized on entry, so each thread accumulated onto garbage
 * (undefined behavior).  A reduction zero-initializes each per-thread copy and
 * combines them safely, and also removes the manual critical section. */
double sphereCorner(double s2, double f, double R2, double stepSize)
{
    double cornerV = 0.0;
    double f2 = f*f;
    int iMax = (int)((s2 - f2)/stepSize); // Need integer index for parallel computing

    // Assign maximum number of threads for parallel computing
    int threadNum = omp_get_max_threads();
    omp_set_num_threads(threadNum);
    printf("Parallel computing for sphere corner: Assigned number of threads = %d\n", threadNum);

    // i starts at 1: the i == 0 term (x == f2) contributes exactly 0 anyway
    #pragma omp parallel for reduction(+:cornerV)
    for(int i = 1; i < iMax; i++)
    {
        double x = (double)i * stepSize + f2;
        cornerV -= stepSize * (f * sqrt(x - f2) - x * acos(f/sqrt(x))) / (2 * sqrt(R2 - x));
    }
    return cornerV;
}

/* Compute the volume of the triangle pouch through boolean substration method */
/* Output unit: mm3 */
double computeVol(double s, double f, double R, double h, double n, double stepSize)
{
    double s2 = s*s;
    double R2 = R*R;
    double h2 = h*h;
    double frontCornerV = sphereCorner(s2, f, R2, stepSize);
    double sideCornerV = sphereCorner(s2, sqrt(s2 - n*n), R2, stepSize);
    // Spherical cap above the top cutting plane (closed form)
    double upperSphereV = ( 2 * R2 * (R - h) + h * (h2 - R2) ) * M_PI/3;
    printf("Front = %.3f mm3, Side = %.3f mm3\n", frontCornerV, sideCornerV);
    return (upperSphereV - frontCornerV - 2*sideCornerV);
}

/* Compute the Total Electric Field Energy below the entire area of triangle pouch */
/* Output unit: (U/l)^2 * A -> (V/mm)^2 * mm^2 = V^2 */
/* Note that input arguments are for half of the triangle pouch */
/* Note that input x1 must be greater than x0 */
/* Note that in reality, z0 is smaller than h due to thickness of the pouch, therefore z0 = h - thickness */
/* BUG FIX: same uninitialized-'private(Vi)' accumulator defect as sphereCorner;
 * replaced with reduction(+:V). */
double computeTriTEFE(double x0, double x1, double y0, double z0, double R, double m, double U, double stepSize)
{
    // y = ax + b describes the slanted triangle edge; R2 = R squared
    double c = x1 - x0;
    double a = m/c;
    double b = a * -x0;
    double R2 = R*R;
    double stepSize2 = stepSize*stepSize; // Area dA
    double V = 0.0;                       // Accumulated (A/l^2)
    int iMax = (int)(c/stepSize);         // Need integer index for parallel computing

    // Assign maximum number of threads for parallel computing
    int threadNum = omp_get_max_threads();
    omp_set_num_threads(threadNum);
    printf("Parallel computing for Triangle TEFE: Assigned number of threads = %d\n", threadNum);

    #pragma omp parallel for reduction(+:V)
    for(int i = 0; i < iMax; i++) // OMP Alternative of: for(double x = x0; x < x1; x += stepSize)
    {
        double x = (double)i * stepSize + x0;
        double y1 = a * x + b;
        double temp = R2 - x*x; // Temp variable facilitates the computation
        for(double y = y0; y < y1; y += stepSize)
        {
            double l = sqrt(temp - y*y) - z0; // l is the vertical distance between electrodes
            //if(l < 0.02) printf("l = %f\n", l); // For debug only
            V += stepSize2 / (l*l);
        }
    }
    // Note that only half of the Total Electric Field Energy is computed by the for-loop since the triangle pouch is symmetric about x-axis
    return (2 * V * U*U); // Total Electric Field Energy defined as (A/l^2) * U^2
}

/* Compute the Total Electric Field Energy below the entire area of rectange pouch that connects the triangle pouch */
/* Output unit: (U/l)^2 * A -> (V/mm)^2 * mm^2 = V^2 */
/* Note that input arguments are for half of the rectangle pouch */
/* Note that input y1 must be greater than y0 */
/* Note that in reality, z0 is smaller than h due to thickness of the pouch, therefore z0 = h - thickness */
/* BUG FIX: same uninitialized-'private(Vi)' accumulator defect as sphereCorner;
 * replaced with reduction(+:V). */
double computeRectTEFE(double y0, double y1, double z0, double r, double w, double U, double stepSize)
{
    double r2 = r*r;
    double V = 0.0;
    int iMax = (int)((y1 - y0)/stepSize); // Need integer index for parallel computing

    // Assign maximum number of threads for parallel computing
    int threadNum = omp_get_max_threads();
    omp_set_num_threads(threadNum);
    printf("Parallel computing for Rectangle TEFE: Assigned number of threads = %d\n", threadNum);

    #pragma omp parallel for reduction(+:V)
    for(int i = 0; i < iMax; i++) // OMP Alternative of: for(double y = y0; y < y1; y += stepSize)
    {
        double y = (double)i * stepSize + y0;
        double l = sqrt(r2 - y*y) - z0; // l is the vertical distance between electrodes
        //if(l < 0.02) printf("l = %f\n", l); // For debug only
        V += stepSize / (l * l);
    }
    // Note that only half of the Total Electric Field Energy is computed by the for-loop since the rectangle pouch is symmetric about x-axis
    return (2 * V * U*U * w); // Total Electric Field Energy defined as (A/l^2) * U^2
}

/****************** [Obsoleted] Slower double integral method
double compute(double x0, double x1, double y0, double z0, double R, double m, double stepSize)
{
    double a = 0.0, b = 0.0, R2 = 0.0, c = 0.0, V = 0.0, Vi = 0.0; // y = ax + b, R2 = R square, V is the volume computed (half-triangle), Vi is used for parallel computing
    c = x1 - x0;
    a = m/c;
    b = a * -x0;
    R2 = R*R;
    int iMax = (int)(c/stepSize); // Need integer index for parallel computing
    //Assign maximum number of threads for parallel computing
    int threadNum = omp_get_max_threads();
    omp_set_num_threads(threadNum);
    printf("Start parallel computing: Assigned number of threads = %d\n", threadNum);
    #pragma omp parallel shared(V) private(Vi)
    {
        #pragma omp for
        for(int i = 0; i < iMax; i++) // OMP Alternative of: for(double x = x0; x < x1; x += stepSize)
        {
            double x = (double)i * stepSize + x0;
            double y1 = a * x + b;
            double temp = R2 - x*x; // Temp variable facilitates the computation
            for(double y = y0; y < y1; y += stepSize)
            {
                Vi += (sqrt(temp - y*y) - z0) * stepSize * stepSize;
            }
        }
        //printf("Thread %d: Vi = %f\n", omp_get_thread_num(), Vi); // For debug only
        #pragma omp critical
        {
            V += Vi;
        }
    }
    return (2*V); // Note that only half of the volume is computed by the for-loop since the triangle pouch is symmetric about x-axis
}
******************/
GB_unop__tan_fc64_fc64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__tan_fc64_fc64)
// op(A') function:  GB (_unop_tran__tan_fc64_fc64)

// C type:   GxB_FC64_t
// A type:   GxB_FC64_t
// cast:     GxB_FC64_t cij = aij
// unaryop:  cij = ctan (aij)

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    GxB_FC64_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = ctan (x) ;

// casting
#define GB_CAST(z, aij) \
    GxB_FC64_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC64_t z = aij ;            \
    Cx [pC] = ctan (z) ;            \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TAN || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies cij = ctan (aij) elementwise over the anz entries of Ax, writing the
// result into Cx.  Both the dense case (Ab == NULL) and the bitmap case
// (Ab != NULL, entries with Ab [p] == 0 are skipped) are handled.  Work is
// split across 'nthreads' OpenMP threads with a static schedule.
GrB_Info GB (_unop_apply__tan_fc64_fc64)
(
    GxB_FC64_t *Cx,             // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ctan (z) ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            GxB_FC64_t z = aij ;
            Cx [p] = ctan (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel itself lives in the shared template
// GB_unop_transpose.c, which is specialized here via the GB_* macros above.
GrB_Info GB (_unop_tran__tan_fc64_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DRB020-privatemissing-var-yes.c
/* Copyright (C) 1991-2018 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it andor modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http:www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses Unicode 10.0.0. Version 10.0 of the Unicode Standard is synchronized with ISOIEC 10646:2017, fifth edition, plus the following additions from Amendment 1 to the fifth edition: - 56 emoji characters - 285 hentaigana - 3 additional Zanabazar Square characters */ /* Copyright (c) 2017, Lawrence Livermore National Security, LLC. 
   Produced at the Lawrence Livermore National Laboratory

   Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
   Markus Schordan, and Ian Karlin
   (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
   schordan1@llnl.gov, karlin1@llnl.gov)

   LLNL-CODE-732144
   All rights reserved.

   This file is part of DataRaceBench. For details, see
   https://github.com/LLNL/dataracebench. Please also see the LICENSE file
   for our additional BSD notice.

   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions are
   met:

   * Redistributions of source code must retain the above copyright
     notice, this list of conditions and the disclaimer below.

   * Redistributions in binary form must reproduce the above copyright
     notice, this list of conditions and the disclaimer (as noted below)
     in the documentation and/or other materials provided with the
     distribution.

   * Neither the name of the LLNS/LLNL nor the names of its contributors
     may be used to endorse or promote products derived from this
     software without specific prior written permission.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE
   LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR
   CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */

/*
   tmp should be put as private to avoid race condition
   Data race pair: tmp@65 vs.
tmp@66 */ #include <stdlib.h> int main(int argc, char * argv[]) { int i; int tmp; int len = 100; int a[len]; int _ret_val_0; if (argc>1) { len=atoi(argv[1]); } #pragma cetus private(i) #pragma loop name main#0 #pragma cetus parallel #pragma omp parallel for private(i) for (i=0; i<len; i ++ ) { a[i]=i; } #pragma cetus private(i, tmp) #pragma loop name main#1 #pragma cetus parallel #pragma omp parallel for private(i, tmp) for (i=0; i<len; i ++ ) { tmp=(a[i]+i); a[i]=tmp; } #pragma cetus private(i) #pragma loop name main#2 for (i=0; i<len; i ++ ) { printf("%d\n", a[i]); } _ret_val_0=0; return _ret_val_0; }
mea_puu_traco.c
#include <stdio.h> #include <stdlib.h> #include <limits.h> #include <omp.h> #include <math.h> #define min(a,b) (((a)<(b))?(a):(b)) #define MIN(a,b) (((a)<(b))?(a):(b)) #define max(a,b) (((a)>(b))?(a):(b)) #define MAX(a,b) (((a)>(b))?(a):(b)) #define floord(n,d) floor(((double)(n))/((double)(d))) #define ceild(n,d) ceil(((double)(n))/((double)(d))) double ** Q; double ** Qbp; double ** Pbp; double ** Pu; double ** M; int Ebp = 0; // Energy weight of base pair -2, -1, 0, 1, 2 int RT = 1; // 'Normalized' temperature 1,2,3,4,5 float ERT; int l = 0; //minimum loop length 0-5 int delta = 1; // Base pair weighting 1-5 char * RNA; //only ACGU int N; int DIM; #include "../mem.h" int paired(int i, int j) { char nt1 = RNA[i]; char nt2 = RNA[j]; if ((nt1 == 'A' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'A') || (nt1 == 'G' && nt2 == 'C') || (nt1 == 'C' && nt2 == 'G') || (nt1 == 'G' && nt2 == 'U') || (nt1 == 'U' && nt2 == 'G')){ return 1;} else return 0; } int main(int argc, char *argv[]){ int num_proc=1; int i,j,k,ll,p,q; int c0, c1,c2,c3,c4,c5,c6,c7,c8,c9,c10,c11,c12,c13,c15; int t1, t2, t3, t4, t5, t6,t7; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; ERT = exp((float)-Ebp/(float)RT); srand(time(NULL)); if(argc > 1) num_proc = atoi(argv[1]); int kind=1; N = 8; DIM = 12; if(argc > 2) N = atoi(argv[2]); DIM = N+10; if(argc > 3) kind = atoi(argv[3]); omp_set_num_threads(num_proc); //printf(" -exp(Ebp/RT) = %5.3f\n", ERT); RNA = (char*) malloc(DIM * sizeof(char*)); //read from FASTA file rand_seq(RNA, N); //printf("Sequence: "); //for(i=0; i<N; i++) // printf("%c", RNA[i]); //printf("\n\n"); Q = memd(); Qbp = memd(); Pbp = memd(); Pu = memd(); M = memd(); rna_array_init(Q, 1, 1); rna_array_init(Qbp, 0, 0); rna_array_init(Pbp, 0, 0); rna_array_init(Pu, 0, 0); rna_array_init(M, 0, 0); double * Puu = (double*)malloc(DIM * sizeof(double)); double start = omp_get_wtime(); // compute the partition functions Q and Qbp if(kind==1){ #pragma scop for(i=0; i<=N; i++){ Puu[i] = 1; 
for(j=i+1; j<N; j++){ Puu[i] += -1 * Pbp[i][j+1]; } for(k=0; k<i; k++){ Puu[i] += -1 * Pbp[k][i+1]; } } #pragma endscop } if(kind==2) // pluto { printf("pluto\n"); /* We do not support C11 <threads.h>. */ int t1, t2, t3, t4, t5, t6, t7; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if (N >= 0) { lbp=0; ubp=floord(N,16); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7) for (t2=lbp;t2<=ubp;t2++) { lbv=16*t2; ubv=min(N,16*t2+15); #pragma ivdep #pragma vector always for (t3=lbv;t3<=ubv;t3++) { Puu[t3] = 1;; } } if (N >= 1) { lbp=0; ubp=floord(N,16); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7) for (t2=lbp;t2<=ubp;t2++) { if (t2 <= floord(N-2,16)) { for (t4=t2;t4<=floord(N-1,16);t4++) { for (t5=16*t2;t5<=min(min(N-2,16*t2+15),16*t4+14);t5++) { for (t7=max(16*t4,t5+1);t7<=min(N-1,16*t4+15);t7++) { Puu[t5] += -1 * Pbp[t5][t7+1];; } } } } for (t4=0;t4<=min(floord(N-1,16),t2);t4++) { for (t5=max(16*t2,16*t4+1);t5<=min(N,16*t2+15);t5++) { for (t7=16*t4;t7<=min(16*t4+15,t5-1);t7++) { Puu[t5] += -1 * Pbp[t7][t5+1];; } } } } } } /* End of CLooG code */ } if(kind==3) // traco { printf("traco\n"); } if(kind==4) // traco tstile { printf("traco cor\n"); #pragma omp parallel for for( c1 = 0; c1 <= N/16; c1 += 1) { for( c2 = 0; c2 <= min(1, -8 * c1 + N / 2); c2 += 1) { if (c2 == 1) { for( c3 = 0; c3 <= -c1 + (N - 2) / 16; c3 += 1) for( c5 = 16 * c1; c5 <= min(16 * c1 + 15, N - 16 * c3 - 2); c5 += 1) for( c7 = 16 * c3 + c5 + 1; c7 <= min(N - 1, 16 * c3 + c5 + 16); c7 += 1) Puu[c5] += -1 * Pbp[c5][c7+1]; } else { for( c5 = 16 * c1; c5 <= min(N, 16 * c1 + 15); c5 += 1) Puu[c5] = 1; } } for( c3 = 0; c3 <= min(c1, floord(N - 1, 16)); c3 += 1) for( c5 = max(16 * c1, 16 * c3 + 1); c5 <= min(N, 16 * c1 + 15); c5 += 1) for( c7 = 16 * c3; c7 <= min(16 * c3 + 15, c5 - 1); c7 += 1) Puu[c5] += -1 * Pbp[k][c5+1]; } } double stop = omp_get_wtime(); printf("%.4f\n",stop - start); //printf("Q\n"); //rna_array_print(Q); //printf("Qbp\n"); 
//rna_array_print(Qbp); exit(0); return 0; }
karniadakis.h
#ifndef _TL_KARNIADAKIS_ #define _TL_KARNIADAKIS_ #include <array> #include "matrix.h" #include "matrix_array.h" #include "quadmat.h" namespace toefl{ /*! @brief Kinds of Stepper coefficients for karniadakis scheme * @ingroup algorithms */ enum stepper { TL_EULER, //!< Euler scheme (use for 1st step) TL_ORDER2, //!< 2nd order scheme (use for 2nd step) TL_ORDER3 //!< 3rd order scheme ( the "usual" karniadakis scheme) }; /*! @brief template traits class for various sets of coefficients in the karniadakis scheme from the karniadakis paper * @ingroup algorithms */ template< enum stepper S> struct Coefficients { static double const gamma_0; //!< Coefficient in the dft part static const double alpha[3]; //!< Coefficients for the timestep static const double beta[3]; //!< Coefficients fot the nonlinear part in the timestep. }; ///@cond template<> const double Coefficients<TL_EULER>::gamma_0 = 1.; template<> const double Coefficients<TL_EULER>::alpha[3] = {1., 0.,0.}; template<> const double Coefficients<TL_EULER>::beta[3] = {1., 0.,0.}; template<> const double Coefficients<TL_ORDER2>::gamma_0 = 1.5; template<> const double Coefficients<TL_ORDER2>::alpha[3] = {2.,-0.5,0.}; template<> const double Coefficients<TL_ORDER2>::beta[3] = {2.,-1.,0.}; template<> const double Coefficients<TL_ORDER3>::gamma_0 = 11./6.; template<> const double Coefficients<TL_ORDER3>::alpha[3] = {3.,-1.5,1./3.}; template<> const double Coefficients<TL_ORDER3>::beta[3] = {3.,-3.,1.}; ///@endcond /*! @brief pointwise multiply the n x n Matrix of coefficients by a n-vector of matrices * * @ingroup algorithms * Compute the system m0 = c00*m0 + c01*m1 + c02*m2 + ..., m1 = ... where all * of the elements are matrices and matrix-matrix multiplications are done pointwise. * @tparam T1 type of the coefficients i.e. double or std::complex<double> * @tparam T type of the matrix elements, i.e. 
double or std::complex<double> * @param c the coefficient matrix * @param in Input vector of matrices * @param out Output vector of matrices. Contains solution on output. * Multiplication is done inplace if in and out reference the same object! */ template< size_t n, typename T1, typename T> void multiply_coeff( const Matrix< QuadMat<T1,n>, TL_NONE>& c, const std::array< Matrix<T,TL_NONE>, n>& in, std::array< Matrix<T,TL_NONE>, n>& out ) { const size_t rows = c.rows(), cols = c.cols(); #ifdef TL_DEBUG if( c.isVoid()) throw Message( "Cannot work with void Matrices!\n", _ping_); for( unsigned k=0; k<n; k++) { if( c.rows() != in[k].rows() || c.rows() != out[k].rows()) if( c.cols() != in[k].cols() || c.cols() != out[k].cols()) throw Message( "Cannot multiply coefficients! Sizes not equal!", _ping_); if( in[k].isVoid() || out[k].isVoid() ) throw Message( "Cannot work with void Matrices!\n", _ping_); } #endif #pragma omp parallel for for( size_t i = 0; i<rows; i++) { QuadMat<T, n> temp; for( size_t j=0; j<cols; j++) { //Matrix-Vector multiplication for( unsigned k=0; k<n; k++) for( unsigned q=0; q<n; q++) temp(k,q) = c(i,j)(k,q)*in[q](i,j); for( unsigned k=0; k<n; k++) { out[k](i,j) = 0; for( unsigned q=0; q<n; q++) out[k](i,j) += temp(k,q); } } } } /*! @brief Multistep timestepper object * * @ingroup algorithms * Construction is a bit clumsy but usage is easy. This object is a solution to the * problem of computing the two steps in the karniadakis scheme. * One is in x-space the other in fourier space. The goal was to implement a solver * which is oblivious to the type of boundary conditions used and can be used for two or * three equations. * \todo n equations are available only when an implementation of an LU decomposition is available. (LAPACK?) 
* @tparam n size of the equations (2 or 3) * @tparam T_k the type of fourier coefficients used (double or std::complex<double>) * @tparam P_x Padding of your (real) matrices */ template< size_t n, typename T_k, enum Padding P_x> class Karniadakis { public: /*! @brief Allocate storage for the last two fields in the karniadakis scheme. * * @param rows_x # of rows of your x-space matrices * @param cols_x # of columns of your x-space matrices * @param rows_k # of rows of your k-space coefficients * @param cols_k # of columns of your k-space coefficients * @param dt the timestep */ Karniadakis(const size_t rows_x, const size_t cols_x, const size_t rows_k, const size_t cols_k, const double dt); /*! @brief Swap in the fourier coefficients. * * Swaps the coefficients into the object and allocates internal storage for the * inverse matrix. * @param coeff_origin Set of fourier coefficients, void on output. * @param normalisation A numerical discrete fourier transformation followed by its inverse usually yields the input times a constant factor. State this factor here to normalize the output of the step_ii function. */ void init_coeff( Matrix<QuadMat<T_k, n> > & coeff_origin, const double normalisation); /*! @brief Init the coefficients for step_ii * * Inverts your fourier coefficients with the correct gamma_0. * @tparam S The set of Karniadakis-Coefficients you want to use * @attention This function has to be called BEFORE a call of step_ii AND/OR * AFTER you switched steppers. */ template< enum stepper S> void invert_coeff( ); /*! @brief Compute the first part of the Karniadakis scheme * * @param v0 * The field at timestep n, that is stored by the class. * Contains v_{temp} on output. * @param n0 * The nonlinearity at timestep n. * Contains the old v2 on output. * @tparam S The set of Karniadakis-Coefficients you want to use */ template< enum stepper S> void step_i( std::array< Matrix<double, P_x>, n>& v0, std::array< Matrix<double, P_x>, n> & n0); /*! 
@brief Compute the second part of the Karniadakis scheme * * The result is normalized with the inverse of the normalisation factor * you specified in the init_coeff function. * @param v * The fourier transposed result of step_i on input. * Contains the multiplied coefficients on output * @tparam Fourier_T The value type of the fourier transposed matrices * @attention Call invert_coeff BEFORE the first call to step_ii with * a new stepper. */ template< class Fourier_T> inline void step_ii( std::array< Matrix< Fourier_T, TL_NONE>, n>& v) { #ifdef TL_DEBUG if( c_origin.isVoid()) throw Message( "Init coefficients first!", _ping_); #endif multiply_coeff< n,T_k,Fourier_T>( c_inv,v,v); } /*! @brief Display the original and the inverted coefficients * * @param os The outstream, the coefficients are streamed to. */ void display( std::ostream& os = std::cout) const { os << "The current coefficients are \n"<< c_origin <<"The current inverse is\n" << c_inv<<std::endl; } private: const size_t rows, cols; std::array< Matrix< double, P_x>, n> v1, v2; std::array< Matrix< double, P_x>, n> n1, n2; Matrix< QuadMat< T_k, n>, TL_NONE> c_inv; Matrix< QuadMat< T_k, n>, TL_NONE> c_origin; //contains the coeff of first call double prefactor; const double dt; }; template< size_t n, typename T, enum Padding P> Karniadakis<n,T,P>::Karniadakis( const size_t rows, const size_t cols, const size_t crows, const size_t ccols, const double dt): rows( rows), cols( cols), v1( MatrixArray<double,P,n>::construct( rows, cols)), v2(v1), n1(v1), n2(n1), c_inv( crows, ccols, TL_VOID), c_origin(c_inv), prefactor(0.), dt( dt) { } template< size_t n, typename T_k, enum Padding P> void Karniadakis<n,T_k,P>::init_coeff( Matrix<QuadMat<T_k, n> > & coeff_origin, const double normalisation) { #ifdef TL_DEBUG if( normalisation < 1.) 
throw Message( "Yield the prefactor, not its inverse!", _ping_); if( coeff_origin.isVoid()) throw Message("Your coefficients are void!", _ping_); if( coeff_origin.rows() != c_origin.rows() || coeff_origin.cols() != c_origin.cols()) throw Message("Your coefficients have wrong size!\n", _ping_); #endif prefactor = normalisation; if( c_origin.isVoid()) { swap_fields( c_origin, coeff_origin); c_inv.allocate( ); } else throw Message("You've already initialized coefficients", _ping_); } template< size_t n, typename T, enum Padding P> template< enum stepper S> void Karniadakis< n,T,P>::invert_coeff( ) { #ifdef TL_DEBUG if( c_origin.isVoid()) throw Message( "Init your coefficients first!", _ping_); #endif //invert coefficients for(unsigned i=0; i<c_inv.rows(); i++) for( unsigned j=0; j<c_inv.cols(); j++) { for( unsigned k=0; k<n; k++) { for( unsigned q=0; q<n; q++) c_inv(i,j)(k,q) = -prefactor*dt*c_origin(i,j)(k,q); c_inv(i,j)(k,k) += prefactor*Coefficients<S>::gamma_0; } invert( c_inv(i,j), c_inv(i,j)); } //std::cout << "C_inv "<<c_inv<<std::endl; } template< size_t n, typename T, enum Padding P> template< enum stepper S> void Karniadakis<n,T,P>::step_i( std::array< Matrix<double, P>, n>& v0, std::array< Matrix<double, P>, n> & n0) { for( unsigned k=0; k<n; k++) { #ifdef TL_DEBUG if( v0[k].isVoid()||n0[k].isVoid()) throw Message( "ERROR: Cannot work on void matrices!\n", _ping_); if( v0[k].rows() != rows || v0[k].cols() != cols) throw Message( "ERROR: One of the v0 has wrong size!\n", _ping_); if( n0[k].rows() != rows || n0[k].cols() != cols) throw Message( "ERROR: One of the n0 has wrong size!\n", _ping_); #endif #pragma omp parallel for for( size_t i = 0; i < rows; i++) { for( size_t j = 0; j < cols; j++) { n2[k](i,j) = Coefficients<S>::alpha[0]*v0[k](i,j) + Coefficients<S>::alpha[1]*v1[k](i,j) + Coefficients<S>::alpha[2]*v2[k](i,j) + dt*( Coefficients<S>::beta[0]*n0[k](i,j) + Coefficients<S>::beta[1]*n1[k](i,j) + Coefficients<S>::beta[2]*n2[k](i,j)); } } swap_fields( 
n2[k], v2[k]); //we want to keep v2 not n2 permute_fields( n0[k], n1[k], n2[k]); permute_fields( v0[k], v1[k], v2[k]); } } } //namespace toefl #endif //_TL_KARNIADAKIS_
GB_unop__erf_fp64_fp64.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__erf_fp64_fp64 // op(A') function: GB_unop_tran__erf_fp64_fp64 // C type: double // A type: double // cast: double cij = aij // unaryop: cij = erf (aij) #define GB_ATYPE \ double #define GB_CTYPE \ double // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ double aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = erf (x) ; // casting #define GB_CAST(z, aij) \ double z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ double aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ double z = aij ; \ Cx [pC] = erf (z) ; \ } // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ERF || GxB_NO_FP64) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__erf_fp64_fp64 ( double *Cx, // Cx and Ax may be aliased const double *Ax, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { double aij = Ax [p] ; double z = aij ; Cx [p] = erf (z) ; } return (GrB_SUCCESS) ; #endif 
} //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__erf_fp64_fp64 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
gensqlstatehashfunc.c
/* * Perfect hash function generator for PostgreSQL SQLSTATEs. * * Copyright (c) 2014, Oskari Saarenmaa <os@ohmu.fi> * All rights reserved. * * This file is under the Apache License, Version 2.0. * See the file `LICENSE` for details. * */ #include "postgres.h" #include "utils/elog.h" #undef qsort #include <time.h> #ifndef VERIFYFUNC static inline unsigned int hashm(unsigned int h1, unsigned int modulo, unsigned int c1, unsigned int c2) { h1 ^= h1 >> 16; h1 *= 0x85ebca6b + c1; h1 ^= h1 >> 13; h1 *= 0xc2b2ae35 + c2; h1 ^= h1 >> 16; return h1 % modulo; } static int getperfect(unsigned int *nums, unsigned int cnt, unsigned int modulo) { unsigned int best = cnt, c1, best_c1 = 0, best_c2 = 0, iters = 0; time_t now; #pragma omp parallel for reduction(+:iters) for (c1=0; c1<10000; c1++) { unsigned int result[modulo], c2, i, collisions; if (best == 0) continue; for (c2=0; c2<10000; c2++) { iters ++; collisions = 0; memset(result, 0xff, sizeof(result)); for (i=0; i<cnt; i++) { unsigned int h = hashm(nums[i], modulo, c1, c2); if (result[h] != 0xffffffff) collisions ++; else result[h] = i; } #pragma omp critical { if (collisions == 0) { best = 0; best_c1 = c1; best_c2 = c2; } else if (collisions < best) best = collisions; } if (best == 0) break; } } fprintf(stderr, "%u iterations, modulo %u, best function had %u duplicates\n", iters, modulo, best); if (best > 0) return 1; now = time(NULL); printf( "/* Generated by gensqlstatehashfunc.c on %s */\n" "#define HASH_SQLSTATE_MODULO %u\n" "static inline unsigned int\n" "hash_sqlstate(unsigned int s)\n" "{\n" " s ^= s >> 16;\n" " s *= 0x85ebca6b + %u;\n" " s ^= s >> 13;\n" " s *= 0xc2b2ae35 + %u;\n" " s ^= s >> 16;\n" " return s %% HASH_SQLSTATE_MODULO;\n" "}\n", ctime(&now), modulo, best_c1, best_c2); return 0; } #else /* VERIFYFUNC */ #include "sqlstatehashfunc.c" static int verifyfunc(unsigned int *nums, unsigned int cnt) { unsigned int result[HASH_SQLSTATE_MODULO], collisions = 0, i; memset(result, 0xff, sizeof(result)); for 
(i=0; i<cnt; i++) { unsigned int h = hash_sqlstate(nums[i]); if (result[h] != 0xffffffff) collisions ++; else result[h] = i; } fprintf(stderr, "found %u collisions\n", collisions); return collisions ? 1 : 0; } #endif /* VERIFYFUNC */ static int cmp_uints(const void *a, const void *b) { unsigned int p1 = *(unsigned int *) a, p2 = *(unsigned int *) b; return (p1 > p2) ? 1 : (p2 > p1) ? -1 : 0; } int main(int argc, char **argv) { FILE *fp; char errcodes_h_path[1000], line[200], a, b, c, d, e; unsigned int nums[1000], cnt = 0, uniq_nums[1000], uniq_cnt = 0, i; unsigned int modulos[] = { 1409, 2027, 3061, 4583 }; if (argc != 2) { fprintf(stderr, "usage: %s `pg_config --includedir-server`\n", argv[0]); return 1; } snprintf(errcodes_h_path, sizeof(errcodes_h_path), "%s/utils/errcodes.h", argv[1]); fp = fopen(errcodes_h_path, "r"); if (fp == NULL) { perror(errcodes_h_path); return 1; } while (fgets(line, sizeof(line), fp) != NULL) if (sscanf(line, "#define ERRCODE_%*s MAKE_SQLSTATE('%c','%c','%c','%c','%c')", &a, &b, &c, &d, &e) == 5) nums[cnt++] = MAKE_SQLSTATE(a, b, c, d, e); fclose(fp); qsort(nums, cnt, sizeof(unsigned int), cmp_uints); for (i=0; i<cnt; i++) if (i == 0 || nums[i] != nums[i-1]) uniq_nums[uniq_cnt++] = nums[i]; fprintf(stderr, "input set size: %u\n", uniq_cnt); #ifndef VERIFYFUNC for (i=0; i<sizeof(modulos)/sizeof(modulos[0]); i++) { int res = getperfect(uniq_nums, uniq_cnt, modulos[i]); if (res == 0) return 0; } return 1; #else /* VERIFYFUNC */ return verifyfunc(uniq_nums, uniq_cnt); #endif /* VERIFYFUNC */ }
displacement_lagrangemultiplier_residual_contact_criteria.h
// KRATOS ___| | | | // \___ \ __| __| | | __| __| | | __| _` | | // | | | | | ( | | | | ( | | // _____/ \__|_| \__,_|\___|\__|\__,_|_| \__,_|_| MECHANICS // // License: BSD License // license: StructuralMechanicsApplication/license.txt // // Main authors: Vicente Mataix Ferrandiz // #if !defined(KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H) #define KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H /* System includes */ /* External includes */ /* Project includes */ #include "utilities/table_stream_utility.h" #include "solving_strategies/convergencecriterias/convergence_criteria.h" #include "utilities/color_utilities.h" #include "utilities/constraint_utilities.h" namespace Kratos { ///@addtogroup ContactStructuralMechanicsApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@name Kratos Classes ///@{ /** * @class DisplacementLagrangeMultiplierResidualContactCriteria * @ingroup ContactStructuralMechanicsApplication * @brief Convergence criteria for contact problems * This class implements a convergence control based on nodal displacement and * lagrange multiplier values. The error is evaluated separately for each of them, and * relative and absolute tolerances for both must be specified. 
* @author Vicente Mataix Ferrandiz */ template< class TSparseSpace, class TDenseSpace > class DisplacementLagrangeMultiplierResidualContactCriteria : public ConvergenceCriteria< TSparseSpace, TDenseSpace > { public: ///@name Type Definitions ///@{ /// Pointer definition of DisplacementLagrangeMultiplierResidualContactCriteria KRATOS_CLASS_POINTER_DEFINITION( DisplacementLagrangeMultiplierResidualContactCriteria ); /// Local Flags KRATOS_DEFINE_LOCAL_FLAG( ENSURE_CONTACT ); KRATOS_DEFINE_LOCAL_FLAG( PRINTING_OUTPUT ); KRATOS_DEFINE_LOCAL_FLAG( TABLE_IS_INITIALIZED ); KRATOS_DEFINE_LOCAL_FLAG( INITIAL_RESIDUAL_IS_SET ); /// The base class definition (and it subclasses) typedef ConvergenceCriteria< TSparseSpace, TDenseSpace > BaseType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; /// The sparse space used typedef TSparseSpace SparseSpaceType; /// The r_table stream definition TODO: Replace by logger typedef TableStreamUtility::Pointer TablePrinterPointerType; /// The index type definition typedef std::size_t IndexType; /// The key type definition typedef std::size_t KeyType; ///@} ///@name Life Cycle ///@{ /** * @brief Default constructor (parameters) * @param DispRatioTolerance Relative tolerance for displacement residual error * @param DispAbsTolerance Absolute tolerance for displacement residual error * @param LMRatioTolerance Relative tolerance for lagrange multiplier residual error * @param LMAbsTolerance Absolute tolerance for lagrange multiplier residual error * @param EnsureContact To check if the contact is lost * @param pTable The pointer to the output r_table * @param PrintingOutput If the output is going to be printed in a txt file */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( const TDataType DispRatioTolerance, const TDataType DispAbsTolerance, const 
TDataType LMRatioTolerance, const TDataType LMAbsTolerance, const bool EnsureContact = false, const bool PrintingOutput = false ) : BaseType() { // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, EnsureContact); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, PrintingOutput); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); mDispRatioTolerance = DispRatioTolerance; mDispAbsTolerance = DispAbsTolerance; mLMRatioTolerance = LMRatioTolerance; mLMAbsTolerance = LMAbsTolerance; } /** * @brief Default constructor (parameters) * @param ThisParameters The configuration parameters */ explicit DisplacementLagrangeMultiplierResidualContactCriteria( Parameters ThisParameters = Parameters(R"({})")) : BaseType() { // The default parameters Parameters default_parameters = Parameters(R"( { "ensure_contact" : false, "print_convergence_criterion" : false, "residual_relative_tolerance" : 1.0e-4, "residual_absolute_tolerance" : 1.0e-9, "contact_residual_relative_tolerance" : 1.0e-4, "contact_residual_absolute_tolerance" : 1.0e-9 })" ); ThisParameters.ValidateAndAssignDefaults(default_parameters); // The displacement residual mDispRatioTolerance = ThisParameters["residual_relative_tolerance"].GetDouble(); mDispAbsTolerance = ThisParameters["residual_absolute_tolerance"].GetDouble(); // The contact residual mLMRatioTolerance = ThisParameters["contact_displacement_absolute_tolerance"].GetDouble(); mLMAbsTolerance = ThisParameters["contact_residual_absolute_tolerance"].GetDouble(); // Set local flags mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT, ThisParameters["ensure_contact"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT, 
ThisParameters["print_convergence_criterion"].GetBool()); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, false); mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false); } //* Copy constructor. DisplacementLagrangeMultiplierResidualContactCriteria( DisplacementLagrangeMultiplierResidualContactCriteria const& rOther ) :BaseType(rOther) ,mOptions(rOther.mOptions) ,mDispRatioTolerance(rOther.mDispRatioTolerance) ,mDispAbsTolerance(rOther.mDispAbsTolerance) ,mDispInitialResidualNorm(rOther.mDispInitialResidualNorm) ,mDispCurrentResidualNorm(rOther.mDispCurrentResidualNorm) ,mLMRatioTolerance(rOther.mLMRatioTolerance) ,mLMAbsTolerance(rOther.mLMAbsTolerance) ,mLMInitialResidualNorm(rOther.mLMInitialResidualNorm) ,mLMCurrentResidualNorm(rOther.mLMCurrentResidualNorm) { } /// Destructor. ~DisplacementLagrangeMultiplierResidualContactCriteria() override = default; ///@} ///@name Operators ///@{ /** * @brief Compute relative and absolute error. * @param rModelPart Reference to the ModelPart containing the contact problem. 
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 * @return true if convergence is achieved, false otherwise
 */
bool PostCriteria(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    if (SparseSpaceType::Size(rb) != 0) { //if we are solving for something
        // Initialize the squared residual norms accumulated separately for the
        // displacement dofs and the Lagrange-multiplier (LM) dofs
        TDataType disp_residual_solution_norm = 0.0, lm_residual_solution_norm = 0.0;
        IndexType disp_dof_num(0),lm_dof_num(0);

        // First iterator
        const auto it_dof_begin = rDofSet.begin();

        // Auxiliar values (firstprivate copies in the parallel loop below)
        std::size_t dof_id = 0;
        TDataType residual_dof_value = 0.0;

        // The number of active dofs
        const std::size_t number_active_dofs = rb.size();

        // Loop over Dofs: each residual contribution is routed either to the LM
        // accumulator (for the four LM variables) or to the displacement one
        #pragma omp parallel for firstprivate(dof_id, residual_dof_value) reduction(+:disp_residual_solution_norm,lm_residual_solution_norm,disp_dof_num,lm_dof_num)
        for (int i = 0; i < static_cast<int>(rDofSet.size()); i++) {
            auto it_dof = it_dof_begin + i;

            dof_id = it_dof->EquationId();

            // Check dof id is solved (only dofs inside the active system contribute)
            if (dof_id < number_active_dofs) {
                if (mActiveDofs[dof_id]) {
                    residual_dof_value = rb[dof_id];

                    const auto& r_curr_var = it_dof->GetVariable();
                    if ((r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_X) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Y) || (r_curr_var == VECTOR_LAGRANGE_MULTIPLIER_Z) || (r_curr_var == LAGRANGE_MULTIPLIER_CONTACT_PRESSURE)) {
                        lm_residual_solution_norm += residual_dof_value * residual_dof_value;
                        ++lm_dof_num;
                    } else {
                        disp_residual_solution_norm += residual_dof_value * residual_dof_value;
                        ++disp_dof_num;
                    }
                }
            }
        }

        mDispCurrentResidualNorm = disp_residual_solution_norm;
        mLMCurrentResidualNorm = lm_residual_solution_norm;

        TDataType residual_disp_ratio = 1.0;
        TDataType residual_lm_ratio = 1.0;

        // We initialize the solution: on the first convergence check of the step the
        // current norms become the reference norms (zero references are replaced by
        // 1.0 so the ratios below stay finite)
        if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET)) {
            mDispInitialResidualNorm = (disp_residual_solution_norm == 0.0) ? 1.0 : disp_residual_solution_norm;
            mLMInitialResidualNorm = (lm_residual_solution_norm == 0.0) ? 1.0 : lm_residual_solution_norm;
            residual_disp_ratio = 1.0;
            residual_lm_ratio = 1.0;
            mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, true);
        }

        // We calculate the ratio of the displacements
        residual_disp_ratio = mDispCurrentResidualNorm/mDispInitialResidualNorm;

        // We calculate the ratio of the LM
        residual_lm_ratio = mLMCurrentResidualNorm/mLMInitialResidualNorm;

        // A strictly zero LM residual with ENSURE_CONTACT set means contact vanished
        KRATOS_ERROR_IF(mOptions.Is(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) << "ERROR::CONTACT LOST::ARE YOU SURE YOU ARE SUPPOSED TO HAVE CONTACT?" << std::endl;

        // We calculate the absolute norms (average squared residual per dof)
        // NOTE(review): divides by disp_dof_num/lm_dof_num with no zero guard --
        // confirm callers guarantee both dof families are non-empty
        const TDataType residual_disp_abs = mDispCurrentResidualNorm/disp_dof_num;
        const TDataType residual_lm_abs = mLMCurrentResidualNorm/lm_dof_num;

        // The process info of the model part
        ProcessInfo& r_process_info = rModelPart.GetProcessInfo();

        // We print the results (only on rank 0 and when echo is enabled)
        // TODO: Replace for the new log
        if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
            if (r_process_info.Has(TABLE_UTILITY)) {
                std::cout.precision(4);
                TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                auto& Table = p_table->GetTable();
                Table << residual_disp_ratio << mDispRatioTolerance << residual_disp_abs << mDispAbsTolerance << residual_lm_ratio << mLMRatioTolerance << residual_lm_abs << mLMAbsTolerance;
            } else {
                std::cout.precision(4);
                if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT)) {
                    KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("RESIDUAL CONVERGENCE CHECK") << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tDISPLACEMENT: RATIO = ") << residual_disp_ratio << BOLDFONT(" EXP.RATIO = ") << mDispRatioTolerance << BOLDFONT(" ABS = ") << residual_disp_abs << BOLDFONT(" EXP.ABS = ") << mDispAbsTolerance << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tLAGRANGE MUL: RATIO = ") << residual_lm_ratio << BOLDFONT(" EXP.RATIO = ") << mLMRatioTolerance << BOLDFONT(" ABS = ") << residual_lm_abs << BOLDFONT(" EXP.ABS = ") << mLMAbsTolerance << std::endl;
                } else {
                    KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "RESIDUAL CONVERGENCE CHECK" << "\tSTEP: " << r_process_info[STEP] << "\tNL ITERATION: " << r_process_info[NL_ITERATION_NUMBER] << std::endl << std::scientific;
                    KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tDISPLACEMENT: RATIO = " << residual_disp_ratio << " EXP.RATIO = " << mDispRatioTolerance << " ABS = " << residual_disp_abs << " EXP.ABS = " << mDispAbsTolerance << std::endl;
                    KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tLAGRANGE MUL: RATIO = " << residual_lm_ratio << " EXP.RATIO = " << mLMRatioTolerance << " ABS = " << residual_lm_abs << " EXP.ABS = " << mLMAbsTolerance << std::endl;
                }
            }
        }

        // Export the worst ratio and absolute norm for other tools to read back
        r_process_info[CONVERGENCE_RATIO] = (residual_disp_ratio > residual_lm_ratio) ? residual_disp_ratio : residual_lm_ratio;
        r_process_info[RESIDUAL_NORM] = (residual_lm_abs > mLMAbsTolerance) ? residual_lm_abs : mLMAbsTolerance;

        // We check if converged (each family passes via ratio OR absolute tolerance;
        // a zero LM ratio counts as converged unless ENSURE_CONTACT is set)
        const bool disp_converged = (residual_disp_ratio <= mDispRatioTolerance || residual_disp_abs <= mDispAbsTolerance);
        const bool lm_converged = (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::ENSURE_CONTACT) && residual_lm_ratio == 0.0) ? true : (residual_lm_ratio <= mLMRatioTolerance || residual_lm_abs <= mLMAbsTolerance);

        if (disp_converged && lm_converged ) {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& Table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                        Table << BOLDFONT(FGRN(" Achieved"));
                    else
                        Table << "Achieved";
                } else {
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FGRN("achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is achieved" << std::endl;
                }
            }
            return true;
        } else {
            if (rModelPart.GetCommunicator().MyPID() == 0 && this->GetEchoLevel() > 0) {
                if (r_process_info.Has(TABLE_UTILITY)) {
                    TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
                    auto& r_table = p_table->GetTable();
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                        r_table << BOLDFONT(FRED(" Not achieved"));
                    else
                        r_table << "Not achieved";
                } else {
                    if (mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::PRINTING_OUTPUT))
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << BOLDFONT("\tResidual") << " convergence is " << BOLDFONT(FRED(" not achieved")) << std::endl;
                    else
                        KRATOS_INFO("DisplacementLagrangeMultiplierResidualContactCriteria") << "\tResidual convergence is not achieved" << std::endl;
                }
            }
            return false;
        }
    } else // In this case all the displacements are imposed!
        return true;
}

/**
 * @brief This function initialize the convergence criteria
 * @param rModelPart Reference to the ModelPart containing the contact problem.
(unused)
 */
void Initialize( ModelPart& rModelPart) override
{
    BaseType::mConvergenceCriteriaIsInitialized = true;

    // Register the output columns once, when a table printer is available in the
    // ProcessInfo and the columns were not added before
    ProcessInfo& r_process_info = rModelPart.GetProcessInfo();
    if (r_process_info.Has(TABLE_UTILITY) && mOptions.IsNot(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED)) {
        TablePrinterPointerType p_table = r_process_info[TABLE_UTILITY];
        auto& r_table = p_table->GetTable();
        r_table.AddColumn("DP RATIO", 10);
        r_table.AddColumn("EXP. RAT", 10);
        r_table.AddColumn("ABS", 10);
        r_table.AddColumn("EXP. ABS", 10);
        r_table.AddColumn("LM RATIO", 10);
        r_table.AddColumn("EXP. RAT", 10);
        r_table.AddColumn("ABS", 10);
        r_table.AddColumn("EXP. ABS", 10);
        r_table.AddColumn("CONVERGENCE", 15);
        mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::TABLE_IS_INITIALIZED, true);
    }
}

/**
 * @brief This function initializes the solution step
 * @param rModelPart Reference to the ModelPart containing the contact problem.
 * @param rDofSet Reference to the container of the problem's degrees of freedom (stored by the BuilderAndSolver)
 * @param rA System matrix (unused)
 * @param rDx Vector of results (variations on nodal variables)
 * @param rb RHS vector (residual)
 */
void InitializeSolutionStep(
    ModelPart& rModelPart,
    DofsArrayType& rDofSet,
    const TSystemMatrixType& rA,
    const TSystemVectorType& rDx,
    const TSystemVectorType& rb
    ) override
{
    // Initialize flag so the next PostCriteria call re-captures the reference norms
    mOptions.Set(DisplacementLagrangeMultiplierResidualContactCriteria::INITIAL_RESIDUAL_IS_SET, false);

    // Filling mActiveDofs when MPC exist
    ConstraintUtilities::ComputeActiveDofs(rModelPart, mActiveDofs, rDofSet);
}

///@}
///@name Operations
///@{

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Friends
///@{

protected:

///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{

///@}

private:

///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

Flags mOptions; /// Local flags

TDataType mDispRatioTolerance;      /// The ratio threshold for the norm of the displacement residual
TDataType mDispAbsTolerance;        /// The absolute value threshold for the norm of the displacement residual
TDataType mDispInitialResidualNorm; /// The reference norm of the displacement residual
TDataType mDispCurrentResidualNorm; /// The current norm of the displacement residual

TDataType mLMRatioTolerance;        /// The ratio threshold for the norm of the LM residual
TDataType mLMAbsTolerance;          /// The absolute value threshold for the norm of the LM residual
TDataType mLMInitialResidualNorm;   /// The reference norm of the LM residual
TDataType mLMCurrentResidualNorm;   /// The current norm of the LM residual

std::vector<bool> mActiveDofs;      /// This vector contains the dofs that are active

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

///@}
///@name Private Access
///@{

///@}

///@}
///@name Serialization
///@{

///@name Private Inquiry
///@{

///@}
///@name Unaccessible methods
///@{

///@}
}; // Kratos DisplacementLagrangeMultiplierResidualContactCriteria

///@name Local flags creation
///@{

/// Local Flags
// Out-of-class definitions of the static flag members declared in the class body.
// Each flag occupies one bit position; the NOT_* companion is the cleared state.
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::ENSURE_CONTACT(Kratos::Flags::Create(0));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_ENSURE_CONTACT(Kratos::Flags::Create(0, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::PRINTING_OUTPUT(Kratos::Flags::Create(1));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_PRINTING_OUTPUT(Kratos::Flags::Create(1, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::TABLE_IS_INITIALIZED(Kratos::Flags::Create(2));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_TABLE_IS_INITIALIZED(Kratos::Flags::Create(2, false));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3));
template<class TSparseSpace, class TDenseSpace>
const Kratos::Flags DisplacementLagrangeMultiplierResidualContactCriteria<TSparseSpace, TDenseSpace>::NOT_INITIAL_RESIDUAL_IS_SET(Kratos::Flags::Create(3, false));
} // namespace Kratos
#endif /* KRATOS_DISPLACEMENT_LAGRANGE_MULTIPLIER_RESIDUAL_CONTACT_CRITERIA_H */
isogeometric_post_utility.h
// // Project Name: Kratos // Last Modified by: $Author: hbui $ // Date: $Date: 2013-10-12 $ // Revision: $Revision: 1.0 $ // // #if !defined(KRATOS_ISOGEOMETRIC_POST_UTILITY_H_INCLUDED ) #define KRATOS_ISOGEOMETRIC_POST_UTILITY_H_INCLUDED // System includes #include <string> #include <vector> #include <tuple> #include <iostream> // External includes #include <omp.h> #include "boost/progress.hpp" // Project includes #include "includes/define.h" #include "includes/model_part.h" #include "includes/node.h" #include "includes/element.h" #include "includes/properties.h" #include "utilities/openmp_utils.h" #include "custom_utilities/iga_define.h" #include "custom_utilities/isogeometric_utility.h" #define USE_TRIANGULATION_UTILS_FOR_TRIANGULATION #if defined(USE_TRIANGULATION_UTILS_FOR_TRIANGULATION) #include "custom_utilities/triangulation_utils.h" #elif defined(USE_CGAL_FOR_TRIANGULATION) && defined(ISOGEOMETRIC_APPLICATION_USE_CGAL) #include <CGAL/Exact_predicates_inexact_constructions_kernel.h> #include <CGAL/Exact_predicates_exact_constructions_kernel.h> #include <CGAL/Delaunay_triangulation_2.h> #include <CGAL/Triangulation_vertex_base_with_info_2.h> #endif namespace Kratos { ///@addtogroup IsogeometricApplication ///@{ ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** * Abstract class for all utility to export mesh from NURBS. Also to provide basic utility functions. 
 */
class IsogeometricPostUtility : public IsogeometricUtility
{
public:
    ///@name Type Definitions
    ///@{

    // Shorthands for the Kratos core container/geometry types used below
    typedef typename ModelPart::NodesContainerType NodesArrayType;
    typedef typename ModelPart::ElementsContainerType ElementsArrayType;
    typedef typename ModelPart::ConditionsContainerType ConditionsArrayType;
    typedef typename Element::GeometryType GeometryType;
    typedef typename GeometryType::PointType NodeType;
    typedef typename NodeType::PointType PointType;
    typedef typename GeometryType::IntegrationPointsArrayType IntegrationPointsArrayType;
    typedef typename GeometryType::CoordinatesArrayType CoordinatesArrayType;
    typedef typename NodeType::DofsContainerType DofsContainerType;
    typedef std::size_t IndexType;

    /// Pointer definition of IsogeometricPostUtility
    KRATOS_CLASS_POINTER_DEFINITION(IsogeometricPostUtility);

    ///@}
    ///@name Life Cycle
    ///@{

    /// Default constructor.
    IsogeometricPostUtility()
    {
    }

    /// Destructor.
    virtual ~IsogeometricPostUtility()
    {
    }

    ///@}
    ///@name Operators
    ///@{

    ///@}
    ///@name Operations
    ///@{

    /// Create a node for a model_part with a specific Id and transfer the values
    /// @param p_ref       local coordinates of the point inside the patch
    /// @param rPatch      patch providing the control-point and value grids
    /// @param NodeCounter Id given to the newly created node
    template<class TPatchType, typename TCoordinatesType, typename TIndexType>
    static typename NodeType::Pointer CreateNodeAndTransferValues(const TCoordinatesType& p_ref, const TPatchType& rPatch,
        ModelPart& r_model_part, const TIndexType& NodeCounter)
    {
        typename NodeType::Pointer pNewNode = CreateNode(p_ref, rPatch, r_model_part, NodeCounter);
        TransferValuesToNodes(*pNewNode, p_ref, rPatch);
        return pNewNode;
    }

    /// Create a node for a model_part with a specific Id
    /// The physical position is evaluated from the patch's control-point grid at p_ref.
    template<class TPatchType, typename TCoordinatesType, typename TIndexType>
    static typename NodeType::Pointer CreateNode(const TCoordinatesType& p_ref, const TPatchType& rPatch,
        ModelPart& r_model_part, const TIndexType& NodeCounter)
    {
        typename TPatchType::ControlPointType p = rPatch.pControlPointGridFunction()->GetValue(p_ref);
        typename NodeType::Pointer pNewNode = r_model_part.CreateNewNode(NodeCounter, p.X(), p.Y(), p.Z());
        return pNewNode;
    }

    /// Transfer the control values from patch to node
    /// The node has to be inside the patch
    template<class TPatchType>
    static void TransferValuesToNodes(NodeType& rNode, const TPatchType& rPatch)
    {
        // Invert the geometry mapping to obtain the node's local coordinates first
        typename TPatchType::Array1DGridFunctionType::ConstPointer pControlPointCoordinatesGridFunction = rPatch.pGetGridFunction(CONTROL_POINT_COORDINATES);
        typename TPatchType::Array1DGridFunctionType::DataType p_ref;
        pControlPointCoordinatesGridFunction->LocalCoordinates(rNode, p_ref);
        TransferValuesToNodes(rNode, p_ref, rPatch);
    }

    /// Transfer the control values from patch to node
    /// p_ref is the local coordinates of the node in patch
    /// For every registered grid function (double, array_1d<double,3>, Vector) whose
    /// name matches a Kratos variable held by the node, the interpolated value at
    /// p_ref is written into the node's solution-step data.
    template<class TPatchType, typename TCoordinatesType>
    static void TransferValuesToNodes(NodeType& rNode, const TCoordinatesType& p_ref, const TPatchType& rPatch)
    {
        typedef typename TPatchType::DoubleGridFunctionContainerType DoubleGridFunctionContainerType;
        typedef typename TPatchType::Array1DGridFunctionContainerType Array1DGridFunctionContainerType;
        typedef typename TPatchType::VectorGridFunctionContainerType VectorGridFunctionContainerType;

        // transfer the control values: scalar (double) grid functions
        DoubleGridFunctionContainerType DoubleGridFunctions_ = rPatch.DoubleGridFunctions();
        for (typename DoubleGridFunctionContainerType::const_iterator it_gf = DoubleGridFunctions_.begin(); it_gf != DoubleGridFunctions_.end(); ++it_gf)
        {
            typedef double DataType;
            typedef Variable<DataType> VariableType;
            const std::string& var_name = (*it_gf)->pControlGrid()->Name();
            if (KratosComponents<VariableData>::Has(var_name))
            {
                VariableType* pVariable = dynamic_cast<VariableType*>(&KratosComponents<VariableData>::Get(var_name));
                DataType value = (*it_gf)->GetValue(p_ref);
                if (rNode.SolutionStepsDataHas(*pVariable))
                    rNode.GetSolutionStepValue(*pVariable) = value;
            }
        }

        // array_1d<double, 3> grid functions (the geometry grid itself is skipped)
        Array1DGridFunctionContainerType Array1DGridFunctions_ = rPatch.Array1DGridFunctions();
        for (typename Array1DGridFunctionContainerType::const_iterator it_gf = Array1DGridFunctions_.begin(); it_gf != Array1DGridFunctions_.end(); ++it_gf)
        {
            typedef array_1d<double, 3> DataType;
            typedef Variable<DataType> VariableType;
            const std::string& var_name = (*it_gf)->pControlGrid()->Name();
            if (var_name == "CONTROL_POINT_COORDINATES") continue;
            if (KratosComponents<VariableData>::Has(var_name))
            {
                VariableType* pVariable = dynamic_cast<VariableType*>(&KratosComponents<VariableData>::Get(var_name));
                DataType value = (*it_gf)->GetValue(p_ref);
                if (rNode.SolutionStepsDataHas(*pVariable))
                    rNode.GetSolutionStepValue(*pVariable) = value;
            }
        }

        // Vector grid functions
        VectorGridFunctionContainerType VectorGridFunctions_ = rPatch.VectorGridFunctions();
        for (typename VectorGridFunctionContainerType::const_iterator it_gf = VectorGridFunctions_.begin(); it_gf != VectorGridFunctions_.end(); ++it_gf)
        {
            typedef Vector DataType;
            typedef Variable<DataType> VariableType;
            const std::string& var_name = (*it_gf)->pControlGrid()->Name();
            if (KratosComponents<VariableData>::Has(var_name))
            {
                VariableType* pVariable = dynamic_cast<VariableType*>(&KratosComponents<VariableData>::Get(var_name));
                DataType value = (*it_gf)->GetValue(p_ref);
                if (rNode.SolutionStepsDataHas(*pVariable))
                    rNode.GetSolutionStepValue(*pVariable) = value;
            }
        }
    }

    /// Transfer the control values from patch to Gauss points
    /// For each integration point of rElement: map to global coordinates, invert the
    /// patch geometry to local coordinates, and sample the grid function of rVariable.
    template<class TEntityType, typename TVariableType, class TPatchType>
    static void TransferValuesToGaussPoints(TEntityType& rElement, const TVariableType& rVariable,
        const TPatchType& rPatch, const ProcessInfo& rProcessInfo)
    {
        GeometryData::IntegrationMethod ThisIntegrationMethod = rElement.GetIntegrationMethod();

        GeometryType& rGeometry = rElement.GetGeometry();

        typename TPatchType::Array1DGridFunctionType::ConstPointer pControlPointCoordinatesGridFunction = rPatch.pGetGridFunction(CONTROL_POINT_COORDINATES);

        typename GridFunction<TPatchType::FESpaceType::Dim(), typename TVariableType::Type>::ConstPointer pGridFunc = rPatch.pGetGridFunction(rVariable);

        #ifdef ENABLE_BEZIER_GEOMETRY
        //initialize the geometry
        rGeometry.Initialize(ThisIntegrationMethod);
        #endif

        const IntegrationPointsArrayType& integration_points = rGeometry.IntegrationPoints(ThisIntegrationMethod);

        std::vector<typename TVariableType::Type> ValuesOnIntPoint(integration_points.size());

        CoordinatesArrayType GlobalCoords;
        typename TPatchType::Array1DGridFunctionType::DataType p_ref;

        for (unsigned int PointNumber = 0; PointNumber < integration_points.size(); ++PointNumber)
        {
            rGeometry.GlobalCoordinates(GlobalCoords, integration_points[PointNumber]);

            // NOTE(review): this re-fetches the grid function on every iteration and
            // shadows the identical outer declaration above -- looks redundant; confirm
            typename TPatchType::Array1DGridFunctionType::ConstPointer pControlPointCoordinatesGridFunction = rPatch.pGetGridFunction(CONTROL_POINT_COORDINATES);
            pControlPointCoordinatesGridFunction->LocalCoordinates(GlobalCoords, p_ref);

            ValuesOnIntPoint[PointNumber] = pGridFunc->GetValue(p_ref);
        }

        #ifdef ENABLE_BEZIER_GEOMETRY
        // clean the geometry
        rGeometry.Clean();
        #endif

        rElement.SetValueOnIntegrationPoints( rVariable, ValuesOnIntPoint, rProcessInfo);
    }

    /// Generate corner points for regular geometry
    /// Dispatches to GenerateRectangle (2D) or GenerateBox (3D); other dimensions throw.
    template<int TDim, typename TCoordinatesType, typename TValueType>
    static void GenerateRegular(std::vector<TCoordinatesType>& points,
        const std::vector<TCoordinatesType>& cmin,
        const std::vector<TCoordinatesType>& cmax)
    {
        if (TDim == 2)
        {
            GenerateRectangle(points, cmin[0], cmax[0], cmin[1], cmax[1]);
        }
        else if (TDim == 3)
        {
            GenerateBox(points, cmin[0], cmax[0], cmin[1], cmax[1], cmin[2], cmax[2]);
        }
        else
            KRATOS_THROW_ERROR(std::logic_error, "Invalid dimension", TDim)
    }

    /// Generate a single rectangle.
/// The 4 corner points are denoted as
///  4---3
///  |   |
///  1---2
/// points must already hold at least 4 entries; only the x/y components are written.
template<typename TCoordinatesType, typename TValueType>
static void GenerateRectangle(std::vector<TCoordinatesType>& points,
    const TValueType& xmin, const TValueType& xmax,
    const TValueType& ymin, const TValueType& ymax)
{
    points[0][0] = xmin; points[0][1] = ymin; // points[0][2] = 0.0;
    points[1][0] = xmax; points[1][1] = ymin; // points[1][2] = 0.0;
    points[2][0] = xmax; points[2][1] = ymax; // points[2][2] = 0.0;
    points[3][0] = xmin; points[3][1] = ymax; // points[3][2] = 0.0;
}

/// Generate the triangulation for a list of points in 3D
/// The triangulation will be performed on the physical points with the information {center, normal, t1, t2}
/// Each 3D point is projected onto the plane through rCenter with normal rNormal and
/// expressed in the (rTangent1, rTangent2) in-plane basis before Delaunay triangulation.
/// @return triangle connectivities as 0-based indices into points
template<typename TCoordinatesType, typename TVectorType, typename TIndexType>
static std::vector<std::vector<TIndexType> > GenerateTriangleGrid(const std::vector<TCoordinatesType>& points,
    const TVectorType& rCenter, const TVectorType& rNormal,
    const TVectorType& rTangent1, const TVectorType& rTangent2)
{
    // create the 2D coordinates for points, in order to triangulate
    std::vector<double> XY;
    TCoordinatesType Projection;
    for (std::size_t i = 0; i < points.size(); ++i)
    {
        noalias(Projection) = points[i] - inner_prod(points[i] - rCenter, rNormal) * rNormal;
        XY.push_back(inner_prod(Projection - rCenter, rTangent1));
        XY.push_back(inner_prod(Projection - rCenter, rTangent2));
    }
    // std::cout << "XY:" << std::endl;
    // for (std::size_t i = 0; i < XY.size()/2; ++i)
    //     std::cout << " " << XY[2*i] << " " << XY[2*i+1] << std::endl;

    // compute the triangulation
    typedef std::vector<std::vector<TIndexType> > connectivity_t;
    connectivity_t Connectivities;
#if defined(USE_CGAL_FOR_TRIANGULATION)
    // CGAL Delaunay with per-vertex info storing the original point index
    typedef CGAL::Exact_predicates_inexact_constructions_kernel Kernel;
    typedef CGAL::Triangulation_vertex_base_with_info_2<unsigned int, Kernel> Vb;
    typedef CGAL::Triangulation_data_structure_2<Vb> Tds;
    typedef CGAL::Delaunay_triangulation_2<Kernel, Tds> Delaunay;
    typedef Kernel::Point_2 Point2;

    std::vector< std::pair<Point2, unsigned int> > clipped_points;
    for(std::size_t i = 0; i < XY.size() / 2; ++i)
    {
        clipped_points.push_back( std::make_pair( Point2(XY[2*i], XY[2*i+1]), i ) );
    }

    Delaunay triangulation;
    triangulation.insert(clipped_points.begin(), clipped_points.end());

    for(Delaunay::Finite_faces_iterator fit = triangulation.finite_faces_begin(); fit != triangulation.finite_faces_end(); ++fit)
    {
        Delaunay::Face_handle face = fit;
        std::vector<unsigned int> con(3);
        con[0] = face->vertex(0)->info();
        con[1] = face->vertex(1)->info();
        con[2] = face->vertex(2)->info();
        Connectivities.push_back(con);
    }
#elif defined(USE_TRIANGULATION_UTILS_FOR_TRIANGULATION)
    TriangulationUtils tri_util;
    tri_util.ComputeDelaunayTriangulation(XY, Connectivities);
#else
    // REMARK: a tool to perform triangulation is not defined. You must define it.
    KRATOS_THROW_ERROR(std::logic_error, "A triangulation method must be specialized", "")
#endif

    return Connectivities;
}

/// Generate the triangulation for a list of points in 3D
/// The triangulation will be performed on the physical points with the information {center, normal, t1, t2}
/// The refinement is performed on the local points instead.
/// Triangulates physical_points (projected onto the {rCenter, rNormal, rTangent1, rTangent2}
/// plane), then refines the copy of local_points nrefine times by triangle subdivision,
/// and finally shifts every connectivity index by offset.
/// @return pair of (refined local points, offset triangle connectivities)
template<typename TCoordinatesType, typename TVectorType, typename TIndexType>
static std::pair<std::vector<TCoordinatesType>, std::vector<std::vector<TIndexType> > > GenerateTriangleGrid(const std::vector<TCoordinatesType>& physical_points,
    const TVectorType& rCenter, const TVectorType& rNormal,
    const TVectorType& rTangent1, const TVectorType& rTangent2,
    const std::vector<TCoordinatesType>& local_points,
    const TIndexType& offset, const std::size_t& nrefine)
{
    // compute the triangulation
    typedef std::vector<std::vector<TIndexType> > connectivity_t;
    connectivity_t Connectivities = GenerateTriangleGrid<TCoordinatesType, TVectorType, TIndexType>(physical_points, rCenter, rNormal, rTangent1, rTangent2);

    // refine if needed (each pass quadruples the number of triangles)
    std::vector<TCoordinatesType> new_points = local_points;
    for (std::size_t i = 0; i < nrefine; ++i)
        RefineTriangleGrid<TIndexType, TCoordinatesType>(new_points, Connectivities);

    // offset the connectivity
    for (std::size_t i = 0; i < Connectivities.size(); ++i)
        for (std::size_t j = 0; j < Connectivities[i].size(); ++j)
            Connectivities[i][j] += offset;

    return std::make_pair(new_points, Connectivities);
}

/// Generate the quadrilateral grid.
/// The 4 corner points are denoted as
///  4---3
///  |   |
///  1---2
/// Generates a structured (num_div_1 x num_div_2) quad mesh by bilinear interpolation
/// of the 4 corner points. Node ids in the connectivities start at starting_node_id.
/// @return pair of (grid points, quad connectivities ordered n1-n2-n4-n3 i.e. counter-clockwise)
template<typename TCoordinatesType, typename TIndexType>
static std::pair<std::vector<TCoordinatesType>, std::vector<std::vector<TIndexType> > > GenerateQuadGrid(const TCoordinatesType& p1, const TCoordinatesType& p2,
    const TCoordinatesType& p3, const TCoordinatesType& p4,
    const TIndexType& starting_node_id,
    const std::size_t& num_div_1, const std::size_t& num_div_2)
{
    TCoordinatesType p, pm, pn;
    std::vector<TCoordinatesType> points;
    std::vector<std::vector<TIndexType> > connectivities;
    double xi, eta;
    std::size_t i, j;

    // generate the grid points by bilinear interpolation between the edges 1-2 and 4-3
    for (i = 0; i <= num_div_1; ++i)
    {
        xi = ((double) i) / num_div_1;
        pm = p1 + xi*(p2 - p1);
        pn = p4 + xi*(p3 - p4);
        for (j = 0; j <= num_div_2; ++j)
        {
            eta = ((double) j) / num_div_2;
            p = pm + eta*(pn - pm);
            points.push_back(p);
        }
    }

    // generate the cell connectivities
    TIndexType n1, n2, n3, n4;
    for (i = 0; i < num_div_1; ++i)
    {
        for(j = 0; j < num_div_2; ++j)
        {
            n1 = starting_node_id + i * (num_div_2 + 1) + j;
            n2 = starting_node_id + i * (num_div_2 + 1) + j + 1;
            n3 = starting_node_id + (i + 1) * (num_div_2 + 1) + j;
            n4 = starting_node_id + (i + 1) * (num_div_2 + 1) + j + 1;
            // FIX: use TIndexType as the element type of the connectivity row; the
            // previous std::vector<std::size_t> literal only compiled/converted
            // correctly when TIndexType happened to be std::size_t
            connectivities.push_back(std::vector<TIndexType>{n1, n2, n4, n3});
        }
    }

    return std::make_pair(points, connectivities);
}

/// Generate a single box.
/// The 8 corner points are denoted as
///  4---3    8---7
///  |   | -> |   |
///  1---2    5---6
/// points must already hold at least 8 entries; all three components are written.
template<typename TCoordinatesType, typename TValueType>
static void GenerateBox(std::vector<TCoordinatesType>& points,
    const TValueType& xmin, const TValueType& xmax,
    const TValueType& ymin, const TValueType& ymax,
    const TValueType& zmin, const TValueType& zmax)
{
    points[0][0] = xmin; points[0][1] = ymin; points[0][2] = zmin;
    points[1][0] = xmax; points[1][1] = ymin; points[1][2] = zmin;
    points[2][0] = xmax; points[2][1] = ymax; points[2][2] = zmin;
    points[3][0] = xmin; points[3][1] = ymax; points[3][2] = zmin;
    points[4][0] = xmin; points[4][1] = ymin; points[4][2] = zmax;
    points[5][0] = xmax; points[5][1] = ymin; points[5][2] = zmax;
    points[6][0] = xmax; points[6][1] = ymax; points[6][2] = zmax;
    points[7][0] = xmin; points[7][1] = ymax; points[7][2] = zmax;
}

/// Generate the hexahedral grid. The 8 corner points are denoted as
///  4---3    8---7
///  |   | -> |   |
///  1---2    5---6
/// Generates a structured (num_div_1 x num_div_2 x num_div_3) hex mesh by trilinear
/// interpolation of the 8 corner points. Node ids start at starting_node_id.
/// @return pair of (grid points, hex connectivities in standard bottom/top ordering)
template<typename TCoordinatesType, typename TIndexType>
static std::pair<std::vector<TCoordinatesType>, std::vector<std::vector<TIndexType> > > GenerateHexGrid(const TCoordinatesType& p1, const TCoordinatesType& p2,
    const TCoordinatesType& p3, const TCoordinatesType& p4,
    const TCoordinatesType& p5, const TCoordinatesType& p6,
    const TCoordinatesType& p7, const TCoordinatesType& p8,
    const TIndexType& starting_node_id,
    const std::size_t& num_div_1, const std::size_t& num_div_2, const std::size_t& num_div_3)
{
    TCoordinatesType p, pm1, pn1, pm2, pn2, pq1, pq2;
    std::vector<TCoordinatesType> points;
    std::vector<std::vector<TIndexType> > connectivities;
    double xi, eta, zeta;
    std::size_t i, j, k;

    // generate the grid points by trilinear interpolation between the two quad faces
    for (i = 0; i <= num_div_1; ++i)
    {
        xi = ((double) i) / num_div_1;
        pm1 = p1 + xi*(p2 - p1);
        pn1 = p4 + xi*(p3 - p4);
        pm2 = p5 + xi*(p6 - p5);
        pn2 = p8 + xi*(p7 - p8);
        for (j = 0; j <= num_div_2; ++j)
        {
            eta = ((double) j) / num_div_2;
            pq1 = pm1 + eta*(pn1 - pm1);
            pq2 = pm2 + eta*(pn2 - pm2);
            for (k = 0; k <= num_div_3; ++k)
            {
                zeta = ((double) k) / num_div_3;
                p = pq1 + zeta*(pq2-pq1);
                points.push_back(p);
            }
        }
    }

    // generate the cell connectivities
    // FIX: the previous version declared unused TIndexType n1..n8 at function scope
    // and then shadowed them with IndexType (std::size_t) locals, and pushed
    // std::vector<std::size_t> rows; everything now uses TIndexType consistently
    for (i = 0; i < num_div_1; ++i)
    {
        for (j = 0; j < num_div_2; ++j)
        {
            for (k = 0; k < num_div_3; ++k)
            {
                const TIndexType n1 = starting_node_id + (i * (num_div_2 + 1) + j) * (num_div_3 + 1) + k;
                const TIndexType n2 = starting_node_id + (i * (num_div_2 + 1) + j + 1) * (num_div_3 + 1) + k;
                const TIndexType n3 = starting_node_id + ((i + 1) * (num_div_2 + 1) + j) * (num_div_3 + 1) + k;
                const TIndexType n4 = starting_node_id + ((i + 1) * (num_div_2 + 1) + j + 1) * (num_div_3 + 1) + k;
                const TIndexType n5 = n1 + 1;
                const TIndexType n6 = n2 + 1;
                const TIndexType n7 = n3 + 1;
                const TIndexType n8 = n4 + 1;
                connectivities.push_back(std::vector<TIndexType>{n1, n2, n4, n3, n5, n6, n8, n7});
            }
        }
    }

    return std::make_pair(points, connectivities);
}

/// Refine a triangle grid by sub-divide a triangle into 4 sub-triangles.
/// Splits every triangle into 4 by inserting edge midpoints. Midpoints are shared
/// between adjacent triangles via an edge->midpoint map (both edge orientations are
/// keyed), so Points grows by one entry per unique edge and Connectivities is
/// replaced by 4x as many triangles.
template<typename TIndexType = std::size_t, typename TCoordinatesType = std::vector<double>,
    typename TCoordinatesListType = std::vector<TCoordinatesType>,
    typename TConnectivityType = std::vector<TIndexType>,
    typename TConnectivityListType = std::vector<TConnectivityType> >
static void RefineTriangleGrid(TCoordinatesListType& Points, TConnectivityListType& Connectivities)
{
    std::size_t npoints = Points.size();
    TIndexType last_id = static_cast<TIndexType>(npoints-1);

    // generate the new middle points (one per unique edge, stored under both orientations)
    typedef std::pair<TIndexType, TIndexType> key_t;
    std::map<key_t, TIndexType> map_corner_to_middle;
    key_t key1, key2;
    TIndexType n1, n2, n3;
    for (typename TConnectivityListType::iterator it = Connectivities.begin(); it != Connectivities.end(); ++it)
    {
        n1 = (*it)[0];
        n2 = (*it)[1];
        n3 = (*it)[2];

        // edge n1-n2
        key1 = std::make_pair(n1, n2);
        key2 = std::make_pair(n2, n1);
        if (map_corner_to_middle.find(key1) == map_corner_to_middle.end())
        {
            Points.push_back(0.5*(Points[n1] + Points[n2]));
            map_corner_to_middle[key1] = ++last_id;
            map_corner_to_middle[key2] = last_id;
        }

        // edge n2-n3
        key1 = std::make_pair(n2, n3);
        key2 = std::make_pair(n3, n2);
        if (map_corner_to_middle.find(key1) == map_corner_to_middle.end())
        {
            Points.push_back(0.5*(Points[n2] + Points[n3]));
            map_corner_to_middle[key1] = ++last_id;
            map_corner_to_middle[key2] = last_id;
        }

        // edge n3-n1
        key1 = std::make_pair(n3, n1);
        key2 = std::make_pair(n1, n3);
        if (map_corner_to_middle.find(key1) == map_corner_to_middle.end())
        {
            Points.push_back(0.5*(Points[n3] + Points[n1]));
            map_corner_to_middle[key1] = ++last_id;
            map_corner_to_middle[key2] = last_id;
        }
    }

    // generate new triangles: each old triangle (n1,n2,n3) with midpoints (m1,m2,m3)
    // yields 3 corner triangles plus the central one (m1,m2,m3)
    TIndexType m1, m2, m3;
    TConnectivityListType Connectivities_old = Connectivities;
    Connectivities.clear();
    for (typename TConnectivityListType::iterator it = Connectivities_old.begin(); it != Connectivities_old.end(); ++it)
    {
        n1 = (*it)[0];
        n2 = (*it)[1];
        n3 = (*it)[2];
        m1 = map_corner_to_middle[std::make_pair(n1, n2)];
        m2 = map_corner_to_middle[std::make_pair(n2, n3)];
        m3 = map_corner_to_middle[std::make_pair(n3, n1)];
        Connectivities.push_back(TConnectivityType{n1, m1, m3});
        Connectivities.push_back(TConnectivityType{m1, n2, m2});
        Connectivities.push_back(TConnectivityType{m1, m2, m3});
        Connectivities.push_back(TConnectivityType{m2, n3, m3});
    }
}

/// Find the entity of the same type in the list of entities
/// An entity matches when both its dynamic type and its geometry's dynamic type equal
/// those of r_sample_entity.
template<class TEntityType, class TEntitiesContainerType>
static TEntitiesContainerType FindEntities(TEntitiesContainerType& pEntities, TEntityType const& r_sample_entity)
{
    TEntitiesContainerType pFoundEntities;

    for (typename TEntitiesContainerType::ptr_iterator it = pEntities.ptr_begin(); it != pEntities.ptr_end(); ++it)
    {
        if (typeid(*(*it)) == typeid(r_sample_entity))
            if (typeid((*it)->GetGeometry()) == typeid(r_sample_entity.GetGeometry()))
                pFoundEntities.push_back(*it);
    }

    return pFoundEntities;
}

/// Create the entities based on the connectivities
/// It is noted that the newly created entities are not added to the other model_part. User must do it manually.
/// For each connectivity row the referenced nodes are looked up in r_model_part (by
/// NodeKey) and a clone of r_sample_entity is created with id ++last_entity_id.
/// @return container with the newly created entities (not yet added to any model part)
template<typename TConnectivityType, typename TEntityType, typename TEntitiesContainerType>
static TEntitiesContainerType CreateEntities(
    const TConnectivityType& r_connectivities,
    ModelPart& r_model_part,
    TEntityType const& r_sample_entity,
    std::size_t& last_entity_id,
    Properties::Pointer pProperties,
    const std::string& NodeKey)
{
    TEntitiesContainerType pNewEntities;
    typename TEntityType::NodesArrayType temp_entity_nodes;

    for (typename TConnectivityType::const_iterator it = r_connectivities.begin(); it != r_connectivities.end(); ++it)
    {
        temp_entity_nodes.clear();
        // collect the node pointers of this entity from the model part
        for (typename TConnectivityType::value_type::const_iterator it2 = it->begin(); it2 != it->end(); ++it2)
            temp_entity_nodes.push_back(*(FindKey(r_model_part.Nodes(), *it2, NodeKey).base()));
        typename TEntityType::Pointer pNewEntity = r_sample_entity.Create(++last_entity_id, temp_entity_nodes, pProperties);
        pNewEntities.push_back(pNewEntity);
    }

    return pNewEntities;
}

/// Create a list of entities (element/condition) from a model_part to another model_part
/// It is noted that the newly created entities are not added to the other model_part. User must do it manually.
/// @param pEntities           source entities whose nodes and connectivity are duplicated
/// @param r_other_model_part  target model_part; the duplicated nodes ARE added to it here
/// @param r_sample_entity     prototype entity; its Create() factory is invoked
/// @param last_entity_id      id counter for the new entities (updated in place)
/// @param pProperties         properties used when retain_prop_id == false
/// @param retain_prop_id      if true, reuse the source entity's properties id
///                            (looked up in r_other_model_part) instead of pProperties
/// @return container with the newly created entities (not yet added to the target model_part)
template<class TEntityType, class TEntitiesContainerType>
static TEntitiesContainerType CreateEntities(
    TEntitiesContainerType& pEntities,
    ModelPart& r_other_model_part,
    TEntityType const& r_sample_entity,
    std::size_t& last_entity_id,
    Properties::Pointer pProperties,
    const bool& retain_prop_id = false)
{
    // first collect all the nodes from the elements
    // (keying the map by node id de-duplicates nodes shared by several entities)
    std::map<std::size_t, NodeType::Pointer> pNodes;
    for (typename TEntitiesContainerType::ptr_iterator it = pEntities.ptr_begin(); it != pEntities.ptr_end(); ++it)
    {
        for (std::size_t i = 0; i < (*it)->GetGeometry().size(); ++i)
        {
            pNodes[(*it)->GetGeometry()[i].Id()] = (*it)->GetGeometry().pGetPoint(i);
        }
    }

    // create the new nodes in the other model_part
    // (ids continue after the current largest node id; the initial/undeformed
    //  position of each source node is copied)
    std::size_t last_node_id = GetLastNodeId(r_other_model_part);
    std::map<std::size_t, std::size_t> MapOldToNew;
    for (std::map<std::size_t, NodeType::Pointer>::iterator it = pNodes.begin(); it != pNodes.end(); ++it)
    {
        const PointType& rPoint = it->second->GetInitialPosition();
        NodeType::Pointer pNewNode = r_other_model_part.CreateNewNode(++last_node_id, rPoint[0], rPoint[1], rPoint[2]);
        MapOldToNew[it->second->Id()] = last_node_id;
    }

    // create new elements in the other model_part
    const std::string NodeKey = std::string("Node");
    typename TEntityType::NodesArrayType temp_entity_nodes;
    TEntitiesContainerType pNewEntities;
    for (typename TEntitiesContainerType::ptr_iterator it = pEntities.ptr_begin(); it != pEntities.ptr_end(); ++it)
    {
        // translate the old connectivity to the new node ids
        temp_entity_nodes.clear();
        for (std::size_t i = 0; i < (*it)->GetGeometry().size(); ++i)
        {
            std::size_t node_id = MapOldToNew[(*it)->GetGeometry()[i].Id()];
            temp_entity_nodes.push_back(*(FindKey(r_other_model_part.Nodes(), node_id, NodeKey).base()));
        }

        if (!retain_prop_id)
        {
            pNewEntities.push_back(r_sample_entity.Create(++last_entity_id, temp_entity_nodes, pProperties));
        }
        else
        {
            // keep the source entity's properties id, resolved in the target model_part
            Properties::Pointer pNewProperties = r_other_model_part.pGetProperties((*it)->GetProperties().Id());
            pNewEntities.push_back(r_sample_entity.Create(++last_entity_id, temp_entity_nodes, pNewProperties));
        }
    }

    return pNewEntities;
}

// /// Create conditions/elements from the list of points.
// /// The triangulation will be performed on the physical points with the information {center, normal, t1, t2}
// /// The refinement is performed on the local points instead.
// /// The point list will be triangulated before the conditions are created.
// /// It is noted that the newly created entities are not added to the other model_part. User must do it manually. Nevertheless, the nodes are added to the model_part.
// /// The new node will be created from last_node_id+1
// template<typename TPointType, typename TVectorType, class TEntityType, class TPointsContainerType, class TNodesContainerType, class TEntitiesContainerType>
// static std::tuple<TPointsContainerType, TNodesContainerType, TEntitiesContainerType> CreateEntities(
//     const TPointsContainerType& physical_points,
//     const TVectorType& rCenter,
//     const TVectorType& rNormal,
//     const TVectorType& rTangent1,
//     const TVectorType& rTangent2,
//     const TPointsContainerType& local_points,
//     const std::size_t& nrefine,
//     ModelPart& r_model_part,
//     TEntityType const& r_sample_entity,
//     std::size_t& last_node_id,
//     std::size_t& last_entity_id,
//     Properties::Pointer pProperties)
// {
//     // compute the triangulation
//     typedef unsigned int IndexType;
//     typedef std::vector<std::vector<IndexType> > connectivity_t;
//     connectivity_t Connectivities = GenerateTriangleGrid<TPointType, TVectorType, IndexType>(physical_points, rCenter, rNormal, rTangent1, rTangent2);
//     // refine if needed
//     TPointsContainerType new_points = local_points;
//     for (std::size_t i = 0; i < nrefine; ++i)
//         RefineTriangleGrid<unsigned int, TPointType>(new_points, Connectivities);
//     // offset the connectivity
//     for (std::size_t i = 0; i < Connectivities.size(); ++i)
//         for (std::size_t j = 0; j < Connectivities[i].size(); ++j)
//             Connectivities[i][j] += last_node_id+1;
//     // std::cout << "Connectivities:" <<
std::endl; // // for (std::size_t i = 0; i < Connectivities.size(); ++i) // // { // // std::cout << " " << i << ":"; // // for (std::size_t j = 0; j < Connectivities[i].size(); ++j) // // std::cout << " " << Connectivities[i][j]; // // std::cout << std::endl; // // } // // create the nodes // std::vector<std::size_t> map_con_to_mp(new_points.size()); // TNodesContainerType pNewNodes; // for (std::size_t i = 0; i < new_points.size(); ++i) // { // NodeType::Pointer pNewNode = r_model_part.CreateNewNode(++last_node_id, new_points[i][0], new_points[i][1], new_points[i][2]); // map_con_to_mp[i] = pNewNode->Id(); // pNewNodes.push_back(pNewNode); // } // // create the entities based on connectivity // const std::string NodeKey = std::string("Node"); // TEntitiesContainerType pNewEntities = CreateEntities<connectivity_t, TEntityType, TEntitiesContainerType>(Connectivities, r_model_part, // r_sample_entity, last_entity_id, pProperties, NodeKey); // return std::make_tuple(new_points, pNewNodes, pNewEntities); // } //**********AUXILIARY FUNCTION************************************************************** // Construct the matrix structure for high performance assembling // This subroutine shall only be used to construct the matrix structure for L2 projection // using in post-processing //****************************************************************************************** template<typename TElementType, typename TCompressedMatrixType, typename TElementsArrayType> static void ConstructL2MatrixStructure ( TCompressedMatrixType& A, TElementsArrayType& rElements, std::map<std::size_t, std::size_t> MapNodeIdToVec) { std::size_t equation_size = A.size1(); std::vector<std::vector<std::size_t> > indices(equation_size); typename TElementType::EquationIdVectorType ids; for(typename TElementsArrayType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; ++i_element) { ids.resize((i_element)->GetGeometry().size()); for(unsigned int i = 0; i < 
(i_element)->GetGeometry().size(); ++i) ids[i] = MapNodeIdToVec[(i_element)->GetGeometry()[i].Id()]; for(std::size_t i = 0 ; i < ids.size() ; ++i) { if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; ++j) { if(ids[j] < equation_size) AddUnique(row_indices, ids[j]); } } } } //allocating the memory needed int data_size = 0; for(std::size_t i = 0 ; i < indices.size() ; ++i) { data_size += indices[i].size(); } A.reserve(data_size, false); //filling with zero the matrix (creating the structure) #ifndef _OPENMP for(std::size_t i = 0 ; i < indices.size() ; ++i) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end() ; ++it) { A.push_back(i, *it, 0.00); } row_indices.clear(); } #else int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> matrix_partition; OpenMPUtils::CreatePartition(number_of_threads, indices.size(), matrix_partition); for( int k=0; k < number_of_threads; ++k ) { #pragma omp parallel if( omp_get_thread_num() == k ) { for( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ ) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it = row_indices.begin(); it != row_indices.end() ; ++it) { A.push_back(i, *it, 0.00); } row_indices.clear(); } } } #endif } //**********AUXILIARY FUNCTION************************************************************** // Construct the matrix structure for high performance assembling // This subroutine shall only be used to construct the matrix structure for L2 projection // using in post-processing //****************************************************************************************** template<typename TElementType, typename TCompressedMatrixType, typename TElementsArrayType> static void 
ConstructL2MatrixStructure ( TCompressedMatrixType& A, TElementsArrayType& rElements) { std::size_t equation_size = A.size1(); std::vector<std::vector<std::size_t> > indices(equation_size); typename TElementType::EquationIdVectorType ids; for(typename TElementsArrayType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; ++i_element) { ids.resize((i_element)->GetGeometry().size()); for(unsigned int i = 0; i < (i_element)->GetGeometry().size(); ++i) ids[i] = (i_element)->GetGeometry()[i].Id() - 1; for(std::size_t i = 0 ; i < ids.size() ; ++i) { if(ids[i] < equation_size) { std::vector<std::size_t>& row_indices = indices[ids[i]]; for(std::size_t j = 0 ; j < ids.size() ; ++j) { if(ids[j] < equation_size) AddUnique(row_indices, ids[j]); } } } } //allocating the memory needed int data_size = 0; for(std::size_t i = 0 ; i < indices.size() ; ++i) { data_size += indices[i].size(); } A.reserve(data_size, false); //filling with zero the matrix (creating the structure) #ifndef _OPENMP for(std::size_t i = 0 ; i < indices.size() ; i++) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++) { A.push_back(i, *it, 0.00); } row_indices.clear(); } #else int number_of_threads = omp_get_max_threads(); std::vector<unsigned int> matrix_partition; OpenMPUtils::CreatePartition(number_of_threads, indices.size(), matrix_partition); for( int k=0; k < number_of_threads; ++k ) { #pragma omp parallel if( omp_get_thread_num() == k ) { for( std::size_t i = matrix_partition[k]; i < matrix_partition[k+1]; i++ ) { std::vector<std::size_t>& row_indices = indices[i]; std::sort(row_indices.begin(), row_indices.end()); for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++) { A.push_back(i, *it, 0.00); } row_indices.clear(); } } } #endif } //**********AUXILIARY 
FUNCTION**************************************************************
// Support function for ConstructMatrixStructure
//******************************************************************************************
// Append candidate to v only if it is not already present (linear scan;
// the row-index vectors built with this are sorted later by the caller).
static inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
    std::vector<std::size_t>::iterator i = v.begin();
    std::vector<std::size_t>::iterator endit = v.end();
    while ( i != endit && (*i) != candidate)
    {
        ++i;
    }
    if( i == endit )
    {
        v.push_back(candidate);
    }
}

//**********AUXILIARY FUNCTION**************************************************************
//******************************************************************************************
// Map a parametric coordinate to the basis' reference interval:
// NURBS keeps x unchanged, Bezier applies 2*x - 1 (i.e. [0, 1] -> [-1, 1]);
// any other type yields 0.0.
static inline double CoordinateScaling(const double& x, const int& Type)
{
    if(Type == _NURBS_)
    {
        return x;
    }
    else if(Type == _BEZIER_)
    {
        return 2 * x - 1;
    }
    else
        return 0.0;
}

///@}
///@name Access
///@{

///@}
///@name Inquiry
///@{

///@}
///@name Input and output
///@{

/// Turn back information as a string.
virtual std::string Info() const
{
    std::stringstream buffer;
    buffer << "IsogeometricPostUtility";
    return buffer.str();
}

/// Print information about this object.
virtual void PrintInfo(std::ostream& rOStream) const
{
    rOStream << "IsogeometricPostUtility";
}

/// Print object's data.
virtual void PrintData(std::ostream& rOStream) const
{}

///@}
///@name Friends
///@{

///@}

protected:
///@name Protected static Member Variables
///@{

///@}
///@name Protected member Variables
///@{

///@}
///@name Protected Operators
///@{

///@}
///@name Protected Operations
///@{

///@}
///@name Protected Access
///@{

///@}
///@name Protected Inquiry
///@{

///@}
///@name Protected LifeCycle
///@{

///@}

private:
///@name Static Member Variables
///@{

///@}
///@name Member Variables
///@{

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator.
IsogeometricPostUtility& operator=(IsogeometricPostUtility const& rOther)
{
    // private and a no-op: the utility is effectively non-assignable from outside
    return *this;
}

/// Copy constructor.
// private and empty: the utility is effectively non-copyable from outside
IsogeometricPostUtility(IsogeometricPostUtility const& rOther)
{
}

///@}

}; // Class IsogeometricPostUtility

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
// no-op: nothing is read from the stream
inline std::istream& operator >>(std::istream& rIStream, IsogeometricPostUtility& rThis)
{
    return rIStream;
}

/// output stream function
// prints the class info, a newline, then the object's data
inline std::ostream& operator <<(std::ostream& rOStream, const IsogeometricPostUtility& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);
    return rOStream;
}
///@}

///@} addtogroup block

}// namespace Kratos.

#endif // KRATOS_ISOGEOMETRIC_POST_UTILITY_H_INCLUDED
kmp_sch_simd_guided.c
// RUN: %libomp-compile-and-run /* Test for the 'schedule(simd:guided)' clause. Compiler needs to generate a dynamic dispatching and pass the schedule value 46 to the OpenMP RTL. Test uses numerous loop parameter combinations. */ #include <stdio.h> #include <omp.h> #include "omp_testsuite.h" #if defined(WIN32) || defined(_WIN32) #include <windows.h> #define delay() Sleep(1); #else #include <unistd.h> #define delay() usleep(10); #endif // uncomment for debug diagnostics: //#define DEBUG #define SIMD_LEN 4 // --------------------------------------------------------------------------- // Various definitions copied from OpenMP RTL enum sched { kmp_sch_static_balanced_chunked = 45, kmp_sch_guided_simd = 46, kmp_sch_runtime_simd = 47, }; typedef unsigned u32; typedef long long i64; typedef unsigned long long u64; typedef struct { int reserved_1; int flags; int reserved_2; int reserved_3; char *psource; } id; extern int __kmpc_global_thread_num(id*); extern void __kmpc_barrier(id*, int gtid); extern void __kmpc_dispatch_init_4(id*, int, enum sched, int, int, int, int); extern void __kmpc_dispatch_init_8(id*, int, enum sched, i64, i64, i64, i64); extern int __kmpc_dispatch_next_4(id*, int, void*, void*, void*, void*); extern int __kmpc_dispatch_next_8(id*, int, void*, void*, void*, void*); // End of definitions copied from OpenMP RTL. 
// --------------------------------------------------------------------------- static id loc = {0, 2, 0, 0, ";file;func;0;0;;"}; // --------------------------------------------------------------------------- int run_loop_64(i64 loop_lb, i64 loop_ub, i64 loop_st, int loop_chunk) { int err = 0; static int volatile loop_sync = 0; i64 lb; // Chunk lower bound i64 ub; // Chunk upper bound i64 st; // Chunk stride int rc; int tid = omp_get_thread_num(); int gtid = tid; int last; #if DEBUG printf("run_loop_<%d>(lb=%d, ub=%d, st=%d, ch=%d)\n", (int)sizeof(i64), gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk); #endif // Don't test degenerate cases that should have been discovered by codegen if (loop_st == 0) return 0; if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub) return 0; __kmpc_dispatch_init_8(&loc, gtid, kmp_sch_guided_simd, loop_lb, loop_ub, loop_st, loop_chunk); if (tid == 0) { // Let the master thread handle the chunks alone int chunk; // No of current chunk i64 next_lb; // Lower bound of the next chunk i64 last_ub; // Upper bound of the last processed chunk u64 cur; // Number of interations in current chunk u64 max; // Max allowed iterations for current chunk int undersized = 0; chunk = 0; next_lb = loop_lb; max = (loop_ub - loop_lb) / loop_st + 1; // The first chunk can consume all iterations while (__kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st)) { ++ chunk; #if DEBUG printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub); #endif // Check if previous chunk (it is not the final chunk) is undersized if (undersized) { printf("Error with chunk %d\n", chunk); err++; } // Check lower and upper bounds if (lb != next_lb) { printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk); err++; } if (loop_st > 0) { if (!(ub <= loop_ub)) { printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb <= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk); err++; } } else { if (!(ub 
>= loop_ub)) { printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb >= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk); err++; } }; // if // Stride should not change if (!(st == loop_st)) { printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk); err++; } cur = (ub - lb) / loop_st + 1; // Guided scheduling uses FP computations, so current chunk may // be a bit bigger (+1) than allowed maximum if (!(cur <= max + 1)) { printf("Error with iter %llu, %llu\n", cur, max); err++; } // Update maximum for the next chunk if (cur < max) max = cur; next_lb = ub + loop_st; last_ub = ub; undersized = (cur < loop_chunk); }; // while // Must have at least one chunk if (!(chunk > 0)) { printf("Error with chunk %d\n", chunk); err++; } // Must have the right last iteration index if (loop_st > 0) { if (!(last_ub <= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st > loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } } else { if (!(last_ub >= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st < loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } }; // if // Let non-master threads go loop_sync = 1; } else { int i; // Workers wait for master thread to finish, then call __kmpc_dispatch_next for (i = 0; i < 1000000; ++ i) { if (loop_sync != 0) { break; }; // if if (!(i & (32 - 1))) THREAD_SCHED_POINT(); }; // for i while (loop_sync == 0) { delay(); THREAD_SCHED_POINT(); }; // while // At this moment we do not have any more chunks -- all the chunks already // processed by master thread rc = __kmpc_dispatch_next_8(&loc, gtid, &last, &lb, &ub, &st); if (rc) { printf("Error return value\n"); err++; } }; // if __kmpc_barrier(&loc, gtid); if (tid == 
0) { loop_sync = 0; // Restore original state #if DEBUG printf("run_loop_64(): at the end\n"); #endif }; // if __kmpc_barrier(&loc, gtid); return err; } // run_loop // --------------------------------------------------------------------------- int run_loop_32(int loop_lb, int loop_ub, int loop_st, int loop_chunk) { int err = 0; static int volatile loop_sync = 0; int lb; // Chunk lower bound int ub; // Chunk upper bound int st; // Chunk stride int rc; int tid = omp_get_thread_num(); int gtid = tid; int last; #if DEBUG printf("run_loop_<%d>(lb=%d, ub=%d, st=%d, ch=%d)\n", (int)sizeof(int), gtid, tid, (int)loop_lb, (int)loop_ub, (int)loop_st, loop_chunk); #endif // Don't test degenerate cases that should have been discovered by codegen if (loop_st == 0) return 0; if (loop_st > 0 ? loop_lb > loop_ub : loop_lb < loop_ub) return 0; __kmpc_dispatch_init_4(&loc, gtid, kmp_sch_guided_simd, loop_lb, loop_ub, loop_st, loop_chunk); if (tid == 0) { // Let the master thread handle the chunks alone int chunk; // No of current chunk int next_lb; // Lower bound of the next chunk int last_ub; // Upper bound of the last processed chunk u64 cur; // Number of interations in current chunk u64 max; // Max allowed iterations for current chunk int undersized = 0; chunk = 0; next_lb = loop_lb; max = (loop_ub - loop_lb) / loop_st + 1; // The first chunk can consume all iterations while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) { ++ chunk; #if DEBUG printf("chunk=%d, lb=%d, ub=%d\n", chunk, (int)lb, (int)ub); #endif // Check if previous chunk (it is not the final chunk) is undersized if (undersized) { printf("Error with chunk %d\n", chunk); err++; } // Check lower and upper bounds if (lb != next_lb) { printf("Error with lb %d, %d, ch %d\n", (int)lb, (int)next_lb, chunk); err++; } if (loop_st > 0) { if (!(ub <= loop_ub)) { printf("Error with ub %d, %d, ch %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb <= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, 
(int)ub, chunk); err++; } } else { if (!(ub >= loop_ub)) { printf("Error with ub %d, %d, %d\n", (int)ub, (int)loop_ub, chunk); err++; } if (!(lb >= ub)) { printf("Error with bounds %d, %d, %d\n", (int)lb, (int)ub, chunk); err++; } }; // if // Stride should not change if (!(st == loop_st)) { printf("Error with st %d, %d, ch %d\n", (int)st, (int)loop_st, chunk); err++; } cur = (ub - lb) / loop_st + 1; // Guided scheduling uses FP computations, so current chunk may // be a bit bigger (+1) than allowed maximum if (!(cur <= max + 1)) { printf("Error with iter %llu, %llu\n", cur, max); err++; } // Update maximum for the next chunk if (cur < max) max = cur; next_lb = ub + loop_st; last_ub = ub; undersized = (cur < loop_chunk); }; // while // Must have at least one chunk if (!(chunk > 0)) { printf("Error with chunk %d\n", chunk); err++; } // Must have the right last iteration index if (loop_st > 0) { if (!(last_ub <= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st > loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } } else { if (!(last_ub >= loop_ub)) { printf("Error with last1 %d, %d, ch %d\n", (int)last_ub, (int)loop_ub, chunk); err++; } if (!(last_ub + loop_st < loop_ub)) { printf("Error with last2 %d, %d, %d, ch %d\n", (int)last_ub, (int)loop_st, (int)loop_ub, chunk); err++; } }; // if // Let non-master threads go loop_sync = 1; } else { int i; // Workers wait for master thread to finish, then call __kmpc_dispatch_next for (i = 0; i < 1000000; ++ i) { if (loop_sync != 0) { break; }; // if }; // for i while (loop_sync == 0) { delay(); THREAD_SCHED_POINT(); }; // while // At this moment we do not have any more chunks -- all the chunks already // processed by the master thread rc = __kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st); if (rc) { printf("Error return value\n"); err++; } }; // if __kmpc_barrier(&loc, gtid); if 
(tid == 0) {
    loop_sync = 0;    // Restore original state
#if DEBUG
    printf("run_loop<>(): at the end\n");
#endif
  }; // if
  __kmpc_barrier(&loc, gtid);
  return err;
} // run_loop

// ---------------------------------------------------------------------------
// Drive run_loop_64() over many (lb, ub, stride, chunk) combinations using
// num_th threads; returns the accumulated error count (0 == pass).
int run_64(int num_th) {
  int err = 0;
#pragma omp parallel num_threads(num_th)
  {
    int chunk;
    i64 st, lb, ub;
    // NOTE(review): err is incremented by every thread without a reduction;
    // presumably tolerated because only zero/non-zero matters -- confirm upstream.
    for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) {
      for (st = 1; st <= 3; ++ st) {
        for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) {
          for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) {
            err += run_loop_64(lb, ub, st, chunk);   // forward iteration space
            err += run_loop_64(ub, lb, -st, chunk);  // reversed bounds, negative stride
          }; // for ub
        }; // for lb
      }; // for st
    }; // for chunk
  }
  return err;
} // run_64

// Same parameter sweep as run_64() but through the 32-bit dispatcher.
int run_32(int num_th) {
  int err = 0;
#pragma omp parallel num_threads(num_th)
  {
    int chunk, st, lb, ub;
    for (chunk = SIMD_LEN; chunk <= 3*SIMD_LEN; chunk += SIMD_LEN) {
      for (st = 1; st <= 3; ++ st) {
        for (lb = -3 * num_th * st; lb <= 3 * num_th * st; ++ lb) {
          for (ub = lb; ub < lb + num_th * (chunk+1) * st; ++ ub) {
            err += run_loop_32(lb, ub, st, chunk);
            err += run_loop_32(ub, lb, -st, chunk);
          }; // for ub
        }; // for lb
      }; // for st
    }; // for chunk
  }
  return err;
} // run_32

// ---------------------------------------------------------------------------
int main() {
  int n, err = 0;
  // exercise both the 32- and 64-bit dispatchers with 1..4 threads
  for (n = 1; n <= 4; ++ n) {
    err += run_32(n);
    err += run_64(n);
  }; // for n
  if (err)
    printf("failed with %d errors\n", err);
  else
    printf("passed\n");
  return err;
}
GB_unaryop__identity_bool_int32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__identity_bool_int32
// op(A') function:  GB_tran__identity_bool_int32

// C type:   bool
// A type:   int32_t
// cast:     bool cij = (bool) aij
// unaryop:  cij = aij

#define GB_ATYPE \
    int32_t

#define GB_CTYPE \
    bool

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    int32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: pass the value through)
#define GB_OP(z, x) \
    z = x ;

// casting (int32_t -> bool)
#define GB_CASTING(z, x) \
    bool z = (bool) x ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_BOOL || GxB_NO_INT32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Element-wise over the anz entries of Ax, parallelized with a static schedule.
GrB_Info GB_unop__identity_bool_int32
(
    bool *restrict Cx,
    const int32_t *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The actual loop body lives in the included template, instantiated for
// phase 2 of the transpose via the macros defined above.
GrB_Info GB_tran__identity_bool_int32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
DES_bs_b.c
/* * This file is part of John the Ripper password cracker, * Copyright (c) 1996-2001,2003,2010-2013,2015 by Solar Designer * * Addition of single DES encryption with no salt by * Deepika Dutta Mishra <dipikadutta at gmail.com> in 2012, no * rights reserved. */ #ifdef _MSC_VER #undef _OPENMP #endif #include "arch.h" #include "common.h" #include "DES_bs.h" #include "memdbg.h" #if DES_BS_ASM && defined(_OPENMP) && defined(__GNUC__) #warning Assembly code and OpenMP are both requested - will provide the former, but not the latter (for DES-based hashes). This may likely be corrected by enabling SIMD intrinsics with the C compiler (try adding -msse2 to OMPFLAGS). #endif #if !DES_BS_ASM #define vzero (*(vtype *)&DES_bs_all.zero) #if DES_bs_mt #define vones (*(vtype *)&DES_bs_all_by_tnum(-1).ones) #else #define vones (*(vtype *)&DES_bs_all.ones) #endif #define DES_BS_VECTOR_LOOPS 0 #if defined(__ARM_NEON) && DES_BS_DEPTH == 64 #include <arm_neon.h> typedef uint32x2_t vtype; #define vst(dst, ofs, src) \ vst1_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src)) #define vxorf(a, b) \ veor_u32((a), (b)) #define vnot(dst, a) \ (dst) = vmvn_u32((a)) #define vand(dst, a, b) \ (dst) = vand_u32((a), (b)) #define vor(dst, a, b) \ (dst) = vorr_u32((a), (b)) #define vandn(dst, a, b) \ (dst) = vbic_u32((a), (b)) #define vsel(dst, a, b, c) \ (dst) = vbsl_u32((c), (b), (a)) #if 0 #define vshl1(dst, src) \ (dst) = vadd_u32((src), (src)) #endif #define vshl(dst, src, shift) \ (dst) = vshl_n_u32((src), (shift)) #define vshr(dst, src, shift) \ (dst) = vshr_n_u32((src), (shift)) #elif defined(__ARM_NEON) && ARCH_BITS == 32 && DES_BS_DEPTH == 96 #include <arm_neon.h> typedef struct { uint32x2_t f; unsigned ARCH_WORD g; } vtype; #define vst(dst, ofs, src) \ vst1_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = veor_u32((a).f, (b).f); \ (dst).g = (a).g ^ (b).g 
#define vnot(dst, a) \ (dst).f = vmvn_u32((a).f); \ (dst).g = ~(a).g #define vand(dst, a, b) \ (dst).f = vand_u32((a).f, (b).f); \ (dst).g = (a).g & (b).g #define vor(dst, a, b) \ (dst).f = vorr_u32((a).f, (b).f); \ (dst).g = (a).g | (b).g #define vandn(dst, a, b) \ (dst).f = vbic_u32((a).f, (b).f); \ (dst).g = (a).g & ~(b).g #define vsel(dst, a, b, c) \ (dst).f = vbsl_u32((c).f, (b).f, (a).f); \ (dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g)) #elif defined(__ARM_NEON) && DES_BS_DEPTH == 128 && defined(DES_BS_2X64) #include <arm_neon.h> typedef struct { uint32x2_t f, g; } vtype; #define vst(dst, ofs, src) \ vst1_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ vst1_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \ (src).g) #define vxor(dst, a, b) \ (dst).f = veor_u32((a).f, (b).f); \ (dst).g = veor_u32((a).g, (b).g) #define vnot(dst, a) \ (dst).f = vmvn_u32((a).f); \ (dst).g = vmvn_u32((a).g) #define vand(dst, a, b) \ (dst).f = vand_u32((a).f, (b).f); \ (dst).g = vand_u32((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = vorr_u32((a).f, (b).f); \ (dst).g = vorr_u32((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = vbic_u32((a).f, (b).f); \ (dst).g = vbic_u32((a).g, (b).g) #define vsel(dst, a, b, c) \ (dst).f = vbsl_u32((c).f, (b).f, (a).f); \ (dst).g = vbsl_u32((c).g, (b).g, (a).g) #elif defined(__ARM_NEON) && DES_BS_DEPTH == 128 #include <arm_neon.h> typedef uint32x4_t vtype; #define vst(dst, ofs, src) \ vst1q_u32((uint32_t *)((DES_bs_vector *)&(dst) + (ofs)), (src)) #define vxorf(a, b) \ veorq_u32((a), (b)) #define vnot(dst, a) \ (dst) = vmvnq_u32((a)) #define vand(dst, a, b) \ (dst) = vandq_u32((a), (b)) #define vor(dst, a, b) \ (dst) = vorrq_u32((a), (b)) #define vandn(dst, a, b) \ (dst) = vbicq_u32((a), (b)) #define vsel(dst, a, b, c) \ (dst) = vbslq_u32((c), (b), (a)) #if 0 #define vshl1(dst, src) \ (dst) = vaddq_u32((src), (src)) #endif #define vshl(dst, src, shift) \ (dst) = vshlq_n_u32((src), (shift)) 
#define vshr(dst, src, shift) \ (dst) = vshrq_n_u32((src), (shift)) #elif defined(__ARM_NEON) && \ ((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \ (ARCH_BITS == 32 && DES_BS_DEPTH == 160)) #include <arm_neon.h> typedef struct { uint32x4_t f; unsigned ARCH_WORD g; } vtype; #define vst(dst, ofs, src) \ vst1q_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g #define vxor(dst, a, b) \ (dst).f = veorq_u32((a).f, (b).f); \ (dst).g = (a).g ^ (b).g #define vnot(dst, a) \ (dst).f = vmvnq_u32((a).f); \ (dst).g = ~(a).g #define vand(dst, a, b) \ (dst).f = vandq_u32((a).f, (b).f); \ (dst).g = (a).g & (b).g #define vor(dst, a, b) \ (dst).f = vorrq_u32((a).f, (b).f); \ (dst).g = (a).g | (b).g #define vandn(dst, a, b) \ (dst).f = vbicq_u32((a).f, (b).f); \ (dst).g = (a).g & ~(b).g #define vsel(dst, a, b, c) \ (dst).f = vbslq_u32((c).f, (b).f, (a).f); \ (dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g)) #elif defined(__ARM_NEON) && DES_BS_DEPTH == 256 #include <arm_neon.h> typedef struct { uint32x4_t f, g; } vtype; #define vst(dst, ofs, src) \ vst1q_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \ (src).f); \ vst1q_u32( \ (uint32_t *)&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \ (src).g) #define vxor(dst, a, b) \ (dst).f = veorq_u32((a).f, (b).f); \ (dst).g = veorq_u32((a).g, (b).g) #define vnot(dst, a) \ (dst).f = vmvnq_u32((a).f); \ (dst).g = vmvnq_u32((a).g) #define vand(dst, a, b) \ (dst).f = vandq_u32((a).f, (b).f); \ (dst).g = vandq_u32((a).g, (b).g) #define vor(dst, a, b) \ (dst).f = vorrq_u32((a).f, (b).f); \ (dst).g = vorrq_u32((a).g, (b).g) #define vandn(dst, a, b) \ (dst).f = vbicq_u32((a).f, (b).f); \ (dst).g = vbicq_u32((a).g, (b).g) #define vsel(dst, a, b, c) \ (dst).f = vbslq_u32((c).f, (b).f, (a).f); \ (dst).g = vbslq_u32((c).g, (b).g, (a).g) #elif defined(__ALTIVEC__) && DES_BS_DEPTH == 128 #ifdef __linux__ #include <altivec.h> #endif typedef vector 
signed int vtype;
#define vst(dst, ofs, src) \
    vec_st((src), (ofs) * sizeof(DES_bs_vector), (vtype *)(dst))
#define vxorf(a, b) \
    vec_xor((a), (b))
#define vnot(dst, a) \
    (dst) = vec_nor((a), (a))
#define vand(dst, a, b) \
    (dst) = vec_and((a), (b))
#define vor(dst, a, b) \
    (dst) = vec_or((a), (b))
#define vandn(dst, a, b) \
    (dst) = vec_andc((a), (b))
#define vsel(dst, a, b, c) \
    (dst) = vec_sel((a), (b), (vector bool int)(c))
/* AltiVec vector plus one scalar machine word per virtual vector */
#elif defined(__ALTIVEC__) && \
    ((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
    (ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
    vector signed int f;
    unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
    vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
    (dst).f = vec_xor((a).f, (b).f); \
    (dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
    (dst).f = vec_nor((a).f, (a).f); \
    (dst).g = ~(a).g
#define vand(dst, a, b) \
    (dst).f = vec_and((a).f, (b).f); \
    (dst).g = (a).g & (b).g
#define vor(dst, a, b) \
    (dst).f = vec_or((a).f, (b).f); \
    (dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
    (dst).f = vec_andc((a).f, (b).f); \
    (dst).g = (a).g & ~(b).g
#define vsel(dst, a, b, c) \
    (dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
    (dst).g = (((a).g & ~(c).g) ^ ((b).g & (c).g))
/* AltiVec: two vector registers per virtual vector */
#elif defined(__ALTIVEC__) && DES_BS_DEPTH == 256
#ifdef __linux__
#include <altivec.h>
#endif
typedef struct {
    vector signed int f, g;
} vtype;
#define vst(dst, ofs, src) \
    vec_st((src).f, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->f); \
    vec_st((src).g, (ofs) * sizeof(DES_bs_vector), ((vtype *)&(dst))->g)
#define vxor(dst, a, b) \
    (dst).f = vec_xor((a).f, (b).f); \
    (dst).g = vec_xor((a).g, (b).g)
#define vnot(dst, a) \
    (dst).f = vec_nor((a).f, (a).f); \
    (dst).g = vec_nor((a).g, (a).g)
#define vand(dst, a, b) \
    (dst).f = vec_and((a).f, (b).f); \
    (dst).g = vec_and((a).g, (b).g)
#define vor(dst, a, b) \
    (dst).f = vec_or((a).f, (b).f); \
    (dst).g = vec_or((a).g, (b).g)
#define vandn(dst, a, b) \
    (dst).f = vec_andc((a).f, (b).f); \
    (dst).g = vec_andc((a).g, (b).g)
#define vsel(dst, a, b, c) \
    (dst).f = vec_sel((a).f, (b).f, (vector bool int)(c).f); \
    (dst).g = vec_sel((a).g, (b).g, (vector bool int)(c).g)
/* Intel MIC: one 512-bit register per virtual vector */
#elif defined(__MIC__) && DES_BS_DEPTH == 512
#include <immintrin.h>
typedef __m512i vtype;
#define vst(dst, ofs, src) \
    _mm512_store_epi32((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
    _mm512_xor_epi32((a), (b))
#define vand(dst, a, b) \
    (dst) = _mm512_and_epi32((a), (b))
#define vor(dst, a, b) \
    (dst) = _mm512_or_epi32((a), (b))
#define vandn(dst, a, b) \
    (dst) = _mm512_andnot_epi32((b), (a))
#define vshl1(dst, src) \
    (dst) = _mm512_add_epi32((src), (src))
#define vshl(dst, src, shift) \
    (dst) = _mm512_slli_epi32((src), (shift))
#define vshr(dst, src, shift) \
    (dst) = _mm512_srli_epi32((src), (shift))
/* AVX: one 256-bit register per virtual vector */
#elif defined(__AVX__) && DES_BS_DEPTH == 256 && !defined(DES_BS_NO_AVX256)
#include <immintrin.h>
typedef __m256i vtype;
#define vst(dst, ofs, src) \
    _mm256_store_si256((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
    _mm256_xor_si256((a), (b))
#define vand(dst, a, b) \
    (dst) = _mm256_and_si256((a), (b))
#define vor(dst, a, b) \
    (dst) = _mm256_or_si256((a), (b))
#define vandn(dst, a, b) \
    (dst) = _mm256_andnot_si256((b), (a))
#ifdef __XOP__
/* This could be _mm256_cmov_si256(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
    (dst) = __builtin_ia32_vpcmov_v8sf256((b), (a), (c))
#endif
#define vshl1(dst, src) \
    (dst) = _mm256_add_epi8((src), (src))
#define vshl(dst, src, shift) \
    (dst) = _mm256_slli_epi64((src), (shift))
#define vshr(dst, src, shift) \
    (dst) = _mm256_srli_epi64((src), (shift))
/* AVX 256-bit plus SSE 128-bit register per virtual vector */
#elif defined(__AVX__) && DES_BS_DEPTH == 384 && !defined(DES_BS_NO_AVX128)
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
typedef struct {
    __m256i f;
    __m128i g;
} vtype;
#define vst(dst, ofs, src) \
    _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
        (src).g)
#define vxor(dst, a, b) \
    (dst).f = _mm256_xor_si256((a).f, (b).f); \
    (dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
    (dst).f = _mm256_and_si256((a).f, (b).f); \
    (dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
    (dst).f = _mm256_or_si256((a).f, (b).f); \
    (dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
    (dst).f = _mm256_andnot_si256((b).f, (a).f); \
    (dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
    (dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
    (dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
    (dst).f = _mm256_slli_epi64((src).f, (shift)); \
    (dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
    (dst).f = _mm256_srli_epi64((src).f, (shift)); \
    (dst).g = _mm_srli_epi64((src).g, (shift))
/* Two AVX 256-bit registers per virtual vector */
#elif defined(__AVX__) && DES_BS_DEPTH == 512
#include <immintrin.h>
typedef struct {
    __m256i f, g;
} vtype;
#define vst(dst, ofs, src) \
    _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
        (src).g)
#define vxor(dst, a, b) \
    (dst).f = _mm256_xor_si256((a).f, (b).f); \
    (dst).g = _mm256_xor_si256((a).g, (b).g)
#define vand(dst, a, b) \
    (dst).f = _mm256_and_si256((a).f, (b).f); \
    (dst).g = _mm256_and_si256((a).g, (b).g)
#define vor(dst, a, b) \
    (dst).f = _mm256_or_si256((a).f, (b).f); \
    (dst).g = _mm256_or_si256((a).g, (b).g)
#define vandn(dst, a, b) \
    (dst).f = _mm256_andnot_si256((b).f, (a).f); \
    (dst).g = _mm256_andnot_si256((b).g, (a).g)
#ifdef __XOP__
/* This could be _mm256_cmov_ps(), but it does not exist (yet?) */
#define vsel(dst, a, b, c) \
    (dst).f = __builtin_ia32_vpcmov_v8sf256((b).f, (a).f, (c).f); \
    (dst).g = __builtin_ia32_vpcmov_v8sf256((b).g, (a).g, (c).g)
#endif
#define vshl(dst, src, shift) \
    (dst).f = _mm256_slli_epi64((src).f, (shift)); \
    (dst).g = _mm256_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
    (dst).f = _mm256_srli_epi64((src).f, (shift)); \
    (dst).g = _mm256_srli_epi64((src).g, (shift))
/* AVX 256-bit plus MMX 64-bit register per virtual vector */
#elif defined(__AVX__) && defined(__MMX__) && DES_BS_DEPTH == 320 && \
    !defined(DES_BS_NO_MMX)
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
    __m256i f;
    __m64 g;
} vtype;
#define vst(dst, ofs, src) \
    _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
    (dst).f = _mm256_xor_si256((a).f, (b).f); \
    (dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
    (dst).f = _mm256_and_si256((a).f, (b).f); \
    (dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
    (dst).f = _mm256_or_si256((a).f, (b).f); \
    (dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
    (dst).f = _mm256_andnot_si256((b).f, (a).f); \
    (dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl(dst, src, shift) \
    (dst).f = _mm256_slli_epi64((src).f, (shift)); \
    (dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
    (dst).f = _mm256_srli_epi64((src).f, (shift)); \
    (dst).g = _mm_srli_si64((src).g, (shift))
/* AVX 256-bit plus one scalar machine word per virtual vector */
#elif defined(__AVX__) && \
    ((ARCH_BITS == 64 && DES_BS_DEPTH == 320) || \
    (ARCH_BITS == 32 && DES_BS_DEPTH == 288))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
    __m256i f;
    unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
    _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
    (dst).f = _mm256_xor_si256((a).f, (b).f); \
    (dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
    (dst).f = _mm256_xor_si256((a).f, vones.f); \
    (dst).g = ~(a).g
#define vand(dst, a, b) \
    (dst).f = _mm256_and_si256((a).f, (b).f); \
    (dst).g = (a).g & (b).g
#define vor(dst, a, b) \
    (dst).f = _mm256_or_si256((a).f, (b).f); \
    (dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
    (dst).f = _mm256_andnot_si256((b).f, (a).f); \
    (dst).g = (a).g & ~(b).g
#define vshl(dst, src, shift) \
    (dst).f = _mm256_slli_epi64((src).f, (shift)); \
    (dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
    (dst).f = _mm256_srli_epi64((src).f, (shift)); \
    (dst).g = (src).g >> (shift)
/* AVX 256-bit plus MMX 64-bit plus one scalar machine word */
#elif defined(__AVX__) && defined(__MMX__) && \
    ((ARCH_BITS == 64 && DES_BS_DEPTH == 384) || \
    (ARCH_BITS == 32 && DES_BS_DEPTH == 352))
#include <immintrin.h>
#include <mmintrin.h>
typedef struct {
    __m256i f;
    __m64 g;
    unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
    _mm256_store_si256(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
    (dst).f = _mm256_xor_si256((a).f, (b).f); \
    (dst).g = _mm_xor_si64((a).g, (b).g); \
    (dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
    (dst).f = _mm256_xor_si256((a).f, vones.f); \
    (dst).g = _mm_xor_si64((a).g, vones.g); \
    (dst).h = ~(a).h
#define vand(dst, a, b) \
    (dst).f = _mm256_and_si256((a).f, (b).f); \
    (dst).g = _mm_and_si64((a).g, (b).g); \
    (dst).h = (a).h & (b).h
#define vor(dst, a, b) \
    (dst).f = _mm256_or_si256((a).f, (b).f); \
    (dst).g = _mm_or_si64((a).g, (b).g); \
    (dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
    (dst).f = _mm256_andnot_si256((b).f, (a).f); \
    (dst).g = _mm_andnot_si64((b).g, (a).g); \
    (dst).h = (a).h & ~(b).h
#define vshl(dst, src, shift) \
    (dst).f = _mm256_slli_epi64((src).f, (shift)); \
    (dst).g = _mm_slli_si64((src).g, (shift)); \
    (dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
    (dst).f = _mm256_srli_epi64((src).f, (shift)); \
    (dst).g = _mm_srli_si64((src).g, (shift)); \
    (dst).h = (src).h >> (shift)
/* SSE2: one 128-bit register per virtual vector */
#elif defined(__SSE2__) && DES_BS_DEPTH == 128
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef __m128i vtype;
#define vst(dst, ofs, src) \
    _mm_store_si128((vtype *)((DES_bs_vector *)&(dst) + (ofs)), (src))
#define vxorf(a, b) \
    _mm_xor_si128((a), (b))
#define vand(dst, a, b) \
    (dst) = _mm_and_si128((a), (b))
#define vor(dst, a, b) \
    (dst) = _mm_or_si128((a), (b))
#define vandn(dst, a, b) \
    (dst) = _mm_andnot_si128((b), (a))
#ifdef __XOP__
#define vsel(dst, a, b, c) \
    (dst) = _mm_cmov_si128((b), (a), (c))
#else
#define vsel(dst, a, b, c) \
    (dst) = _mm_xor_si128(_mm_andnot_si128((c), (a)), \
        _mm_and_si128((c), (b)))
#endif
#define vshl1(dst, src) \
    (dst) = _mm_add_epi8((src), (src))
#define vshl(dst, src, shift) \
    (dst) = _mm_slli_epi64((src), (shift))
#define vshr(dst, src, shift) \
    (dst) = _mm_srli_epi64((src), (shift))
/* Two SSE2 128-bit registers per virtual vector */
#elif defined(__SSE2__) && DES_BS_DEPTH == 256 && defined(DES_BS_NO_MMX)
#ifdef __AVX__
#include <immintrin.h>
#ifdef __XOP__
#include <x86intrin.h>
#endif
#else
#include <emmintrin.h>
#endif
typedef struct {
    __m128i f, g;
} vtype;
#define vst(dst, ofs, src) \
    _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g, \
        (src).g)
#define vxor(dst, a, b) \
    (dst).f = _mm_xor_si128((a).f, (b).f); \
    (dst).g = _mm_xor_si128((a).g, (b).g)
#define vand(dst, a, b) \
    (dst).f = _mm_and_si128((a).f, (b).f); \
    (dst).g = _mm_and_si128((a).g, (b).g)
#define vor(dst, a, b) \
    (dst).f = _mm_or_si128((a).f, (b).f); \
    (dst).g = _mm_or_si128((a).g, (b).g)
#define vandn(dst, a, b) \
    (dst).f = _mm_andnot_si128((b).f, (a).f); \
    (dst).g = _mm_andnot_si128((b).g, (a).g)
#ifdef __XOP__
#define vsel(dst, a, b, c) \
    (dst).f = _mm_cmov_si128((b).f, (a).f, (c).f); \
    (dst).g = _mm_cmov_si128((b).g, (a).g, (c).g)
#endif
#define vshl1(dst, src) \
    (dst).f = _mm_add_epi8((src).f, (src).f); \
    (dst).g = _mm_add_epi8((src).g, (src).g)
#define vshl(dst, src, shift) \
    (dst).f = _mm_slli_epi64((src).f, (shift)); \
    (dst).g = _mm_slli_epi64((src).g, (shift))
#define vshr(dst, src, shift) \
    (dst).f = _mm_srli_epi64((src).f, (shift)); \
    (dst).g = _mm_srli_epi64((src).g, (shift))
/* SSE2 128-bit plus MMX 64-bit register per virtual vector */
#elif defined(__SSE2__) && defined(__MMX__) && DES_BS_DEPTH == 192 && \
    !defined(DES_BS_NO_MMX)
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
    __m128i f;
    __m64 g;
} vtype;
#define vst(dst, ofs, src) \
    _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
    (dst).f = _mm_xor_si128((a).f, (b).f); \
    (dst).g = _mm_xor_si64((a).g, (b).g)
#define vand(dst, a, b) \
    (dst).f = _mm_and_si128((a).f, (b).f); \
    (dst).g = _mm_and_si64((a).g, (b).g)
#define vor(dst, a, b) \
    (dst).f = _mm_or_si128((a).f, (b).f); \
    (dst).g = _mm_or_si64((a).g, (b).g)
#define vandn(dst, a, b) \
    (dst).f = _mm_andnot_si128((b).f, (a).f); \
    (dst).g = _mm_andnot_si64((b).g, (a).g)
#define vshl1(dst, src) \
    (dst).f = _mm_add_epi8((src).f, (src).f); \
    (dst).g = _mm_add_pi8((src).g, (src).g)
#define vshl(dst, src, shift) \
    (dst).f = _mm_slli_epi64((src).f, (shift)); \
    (dst).g = _mm_slli_si64((src).g, (shift))
#define vshr(dst, src, shift) \
    (dst).f = _mm_srli_epi64((src).f, (shift)); \
    (dst).g = _mm_srli_si64((src).g, (shift))
/* SSE2 128-bit plus one scalar machine word per virtual vector */
#elif defined(__SSE2__) && \
    ((ARCH_BITS == 64 && DES_BS_DEPTH == 192) || \
    (ARCH_BITS == 32 && DES_BS_DEPTH == 160))
#include <emmintrin.h>
typedef struct {
    __m128i f;
    unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
    _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
    (dst).f = _mm_xor_si128((a).f, (b).f); \
    (dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
    (dst).f = _mm_xor_si128((a).f, vones.f); \
    (dst).g = ~(a).g
#define vand(dst, a, b) \
    (dst).f = _mm_and_si128((a).f, (b).f); \
    (dst).g = (a).g & (b).g
#define vor(dst, a, b) \
    (dst).f = _mm_or_si128((a).f, (b).f); \
    (dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
    (dst).f = _mm_andnot_si128((b).f, (a).f); \
    (dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
    (dst).f = _mm_add_epi8((src).f, (src).f); \
    (dst).g = (src).g << 1
#define vshl(dst, src, shift) \
    (dst).f = _mm_slli_epi64((src).f, (shift)); \
    (dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
    (dst).f = _mm_srli_epi64((src).f, (shift)); \
    (dst).g = (src).g >> (shift)
/* SSE2 plus MMX plus one scalar machine word per virtual vector */
#elif defined(__SSE2__) && defined(__MMX__) && \
    ((ARCH_BITS == 64 && DES_BS_DEPTH == 256) || \
    (ARCH_BITS == 32 && DES_BS_DEPTH == 224))
#include <emmintrin.h>
#include <mmintrin.h>
typedef struct {
    __m128i f;
    __m64 g;
    unsigned ARCH_WORD h;
} vtype;
#define vst(dst, ofs, src) \
    _mm_store_si128(&((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f, \
        (src).f); \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g; \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->h = (src).h
#define vxor(dst, a, b) \
    (dst).f = _mm_xor_si128((a).f, (b).f); \
    (dst).g = _mm_xor_si64((a).g, (b).g); \
    (dst).h = (a).h ^ (b).h
#define vnot(dst, a) \
    (dst).f = _mm_xor_si128((a).f, vones.f); \
    (dst).g = _mm_xor_si64((a).g, vones.g); \
    (dst).h = ~(a).h
#define vand(dst, a, b) \
    (dst).f = _mm_and_si128((a).f, (b).f); \
    (dst).g = _mm_and_si64((a).g, (b).g); \
    (dst).h = (a).h & (b).h
#define vor(dst, a, b) \
    (dst).f = _mm_or_si128((a).f, (b).f); \
    (dst).g = _mm_or_si64((a).g, (b).g); \
    (dst).h = (a).h | (b).h
#define vandn(dst, a, b) \
    (dst).f = _mm_andnot_si128((b).f, (a).f); \
    (dst).g = _mm_andnot_si64((b).g, (a).g); \
    (dst).h = (a).h & ~(b).h
#define vshl1(dst, src) \
    (dst).f = _mm_add_epi8((src).f, (src).f); \
    (dst).g = _mm_add_pi8((src).g, (src).g); \
    (dst).h = (src).h << 1
#define vshl(dst, src, shift) \
    (dst).f = _mm_slli_epi64((src).f, (shift)); \
    (dst).g = _mm_slli_si64((src).g, (shift)); \
    (dst).h = (src).h << (shift)
#define vshr(dst, src, shift) \
    (dst).f = _mm_srli_epi64((src).f, (shift)); \
    (dst).g = _mm_srli_si64((src).g, (shift)); \
    (dst).h = (src).h >> (shift)
/* MMX only: one 64-bit MMX register per virtual vector */
#elif defined(__MMX__) && ARCH_BITS != 64 && DES_BS_DEPTH == 64
#include <mmintrin.h>
typedef __m64 vtype;
#define vxorf(a, b) \
    _mm_xor_si64((a), (b))
#define vand(dst, a, b) \
    (dst) = _mm_and_si64((a), (b))
#define vor(dst, a, b) \
    (dst) = _mm_or_si64((a), (b))
#define vandn(dst, a, b) \
    (dst) = _mm_andnot_si64((b), (a))
#define vshl1(dst, src) \
    (dst) = _mm_add_pi8((src), (src))
#define vshl(dst, src, shift) \
    (dst) = _mm_slli_si64((src), (shift))
#define vshr(dst, src, shift) \
    (dst) = _mm_srli_si64((src), (shift))
/* MMX 64-bit plus one 32-bit scalar word per virtual vector */
#elif defined(__MMX__) && ARCH_BITS == 32 && DES_BS_DEPTH == 96
#include <mmintrin.h>
typedef struct {
    __m64 f;
    unsigned ARCH_WORD g;
} vtype;
#define vst(dst, ofs, src) \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->f = (src).f; \
    ((vtype *)((DES_bs_vector *)&(dst) + (ofs)))->g = (src).g
#define vxor(dst, a, b) \
    (dst).f = _mm_xor_si64((a).f, (b).f); \
    (dst).g = (a).g ^ (b).g
#define vnot(dst, a) \
    (dst).f = _mm_xor_si64((a).f, vones.f); \
    (dst).g = ~(a).g
#define vand(dst, a, b) \
    (dst).f = _mm_and_si64((a).f, (b).f); \
    (dst).g = (a).g & (b).g
#define vor(dst, a, b) \
    (dst).f = _mm_or_si64((a).f, (b).f); \
    (dst).g = (a).g | (b).g
#define vandn(dst, a, b) \
    (dst).f = _mm_andnot_si64((b).f, (a).f); \
    (dst).g = (a).g & ~(b).g
#define vshl1(dst, src) \
    (dst).f = _mm_add_pi8((src).f, (src).f); \
    (dst).g = (src).g << 1
#define vshl(dst, src, shift) \
    (dst).f = _mm_slli_si64((src).f, (shift)); \
    (dst).g = (src).g << (shift)
#define vshr(dst, src, shift) \
    (dst).f = _mm_srli_si64((src).f, (shift)); \
    (dst).g = (src).g >> (shift)
/* Generic fallback: plain machine words, optionally looped over a vector */
#else
#if DES_BS_VECTOR
#undef DES_BS_VECTOR_LOOPS
#define DES_BS_VECTOR_LOOPS 1
#endif
typedef unsigned ARCH_WORD vtype;
#define vxorf(a, b) \
    ((a) ^ (b))
#define vnot(dst, a) \
    (dst) = ~(a)
#define vand(dst, a, b) \
    (dst) = (a) & (b)
#define vor(dst, a, b) \
    (dst) = (a) | (b)
#define vandn(dst, a, b) \
    (dst) = (a) & ~(b)
#define vsel(dst, a, b, c) \
    (dst) = (((a) & ~(c)) ^ ((b) & (c)))
#define vshl(dst, src, shift) \
    (dst) = (src) << (shift)
#define vshr(dst, src, shift) \
    (dst) = (src) >> (shift)
/* Assume that 0 always fits in one load immediate instruction */
#undef vzero
#define vzero 0
/* Archs friendly to use of immediate values */
#if defined(__x86_64__) || defined(__i386__)
#undef vones
#define vones (~(vtype)0)
#endif
#endif
/* Default definitions for whatever the chosen branch did not provide */
#ifndef vst
#define vst(dst, ofs, src) \
    *((vtype *)((DES_bs_vector *)&(dst) + (ofs))) = (src)
#endif
#if !defined(vxor) && defined(vxorf)
#define vxor(dst, a, b) \
    (dst) = vxorf((a), (b))
#endif
#if !defined(vxorf) && defined(vxor)
/*
 * This requires gcc's "Statement Exprs" extension (also supported by a number
 * of other C compilers).
 */
#define vxorf(a, b) \
    ({ vtype tmp; vxor(tmp, (a), (b)); tmp; })
#endif
#ifndef vnot
#define vnot(dst, a) \
    vxor((dst), (a), vones)
#endif
#ifndef vshl1
#define vshl1(dst, src) \
    vshl((dst), (src), 1)
#endif
/* kvtype: the vector type used for key setup (may differ from vtype) */
#if !DES_BS_VECTOR_LOOPS && defined(vshl) && defined(vshr)
#define DES_BS_VECTOR_LOOPS_K 0
#define DEPTH_K
#define for_each_depth_k()
#define kvtype vtype
#define kvand vand
#define kvor vor
#define kvshl1 vshl1
#define kvshl vshl
#define kvshr vshr
#else
#if DES_BS_VECTOR
#define DES_BS_VECTOR_LOOPS_K 1
#define DEPTH_K [depth]
#define for_each_depth_k() \
    for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#define DES_BS_VECTOR_LOOPS_K 0
#endif
typedef unsigned ARCH_WORD kvtype;
#define kvand(dst, a, b) \
    (dst) = (a) & (b)
#define kvor(dst, a, b) \
    (dst) = (a) | (b)
#define kvshl1(dst, src) \
    (dst) = (src) << 1
#define kvshl(dst, src, shift) \
    (dst) = (src) << (shift)
#define kvshr(dst, src, shift) \
    (dst) = (src) >> (shift)
#endif
/* Per-byte bit masks, as immediates where the arch allows it */
#if !DES_BS_VECTOR || DES_BS_VECTOR_LOOPS_K
#ifdef __x86_64__
#define mask01 0x0101010101010101UL
#elif __i386__
#define mask01 0x01010101UL
#else
#undef mask01
#endif
#ifdef mask01
#define mask02 (mask01 << 1)
#define mask04 (mask01 << 2)
#define mask08 (mask01 << 3)
#define mask10 (mask01 << 4)
#define mask20 (mask01 << 5)
#define mask40 (mask01 << 6)
#define mask80 (mask01 << 7)
#endif
#endif
#ifndef mask01
#define mask01 (*(kvtype *)&DES_bs_all.masks[0])
#define mask02 (*(kvtype *)&DES_bs_all.masks[1])
#define mask04 (*(kvtype *)&DES_bs_all.masks[2])
#define mask08 (*(kvtype *)&DES_bs_all.masks[3])
#define mask10 (*(kvtype *)&DES_bs_all.masks[4])
#define mask20 (*(kvtype *)&DES_bs_all.masks[5])
#define mask40 (*(kvtype *)&DES_bs_all.masks[6])
#define mask80 (*(kvtype *)&DES_bs_all.masks[7])
#endif
#ifdef __i386__
/* register-starved */
#define LOAD_V \
    kvtype v0 = *(kvtype *)&vp[0]; \
    kvtype v4 = *(kvtype *)&vp[4];
#define v1 *(kvtype *)&vp[1]
#define v2 *(kvtype *)&vp[2]
#define v3 *(kvtype *)&vp[3]
#define v5 *(kvtype *)&vp[5]
#define v6 *(kvtype *)&vp[6]
#define v7 *(kvtype *)&vp[7]
#else
#define LOAD_V \
    kvtype v0 = *(kvtype *)&vp[0]; \
    kvtype v1 = *(kvtype *)&vp[1]; \
    kvtype v2 = *(kvtype *)&vp[2]; \
    kvtype v3 = *(kvtype *)&vp[3]; \
    kvtype v4 = *(kvtype *)&vp[4]; \
    kvtype v5 = *(kvtype *)&vp[5]; \
    kvtype v6 = *(kvtype *)&vp[6]; \
    kvtype v7 = *(kvtype *)&vp[7];
#endif
/* mask, shift, and OR-accumulate helpers used by FINALIZE_NEXT_KEY_BIT_* */
#define kvand_shl1_or(dst, src, mask) \
    kvand(tmp, src, mask); \
    kvshl1(tmp, tmp); \
    kvor(dst, dst, tmp)
#define kvand_shl_or(dst, src, mask, shift) \
    kvand(tmp, src, mask); \
    kvshl(tmp, tmp, shift); \
    kvor(dst, dst, tmp)
#define kvand_shl1(dst, src, mask) \
    kvand(tmp, src, mask); \
    kvshl1(dst, tmp)
#define kvand_or(dst, src, mask) \
    kvand(tmp, src, mask); \
    kvor(dst, dst, tmp)
#define kvand_shr_or(dst, src, mask, shift) \
    kvand(tmp, src, mask); \
    kvshr(tmp, tmp, shift); \
    kvor(dst, dst, tmp)
#define kvand_shr(dst, src, mask, shift) \
    kvand(tmp, src, mask); \
    kvshr(dst, tmp, shift)
/* Gather bit N of each of the 8 key bytes v0..v7 into *kp, then advance kp */
#define FINALIZE_NEXT_KEY_BIT_0 { \
    kvtype m = mask01, va, vb, tmp; \
    kvand(va, v0, m); \
    kvand_shl1(vb, v1, m); \
    kvand_shl_or(va, v2, m, 2); \
    kvand_shl_or(vb, v3, m, 3); \
    kvand_shl_or(va, v4, m, 4); \
    kvand_shl_or(vb, v5, m, 5); \
    kvand_shl_or(va, v6, m, 6); \
    kvand_shl_or(vb, v7, m, 7); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_1 { \
    kvtype m = mask02, va, vb, tmp; \
    kvand_shr(va, v0, m, 1); \
    kvand(vb, v1, m); \
    kvand_shl1_or(va, v2, m); \
    kvand_shl_or(vb, v3, m, 2); \
    kvand_shl_or(va, v4, m, 3); \
    kvand_shl_or(vb, v5, m, 4); \
    kvand_shl_or(va, v6, m, 5); \
    kvand_shl_or(vb, v7, m, 6); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_2 { \
    kvtype m = mask04, va, vb, tmp; \
    kvand_shr(va, v0, m, 2); \
    kvand_shr(vb, v1, m, 1); \
    kvand_or(va, v2, m); \
    kvand_shl1_or(vb, v3, m); \
    kvand_shl_or(va, v4, m, 2); \
    kvand_shl_or(vb, v5, m, 3); \
    kvand_shl_or(va, v6, m, 4); \
    kvand_shl_or(vb, v7, m, 5); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_3 { \
    kvtype m = mask08, va, vb, tmp; \
    kvand_shr(va, v0, m, 3); \
    kvand_shr(vb, v1, m, 2); \
    kvand_shr_or(va, v2, m, 1); \
    kvand_or(vb, v3, m); \
    kvand_shl1_or(va, v4, m); \
    kvand_shl_or(vb, v5, m, 2); \
    kvand_shl_or(va, v6, m, 3); \
    kvand_shl_or(vb, v7, m, 4); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_4 { \
    kvtype m = mask10, va, vb, tmp; \
    kvand_shr(va, v0, m, 4); \
    kvand_shr(vb, v1, m, 3); \
    kvand_shr_or(va, v2, m, 2); \
    kvand_shr_or(vb, v3, m, 1); \
    kvand_or(va, v4, m); \
    kvand_shl1_or(vb, v5, m); \
    kvand_shl_or(va, v6, m, 2); \
    kvand_shl_or(vb, v7, m, 3); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_5 { \
    kvtype m = mask20, va, vb, tmp; \
    kvand_shr(va, v0, m, 5); \
    kvand_shr(vb, v1, m, 4); \
    kvand_shr_or(va, v2, m, 3); \
    kvand_shr_or(vb, v3, m, 2); \
    kvand_shr_or(va, v4, m, 1); \
    kvand_or(vb, v5, m); \
    kvand_shl1_or(va, v6, m); \
    kvand_shl_or(vb, v7, m, 2); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_6 { \
    kvtype m = mask40, va, vb, tmp; \
    kvand_shr(va, v0, m, 6); \
    kvand_shr(vb, v1, m, 5); \
    kvand_shr_or(va, v2, m, 4); \
    kvand_shr_or(vb, v3, m, 3); \
    kvand_shr_or(va, v4, m, 2); \
    kvand_shr_or(vb, v5, m, 1); \
    kvand_or(va, v6, m); \
    kvand_shl1_or(vb, v7, m); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}
#define FINALIZE_NEXT_KEY_BIT_7 { \
    kvtype m = mask80, va, vb, tmp; \
    kvand_shr(va, v0, m, 7); \
    kvand_shr(vb, v1, m, 6); \
    kvand_shr_or(va, v2, m, 5); \
    kvand_shr_or(vb, v3, m, 4); \
    kvand_shr_or(va, v4, m, 3); \
    kvand_shr_or(vb, v5, m, 2); \
    kvand_shr_or(va, v6, m, 1); \
    kvand_or(vb, v7, m); \
    kvor(*(kvtype *)kp, va, vb); \
    kp++; \
}

/*
 * Transpose the raw per-candidate keys in DES_bs_all.xkeys into the
 * bitsliced key layout DES_bs_all.K; bits 0..6 of each of 8 key bytes
 * (bit 7 is the DES parity bit and is skipped here).
 */
#if DES_bs_mt
static MAYBE_INLINE void DES_bs_finalize_keys(int t)
#else
static MAYBE_INLINE void DES_bs_finalize_keys(void)
#endif
{
#if DES_BS_VECTOR_LOOPS_K
    int depth;
#endif
    for_each_depth_k() {
        DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K;
        int ic;
        for (ic = 0; ic < 8; ic++) {
            DES_bs_vector *vp =
                (DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K;
            LOAD_V
            FINALIZE_NEXT_KEY_BIT_0
            FINALIZE_NEXT_KEY_BIT_1
            FINALIZE_NEXT_KEY_BIT_2
            FINALIZE_NEXT_KEY_BIT_3
            FINALIZE_NEXT_KEY_BIT_4
            FINALIZE_NEXT_KEY_BIT_5
            FINALIZE_NEXT_KEY_BIT_6
        }
    }

#if DES_BS_EXPAND
    /* Copy the pointer-based key schedule into the expanded KS.v form */
    {
        int index;
        for (index = 0; index < 0x300; index++)
        for_each_depth_k() {
#if DES_BS_VECTOR_LOOPS_K
            DES_bs_all.KS.v[index] DEPTH_K =
                DES_bs_all.KSp[index] DEPTH_K;
#else
            vst(*(kvtype *)&DES_bs_all.KS.v[index], 0,
                *(kvtype *)DES_bs_all.KSp[index]);
#endif
        }
    }
#endif
}
#endif

/*
 * Record the salt and update the expansion-table pointers E.E for every
 * salt bit that changed: a set salt bit swaps the corresponding pair of
 * E-box inputs.  Stops early once the remaining bits of old and new agree.
 */
#if DES_bs_mt
MAYBE_INLINE void DES_bs_set_salt_for_thread(int t, unsigned int salt)
#else
void DES_bs_set_salt(ARCH_WORD salt)
#endif
{
    unsigned int new = salt;
    unsigned int old = DES_bs_all.salt;
    int dst;

    DES_bs_all.salt = new;

    for (dst = 0; dst < 24; dst++) {
        if ((new ^ old) & 1) {
            DES_bs_vector *sp1, *sp2;
            int src1 = dst;
            int src2 = dst + 24;
            if (new & 1) {
                src1 = src2;
                src2 = dst;
            }
            sp1 = DES_bs_all.Ens[src1];
            sp2 = DES_bs_all.Ens[src2];
            DES_bs_all.E.E[dst] = (ARCH_WORD *)sp1;
            DES_bs_all.E.E[dst + 24] = (ARCH_WORD *)sp2;
            DES_bs_all.E.E[dst + 48] = (ARCH_WORD *)(sp1 + 32);
            DES_bs_all.E.E[dst + 72] = (ARCH_WORD *)(sp2 + 32);
        }
        new >>= 1;
        old >>= 1;
        if (new == old)
            break;
    }
}

#if !DES_BS_ASM

/* Include the S-boxes here so that the compiler can inline them */
#if DES_BS == 3
#include "sboxes-s.c"
#elif DES_BS == 2
#include "sboxes.c"
#else
#undef andn
#include "nonstd.c"
#endif

#define b DES_bs_all.B
#define e DES_bs_all.E.E

#if DES_BS_VECTOR_LOOPS
#define kd [depth]
#define bd [depth]
#define ed [depth]
#define DEPTH [depth]
#define for_each_depth() \
    for (depth = 0; depth < DES_BS_VECTOR; depth++)
#else
#if DES_BS_EXPAND
#define kd
#else
#define kd [0]
#endif
#define bd
#define ed [0]
#define DEPTH
#define for_each_depth()
#endif

#define DES_bs_clear_block_8(i) \
    for_each_depth() { \
        vst(b[i] bd, 0, zero); \
        vst(b[i] bd, 1, zero); \
        vst(b[i] bd, 2, zero); \
        vst(b[i] bd, 3, zero); \
        vst(b[i] bd, 4, zero); \
        vst(b[i] bd, 5, zero); \
        vst(b[i] bd, 6, zero); \
        vst(b[i] bd, 7, zero); \
    }

#define DES_bs_clear_block \
    DES_bs_clear_block_8(0); \
    DES_bs_clear_block_8(8); \
    DES_bs_clear_block_8(16); \
    DES_bs_clear_block_8(24); \
    DES_bs_clear_block_8(32); \
    DES_bs_clear_block_8(40); \
    DES_bs_clear_block_8(48); \
    DES_bs_clear_block_8(56);

#define DES_bs_set_block_8(i, v0, v1, v2, v3, v4, v5, v6, v7) \
    for_each_depth() { \
        vst(b[i] bd, 0, v0); \
        vst(b[i] bd, 1, v1); \
        vst(b[i] bd, 2, v2); \
        vst(b[i] bd, 3, v3); \
        vst(b[i] bd, 4, v4); \
        vst(b[i] bd, 5, v5); \
        vst(b[i] bd, 6, v6); \
        vst(b[i] bd, 7, v7); \
    }

/* x: E-box input XOR key; y: block bit XOR key; z: address of block bit */
#define x(p) vxorf(*(vtype *)&e[p] ed, *(vtype *)&k[p] kd)
#define y(p, q) vxorf(*(vtype *)&b[p] bd, *(vtype *)&k[q] kd)
#define z(r) ((vtype *)&b[r] bd)

/*
 * Bitsliced DES with 25 iterations over all keys_count keys, with the
 * salt-dependent E-box expansion set up per thread.
 */
void DES_bs_crypt_25(int keys_count)
{
#if DES_bs_mt
    int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH;
#endif

#ifdef _OPENMP
#pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count)
#endif
    for_each_t(n) {
#if DES_BS_EXPAND
        DES_bs_vector *k;
#else
        ARCH_WORD **k;
#endif
        int iterations, rounds_and_swapped;
#if DES_BS_VECTOR_LOOPS
        int depth;
#endif

        if (DES_bs_all.keys_changed)
            goto finalize_keys;

body:
#if DES_bs_mt
        DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt);
#endif

        {
            vtype zero = vzero;
            DES_bs_clear_block
        }

#if DES_BS_EXPAND
        k =
DES_bs_all.KS.v; #else k = DES_bs_all.KS.p; #endif rounds_and_swapped = 8; iterations = 25; start: for_each_depth() s1(x(0), x(1), x(2), x(3), x(4), x(5), z(40), z(48), z(54), z(62)); for_each_depth() s2(x(6), x(7), x(8), x(9), x(10), x(11), z(44), z(59), z(33), z(49)); for_each_depth() s3(y(7, 12), y(8, 13), y(9, 14), y(10, 15), y(11, 16), y(12, 17), z(55), z(47), z(61), z(37)); for_each_depth() s4(y(11, 18), y(12, 19), y(13, 20), y(14, 21), y(15, 22), y(16, 23), z(57), z(51), z(41), z(32)); for_each_depth() s5(x(24), x(25), x(26), x(27), x(28), x(29), z(39), z(45), z(56), z(34)); for_each_depth() s6(x(30), x(31), x(32), x(33), x(34), x(35), z(35), z(60), z(42), z(50)); for_each_depth() s7(y(23, 36), y(24, 37), y(25, 38), y(26, 39), y(27, 40), y(28, 41), z(63), z(43), z(53), z(38)); for_each_depth() s8(y(27, 42), y(28, 43), y(29, 44), y(30, 45), y(31, 46), y(0, 47), z(36), z(58), z(46), z(52)); if (rounds_and_swapped == 0x100) goto next; swap: for_each_depth() s1(x(48), x(49), x(50), x(51), x(52), x(53), z(8), z(16), z(22), z(30)); for_each_depth() s2(x(54), x(55), x(56), x(57), x(58), x(59), z(12), z(27), z(1), z(17)); for_each_depth() s3(y(39, 60), y(40, 61), y(41, 62), y(42, 63), y(43, 64), y(44, 65), z(23), z(15), z(29), z(5)); for_each_depth() s4(y(43, 66), y(44, 67), y(45, 68), y(46, 69), y(47, 70), y(48, 71), z(25), z(19), z(9), z(0)); for_each_depth() s5(x(72), x(73), x(74), x(75), x(76), x(77), z(7), z(13), z(24), z(2)); for_each_depth() s6(x(78), x(79), x(80), x(81), x(82), x(83), z(3), z(28), z(10), z(18)); for_each_depth() s7(y(55, 84), y(56, 85), y(57, 86), y(58, 87), y(59, 88), y(60, 89), z(31), z(11), z(21), z(6)); for_each_depth() s8(y(59, 90), y(60, 91), y(61, 92), y(62, 93), y(63, 94), y(32, 95), z(4), z(26), z(14), z(20)); k += 96; if (--rounds_and_swapped) goto start; k -= (0x300 + 48); rounds_and_swapped = 0x108; if (--iterations) goto swap; #if DES_bs_mt continue; #else return; #endif next: k -= (0x300 - 48); rounds_and_swapped = 8; 
iterations--; goto start; finalize_keys: DES_bs_all.keys_changed = 0; #if DES_bs_mt DES_bs_finalize_keys(t); #else DES_bs_finalize_keys(); #endif goto body; } } void DES_bs_crypt(int count, int keys_count) { #if DES_bs_mt int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH; #endif #ifdef _OPENMP #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, count, keys_count) #endif for_each_t(n) { #if DES_BS_EXPAND DES_bs_vector *k; #else ARCH_WORD **k; #endif int iterations, rounds_and_swapped; #if DES_BS_VECTOR_LOOPS int depth; #endif if (DES_bs_all.keys_changed) goto finalize_keys; body: #if DES_bs_mt DES_bs_set_salt_for_thread(t, DES_bs_all_by_tnum(-1).salt); #endif { vtype zero = vzero; DES_bs_clear_block } #if DES_BS_EXPAND k = DES_bs_all.KS.v; #else k = DES_bs_all.KS.p; #endif rounds_and_swapped = 8; iterations = count; start: for_each_depth() s1(x(0), x(1), x(2), x(3), x(4), x(5), z(40), z(48), z(54), z(62)); for_each_depth() s2(x(6), x(7), x(8), x(9), x(10), x(11), z(44), z(59), z(33), z(49)); for_each_depth() s3(x(12), x(13), x(14), x(15), x(16), x(17), z(55), z(47), z(61), z(37)); for_each_depth() s4(x(18), x(19), x(20), x(21), x(22), x(23), z(57), z(51), z(41), z(32)); for_each_depth() s5(x(24), x(25), x(26), x(27), x(28), x(29), z(39), z(45), z(56), z(34)); for_each_depth() s6(x(30), x(31), x(32), x(33), x(34), x(35), z(35), z(60), z(42), z(50)); for_each_depth() s7(x(36), x(37), x(38), x(39), x(40), x(41), z(63), z(43), z(53), z(38)); for_each_depth() s8(x(42), x(43), x(44), x(45), x(46), x(47), z(36), z(58), z(46), z(52)); if (rounds_and_swapped == 0x100) goto next; swap: for_each_depth() s1(x(48), x(49), x(50), x(51), x(52), x(53), z(8), z(16), z(22), z(30)); for_each_depth() s2(x(54), x(55), x(56), x(57), x(58), x(59), z(12), z(27), z(1), z(17)); for_each_depth() s3(x(60), x(61), x(62), x(63), x(64), x(65), z(23), z(15), z(29), z(5)); for_each_depth() s4(x(66), x(67), x(68), x(69), x(70), x(71), z(25), z(19), z(9), z(0)); 
for_each_depth() s5(x(72), x(73), x(74), x(75), x(76), x(77), z(7), z(13), z(24), z(2)); for_each_depth() s6(x(78), x(79), x(80), x(81), x(82), x(83), z(3), z(28), z(10), z(18)); for_each_depth() s7(x(84), x(85), x(86), x(87), x(88), x(89), z(31), z(11), z(21), z(6)); for_each_depth() s8(x(90), x(91), x(92), x(93), x(94), x(95), z(4), z(26), z(14), z(20)); k += 96; if (--rounds_and_swapped) goto start; k -= (0x300 + 48); rounds_and_swapped = 0x108; if (--iterations) goto swap; #if DES_bs_mt continue; #else return; #endif next: k -= (0x300 - 48); rounds_and_swapped = 8; if (--iterations) goto start; #if DES_bs_mt continue; #else return; #endif finalize_keys: DES_bs_all.keys_changed = 0; #if DES_bs_mt DES_bs_finalize_keys(t); #else DES_bs_finalize_keys(); #endif goto body; } } #undef x #if DES_bs_mt static MAYBE_INLINE void DES_bs_finalize_keys_LM(int t) #else static MAYBE_INLINE void DES_bs_finalize_keys_LM(void) #endif { #if DES_BS_VECTOR_LOOPS_K int depth; #endif for_each_depth_k() { DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K; int ic; for (ic = 0; ic < 7; ic++) { DES_bs_vector *vp = (DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K; LOAD_V FINALIZE_NEXT_KEY_BIT_0 FINALIZE_NEXT_KEY_BIT_1 FINALIZE_NEXT_KEY_BIT_2 FINALIZE_NEXT_KEY_BIT_3 FINALIZE_NEXT_KEY_BIT_4 FINALIZE_NEXT_KEY_BIT_5 FINALIZE_NEXT_KEY_BIT_6 FINALIZE_NEXT_KEY_BIT_7 } } } #undef kd #if DES_BS_VECTOR_LOOPS #define kd [depth] #else #define kd [0] #endif int DES_bs_crypt_LM(int *pcount, struct db_salt *salt) { int keys_count = *pcount; #if DES_bs_mt int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH; #endif #ifdef _OPENMP #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count) #endif for_each_t(n) { ARCH_WORD **k; int rounds; #if DES_BS_VECTOR_LOOPS int depth; #endif { vtype z = vzero, o = vones; DES_bs_set_block_8(0, z, z, z, z, z, z, z, z); DES_bs_set_block_8(8, o, o, o, z, o, z, z, z); DES_bs_set_block_8(16, z, z, z, z, z, z, z, o); 
DES_bs_set_block_8(24, z, z, o, z, z, o, o, o); DES_bs_set_block_8(32, z, z, z, o, z, o, o, o); DES_bs_set_block_8(40, z, z, z, z, z, o, z, z); DES_bs_set_block_8(48, o, o, z, z, z, z, o, z); DES_bs_set_block_8(56, o, z, o, z, o, o, o, o); } #if DES_bs_mt DES_bs_finalize_keys_LM(t); #else DES_bs_finalize_keys_LM(); #endif k = DES_bs_all.KS.p; rounds = 8; do { for_each_depth() s1(y(31, 0), y(0, 1), y(1, 2), y(2, 3), y(3, 4), y(4, 5), z(40), z(48), z(54), z(62)); for_each_depth() s2(y(3, 6), y(4, 7), y(5, 8), y(6, 9), y(7, 10), y(8, 11), z(44), z(59), z(33), z(49)); for_each_depth() s3(y(7, 12), y(8, 13), y(9, 14), y(10, 15), y(11, 16), y(12, 17), z(55), z(47), z(61), z(37)); for_each_depth() s4(y(11, 18), y(12, 19), y(13, 20), y(14, 21), y(15, 22), y(16, 23), z(57), z(51), z(41), z(32)); for_each_depth() s5(y(15, 24), y(16, 25), y(17, 26), y(18, 27), y(19, 28), y(20, 29), z(39), z(45), z(56), z(34)); for_each_depth() s6(y(19, 30), y(20, 31), y(21, 32), y(22, 33), y(23, 34), y(24, 35), z(35), z(60), z(42), z(50)); for_each_depth() s7(y(23, 36), y(24, 37), y(25, 38), y(26, 39), y(27, 40), y(28, 41), z(63), z(43), z(53), z(38)); for_each_depth() s8(y(27, 42), y(28, 43), y(29, 44), y(30, 45), y(31, 46), y(0, 47), z(36), z(58), z(46), z(52)); for_each_depth() s1(y(63, 48), y(32, 49), y(33, 50), y(34, 51), y(35, 52), y(36, 53), z(8), z(16), z(22), z(30)); for_each_depth() s2(y(35, 54), y(36, 55), y(37, 56), y(38, 57), y(39, 58), y(40, 59), z(12), z(27), z(1), z(17)); for_each_depth() s3(y(39, 60), y(40, 61), y(41, 62), y(42, 63), y(43, 64), y(44, 65), z(23), z(15), z(29), z(5)); for_each_depth() s4(y(43, 66), y(44, 67), y(45, 68), y(46, 69), y(47, 70), y(48, 71), z(25), z(19), z(9), z(0)); for_each_depth() s5(y(47, 72), y(48, 73), y(49, 74), y(50, 75), y(51, 76), y(52, 77), z(7), z(13), z(24), z(2)); for_each_depth() s6(y(51, 78), y(52, 79), y(53, 80), y(54, 81), y(55, 82), y(56, 83), z(3), z(28), z(10), z(18)); for_each_depth() s7(y(55, 84), y(56, 85), y(57, 86), y(58, 
87), y(59, 88), y(60, 89), z(31), z(11), z(21), z(6)); for_each_depth() s8(y(59, 90), y(60, 91), y(61, 92), y(62, 93), y(63, 94), y(32, 95), z(4), z(26), z(14), z(20)); k += 96; } while (--rounds); } return keys_count; } #if DES_bs_mt static MAYBE_INLINE void DES_bs_finalize_keys_plain(int t) #else static MAYBE_INLINE void DES_bs_finalize_keys_plain(void) #endif { #if DES_BS_VECTOR_LOOPS_K int depth; #endif for_each_depth_k() { DES_bs_vector *kp = (DES_bs_vector *)&DES_bs_all.K[0] DEPTH_K; int ic; for (ic = 0; ic < 8; ic++) { DES_bs_vector *vp = (DES_bs_vector *)&DES_bs_all.xkeys.v[ic][0] DEPTH_K; LOAD_V FINALIZE_NEXT_KEY_BIT_0 FINALIZE_NEXT_KEY_BIT_1 FINALIZE_NEXT_KEY_BIT_2 FINALIZE_NEXT_KEY_BIT_3 FINALIZE_NEXT_KEY_BIT_4 FINALIZE_NEXT_KEY_BIT_5 FINALIZE_NEXT_KEY_BIT_6 } } } #undef v1 #undef v2 #undef v3 #undef v5 #undef v6 #undef v7 /* Single Des Encryption with no salt */ #undef kd #if DES_BS_VECTOR_LOOPS #define kd [depth] #else #define kd [0] #endif #if DES_BS_VECTOR #define INDX [index] #else #define INDX #endif void DES_bs_crypt_plain(int keys_count) { #if DES_bs_mt int t, n = (keys_count + (DES_BS_DEPTH - 1)) / DES_BS_DEPTH; #endif #ifdef _OPENMP #pragma omp parallel for default(none) private(t) shared(n, DES_bs_all_p, keys_count, DES_bs_P) #endif for_each_t(n) { ARCH_WORD **k; int rounds; #if DES_BS_VECTOR_LOOPS int depth; #endif int i; #if DES_BS_VECTOR int index; #endif for (i=0; i<64; i++) { #if DES_BS_VECTOR for (index=0; index<DES_BS_VECTOR_SIZE; index++) #endif DES_bs_all.B[i]INDX = DES_bs_P[i]INDX; } #if DES_bs_mt DES_bs_finalize_keys_plain(t); #else DES_bs_finalize_keys_plain(); #endif k = DES_bs_all.KS.p; rounds = 8; do { for_each_depth() s1(y(31, 0), y(0, 1), y(1, 2), y(2, 3), y(3, 4), y(4, 5), z(40), z(48), z(54), z(62)); for_each_depth() s2(y(3, 6), y(4, 7), y(5, 8), y(6, 9), y(7, 10), y(8, 11), z(44), z(59), z(33), z(49)); for_each_depth() s3(y(7, 12), y(8, 13), y(9, 14), y(10, 15), y(11, 16), y(12, 17), z(55), z(47), z(61), z(37)); 
for_each_depth() s4(y(11, 18), y(12, 19), y(13, 20), y(14, 21), y(15, 22), y(16, 23), z(57), z(51), z(41), z(32)); for_each_depth() s5(y(15, 24), y(16, 25), y(17, 26), y(18, 27), y(19, 28), y(20, 29), z(39), z(45), z(56), z(34)); for_each_depth() s6(y(19, 30), y(20, 31), y(21, 32), y(22, 33), y(23, 34), y(24, 35), z(35), z(60), z(42), z(50)); for_each_depth() s7(y(23, 36), y(24, 37), y(25, 38), y(26, 39), y(27, 40), y(28, 41), z(63), z(43), z(53), z(38)); for_each_depth() s8(y(27, 42), y(28, 43), y(29, 44), y(30, 45), y(31, 46), y(0, 47), z(36), z(58), z(46), z(52)); for_each_depth() s1(y(63, 48), y(32, 49), y(33, 50), y(34, 51), y(35, 52), y(36, 53), z(8), z(16), z(22), z(30)); for_each_depth() s2(y(35, 54), y(36, 55), y(37, 56), y(38, 57), y(39, 58), y(40, 59), z(12), z(27), z(1), z(17)); for_each_depth() s3(y(39, 60), y(40, 61), y(41, 62), y(42, 63), y(43, 64), y(44, 65), z(23), z(15), z(29), z(5)); for_each_depth() s4(y(43, 66), y(44, 67), y(45, 68), y(46, 69), y(47, 70), y(48, 71), z(25), z(19), z(9), z(0)); for_each_depth() s5(y(47, 72), y(48, 73), y(49, 74), y(50, 75), y(51, 76), y(52, 77), z(7), z(13), z(24), z(2)); for_each_depth() s6(y(51, 78), y(52, 79), y(53, 80), y(54, 81), y(55, 82), y(56, 83), z(3), z(28), z(10), z(18)); for_each_depth() s7(y(55, 84), y(56, 85), y(57, 86), y(58, 87), y(59, 88), y(60, 89), z(31), z(11), z(21), z(6)); for_each_depth() s8(y(59, 90), y(60, 91), y(61, 92), y(62, 93), y(63, 94), y(32, 95), z(4), z(26), z(14), z(20)); k += 96; } while (--rounds); }} #endif #ifdef INDX #undef INDX #endif #if DES_BS_VECTOR #define INDX [k] #else #define INDX #endif void DES_bs_generate_plaintext(unsigned char *plaintext) { int i, j; #if DES_BS_VECTOR int k; #endif /* Set same plaintext for all bit layers */ for (i = 0; i < 64; i++) { j = (int) (plaintext[i/8] >> (7-(i%8))) & 0x01; if (j==1) j = -1; #if DES_BS_VECTOR for (k=0; k<DES_BS_VECTOR_SIZE; k++) #endif DES_bs_P[i]INDX = j; } }
cityblock.c
#include <math.h>

/*
 * cbdm - city-block (Manhattan / L1) distance matrix.
 *
 * Computes r[i * num_rows + j] = sum_k |a[i*num_cols+k] - b[j*num_cols+k]|
 * for every pair of rows (row i of a, row j of b).
 *
 * a        : num_rows x num_cols matrix, row-major
 * b        : num_rows x num_cols matrix, row-major
 *            (NOTE: b is assumed to have the same number of rows as a)
 * r        : output, num_rows x num_rows matrix, row-major
 * num_rows : number of rows in a and b
 * num_cols : number of columns in a and b
 *
 * Fix vs. original: the accumulator used to be a function-scope variable
 * shared into the parallel region through `reduction(+:_r)`.  The per-pair
 * results were only correct because the reduction clause privatizes the
 * variable; the final reduction sum itself was meaningless.  The accumulator
 * is now a per-iteration local, which states the intent directly and needs
 * no data-sharing clause.
 */
void cbdm(double *a, double *b, double *r, int num_rows, int num_cols)
{
    #pragma omp parallel for
    for (int i = 0; i < num_rows; i++)
    {
        for (int j = 0; j < num_rows; j++)
        {
            /* L1 distance between row i of a and row j of b */
            double dist = 0.0;
            for (int k = 0; k < num_cols; k++)
            {
                dist += fabs(a[i * num_cols + k] - b[j * num_cols + k]);
            }
            r[i * num_rows + j] = dist;
        }
    }
}
ocp_nlp_sqp_rti.c
/* * Copyright 2019 Gianluca Frison, Dimitris Kouzoupis, Robin Verschueren, * Andrea Zanelli, Niels van Duijkeren, Jonathan Frey, Tommaso Sartor, * Branimir Novoselnik, Rien Quirynen, Rezart Qelibari, Dang Doan, * Jonas Koenemann, Yutao Chen, Tobias Schöls, Jonas Schlagenhauf, Moritz Diehl * * This file is part of acados. * * The 2-Clause BSD License * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE.; */ #include "acados/ocp_nlp/ocp_nlp_sqp_rti.h" // external #include <assert.h> #include <math.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #if defined(ACADOS_WITH_OPENMP) #include <omp.h> #endif // blasfeo #include "blasfeo/include/blasfeo_d_aux.h" #include "blasfeo/include/blasfeo_d_aux_ext_dep.h" #include "blasfeo/include/blasfeo_d_blas.h" // acados #include "acados/ocp_nlp/ocp_nlp_common.h" #include "acados/ocp_nlp/ocp_nlp_dynamics_cont.h" #include "acados/ocp_nlp/ocp_nlp_reg_common.h" #include "acados/ocp_qp/ocp_qp_common.h" #include "acados/utils/mem.h" #include "acados/utils/print.h" #include "acados/utils/timing.h" #include "acados/utils/types.h" #include "acados_c/ocp_qp_interface.h" /************************************************ * options ************************************************/ int ocp_nlp_sqp_rti_opts_calculate_size(void *config_, void *dims_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; int size = 0; size += sizeof(ocp_nlp_sqp_rti_opts); size += ocp_nlp_opts_calculate_size(config, dims); return size; } void *ocp_nlp_sqp_rti_opts_assign(void *config_, void *dims_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; char *c_ptr = (char *) raw_memory; ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_rti_opts); opts->nlp_opts = ocp_nlp_opts_assign(config, dims, c_ptr); c_ptr += ocp_nlp_opts_calculate_size(config, dims); 
assert((char *) raw_memory + ocp_nlp_sqp_rti_opts_calculate_size(config, dims) >= c_ptr); return opts; } void ocp_nlp_sqp_rti_opts_initialize_default(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_constraints_config **constraints = config->constraints; // int ii; // int N = dims->N; // this first !!! ocp_nlp_opts_initialize_default(config, dims, nlp_opts); // SQP RTI opts opts->ext_qp_res = 0; opts->warm_start_first_qp = false; opts->rti_phase = 0; opts->print_level = 0; // overwrite default submodules opts // do not compute adjoint in dynamics and constraints // int compute_adj = 0; // // dynamics // for (ii = 0; ii < N; ii++) // { // dynamics[ii]->opts_set(dynamics[ii], // opts->nlp_opts->dynamics[ii], "compute_adj", &compute_adj); // } // // constraints // for (ii = 0; ii <= N; ii++) // { // constraints[ii]->opts_set(constraints[ii], // opts->nlp_opts->constraints[ii], "compute_adj", &compute_adj); // } return; } void ocp_nlp_sqp_rti_opts_update(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_opts_update(config, dims, nlp_opts); return; } void ocp_nlp_sqp_rti_opts_set(void *config_, void *opts_, const char *field, void* value) { ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_; ocp_nlp_config *config = config_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; int ii; char module[MAX_STR_LEN]; char *ptr_module = NULL; int module_length = 0; // extract module name char *char_ = strchr(field, '_'); if (char_!=NULL) { module_length = char_-field; for (ii=0; ii<module_length; ii++) module[ii] = field[ii]; module[module_length] = '\0'; // add end of string 
ptr_module = module; } // pass options to QP module if ( ptr_module!=NULL && (!strcmp(ptr_module, "qp")) ) { ocp_nlp_opts_set(config, nlp_opts, field, value); if (!strcmp(field, "qp_warm_start")) { int* i_ptr = (int *) value; opts->qp_warm_start = *i_ptr; } } else // nlp opts { if (!strcmp(field, "ext_qp_res")) { int* ext_qp_res = (int *) value; opts->ext_qp_res = *ext_qp_res; } else if (!strcmp(field, "warm_start_first_qp")) { bool* warm_start_first_qp = (bool *) value; opts->warm_start_first_qp = *warm_start_first_qp; } else if (!strcmp(field, "rti_phase")) { int* rti_phase = (int *) value; if (*rti_phase < 0 || *rti_phase > 2) { printf("\nerror: ocp_nlp_sqp_opts_set: invalid value for rti_phase field."); printf("possible values are: 0, 1, 2\n"); exit(1); } else opts->rti_phase = *rti_phase; } else if (!strcmp(field, "print_level")) { int* print_level = (int *) value; if (*print_level < 0) { printf("\nerror: ocp_nlp_sqp_rti_opts_set: invalid value for print_level field, need int >=0, got %d.", *print_level); exit(1); } opts->print_level = *print_level; } else { ocp_nlp_opts_set(config, nlp_opts, field, value); } } return; } void ocp_nlp_sqp_rti_opts_set_at_stage(void *config_, void *opts_, int stage, const char *field, void* value) { ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = (ocp_nlp_sqp_rti_opts *) opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_opts_set_at_stage(config, nlp_opts, stage, field, value); return; } /************************************************ * memory ************************************************/ int ocp_nlp_sqp_rti_memory_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_cost_config **cost = config->cost; // ocp_nlp_constraints_config 
**constraints = config->constraints; // int N = dims->N; // int *nx = dims->nx; // int *nu = dims->nu; // int *nz = dims->nz; int size = 0; size += sizeof(ocp_nlp_sqp_rti_memory); // nlp mem size += ocp_nlp_memory_calculate_size(config, dims, nlp_opts); // stat int stat_m = 1+1; int stat_n = 2; if (opts->ext_qp_res) stat_n += 4; size += stat_n*stat_m*sizeof(double); size += 8; // initial align make_int_multiple_of(8, &size); return size; } void *ocp_nlp_sqp_rti_memory_assign(void *config_, void *dims_, void *opts_, void *raw_memory) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; // ocp_nlp_dynamics_config **dynamics = config->dynamics; // ocp_nlp_cost_config **cost = config->cost; // ocp_nlp_constraints_config **constraints = config->constraints; char *c_ptr = (char *) raw_memory; // int ii; // int N = dims->N; // int *nx = dims->nx; // int *nu = dims->nu; // int *nz = dims->nz; // initial align align_char_to(8, &c_ptr); ocp_nlp_sqp_rti_memory *mem = (ocp_nlp_sqp_rti_memory *) c_ptr; c_ptr += sizeof(ocp_nlp_sqp_rti_memory); // nlp mem mem->nlp_mem = ocp_nlp_memory_assign(config, dims, nlp_opts, c_ptr); c_ptr += ocp_nlp_memory_calculate_size(config, dims, nlp_opts); // stat mem->stat = (double *) c_ptr; mem->stat_m = 1+1; mem->stat_n = 2; if (opts->ext_qp_res) mem->stat_n += 4; c_ptr += mem->stat_m*mem->stat_n*sizeof(double); mem->status = ACADOS_READY; assert((char *) raw_memory+ocp_nlp_sqp_rti_memory_calculate_size( config, dims, opts) >= c_ptr); return mem; } /************************************************ * workspace ************************************************/ int ocp_nlp_sqp_rti_workspace_calculate_size(void *config_, void *dims_, void *opts_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; int size = 0; // 
sqp size += sizeof(ocp_nlp_sqp_rti_workspace); // nlp size += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts); // qp in size += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out size += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); if (opts->ext_qp_res) { // qp res size += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims); // qp res ws size += ocp_qp_res_workspace_calculate_size(dims->qp_solver->orig_dims); } return size; } static void ocp_nlp_sqp_rti_cast_workspace( ocp_nlp_config *config, ocp_nlp_dims *dims, ocp_nlp_sqp_rti_opts *opts, ocp_nlp_sqp_rti_memory *mem, ocp_nlp_sqp_rti_workspace *work) { ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_memory *nlp_mem = mem->nlp_mem; // sqp char *c_ptr = (char *) work; c_ptr += sizeof(ocp_nlp_sqp_rti_workspace); // nlp work->nlp_work = ocp_nlp_workspace_assign( config, dims, nlp_opts, nlp_mem, c_ptr); c_ptr += ocp_nlp_workspace_calculate_size(config, dims, nlp_opts); // qp in work->tmp_qp_in = ocp_qp_in_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_in_calculate_size(dims->qp_solver->orig_dims); // qp out work->tmp_qp_out = ocp_qp_out_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_out_calculate_size(dims->qp_solver->orig_dims); if (opts->ext_qp_res) { // qp res work->qp_res = ocp_qp_res_assign(dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_res_calculate_size(dims->qp_solver->orig_dims); // qp res ws work->qp_res_ws = ocp_qp_res_workspace_assign( dims->qp_solver->orig_dims, c_ptr); c_ptr += ocp_qp_res_workspace_calculate_size( dims->qp_solver->orig_dims); } assert((char *) work + ocp_nlp_sqp_rti_workspace_calculate_size(config, dims, opts) >= c_ptr); return; } /************************************************ * functions ************************************************/ int ocp_nlp_sqp_rti(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_sqp_rti_memory *mem = mem_; 
// zero timers acados_timer timer0; double total_time = 0.0; mem->time_tot = 0.0; ocp_nlp_sqp_rti_opts *nlp_opts = opts_; int rti_phase = nlp_opts->rti_phase; acados_tic(&timer0); switch(rti_phase) { // perform preparation and feedback rti_phase case 0: ocp_nlp_sqp_rti_preparation_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); ocp_nlp_sqp_rti_feedback_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; // perform preparation rti_phase case 1: ocp_nlp_sqp_rti_preparation_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; // perform feedback rti_phase case 2: ocp_nlp_sqp_rti_feedback_step( config_, dims_, nlp_in_, nlp_out_, opts_, mem_, work_); break; } total_time += acados_toc(&timer0); mem->time_tot = total_time; nlp_out->total_time = total_time; return mem->status; } void ocp_nlp_sqp_rti_preparation_step(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { acados_timer timer1; ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; // ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; mem->time_lin = 0.0; mem->time_reg = 0.0; int N = dims->N; int ii; #if defined(ACADOS_WITH_OPENMP) // backup number of threads int num_threads_bkp = omp_get_num_threads(); // set number of threads omp_set_num_threads(opts->nlp_opts->num_threads); #pragma omp parallel { // beginning of parallel region #endif // alias to dynamics_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii < N; ii++) { config->dynamics[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->dynamics[ii]); 
config->dynamics[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_ux1_ptr( nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_ux1_ptr( nlp_work->tmp_nlp_out->ux+ii+1, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_pi_ptr( nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_tmp_pi_ptr( nlp_work->tmp_nlp_out->pi+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_BAbt_ptr( nlp_mem->qp_in->BAbt+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_dzduxt_ptr( nlp_mem->dzduxt+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_sim_guess_ptr( nlp_mem->sim_guess+ii, nlp_mem->set_sim_guess+ii, nlp_mem->dynamics[ii]); config->dynamics[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->dynamics[ii]); } // alias to cost_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii <= N; ii++) { config->cost[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_dzdux_tran_ptr( nlp_mem->dzduxt+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->cost[ii]); config->cost[ii]->memory_set_Z_ptr( nlp_mem->qp_in->Z+ii, nlp_mem->cost[ii]); } // alias to constraints_memory #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif for (ii = 0; ii <= N; ii++) { config->constraints[ii]->memory_set_ux_ptr( nlp_out->ux+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_tmp_ux_ptr( nlp_work->tmp_nlp_out->ux+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_lam_ptr( nlp_out->lam+ii, nlp_mem->constraints[ii]); 
config->constraints[ii]->memory_set_tmp_lam_ptr( nlp_work->tmp_nlp_out->lam+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_z_alg_ptr( nlp_mem->z_alg+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_dzdux_tran_ptr( nlp_mem->dzduxt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_DCt_ptr( nlp_mem->qp_in->DCt+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_RSQrq_ptr( nlp_mem->qp_in->RSQrq+ii, nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxb_ptr( nlp_mem->qp_in->idxb[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxs_rev_ptr( nlp_mem->qp_in->idxs_rev[ii], nlp_mem->constraints[ii]); config->constraints[ii]->memory_set_idxe_ptr( nlp_mem->qp_in->idxe[ii], nlp_mem->constraints[ii]); } // alias to regularize memory config->regularize->memory_set_RSQrq_ptr( dims->regularize, nlp_mem->qp_in->RSQrq, nlp_mem->regularize_mem); config->regularize->memory_set_rq_ptr( dims->regularize, nlp_mem->qp_in->rqz, nlp_mem->regularize_mem); config->regularize->memory_set_BAbt_ptr( dims->regularize, nlp_mem->qp_in->BAbt, nlp_mem->regularize_mem); config->regularize->memory_set_b_ptr( dims->regularize, nlp_mem->qp_in->b, nlp_mem->regularize_mem); config->regularize->memory_set_idxb_ptr( dims->regularize, nlp_mem->qp_in->idxb, nlp_mem->regularize_mem); config->regularize->memory_set_DCt_ptr( dims->regularize, nlp_mem->qp_in->DCt, nlp_mem->regularize_mem); config->regularize->memory_set_ux_ptr( dims->regularize, nlp_mem->qp_out->ux, nlp_mem->regularize_mem); config->regularize->memory_set_pi_ptr( dims->regularize, nlp_mem->qp_out->pi, nlp_mem->regularize_mem); config->regularize->memory_set_lam_ptr( dims->regularize, nlp_mem->qp_out->lam, nlp_mem->regularize_mem); // copy sampling times into dynamics model #if defined(ACADOS_WITH_OPENMP) #pragma omp for nowait #endif // NOTE(oj): this will lead in an error for irk_gnsf, T must be set in precompute; // -> remove here and make sure 
precompute is called everywhere (e.g. Python interface). for (ii = 0; ii < N; ii++) { config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); } #if defined(ACADOS_WITH_OPENMP) } // end of parallel region #endif // initialize QP ocp_nlp_initialize_qp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); /* SQP body */ int sqp_iter = 0; nlp_mem->sqp_iter = &sqp_iter; // linearizate NLP and update QP matrices acados_tic(&timer1); ocp_nlp_approximate_qp_matrices(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); mem->time_lin += acados_toc(&timer1); #if defined(ACADOS_WITH_OPENMP) // restore number of threads omp_set_num_threads(num_threads_bkp); #endif return; } void ocp_nlp_sqp_rti_feedback_step(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { acados_timer timer1; ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_opts *nlp_opts = opts->nlp_opts; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_qp_xcond_solver_config *qp_solver = config->qp_solver; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int qp_iter = 0; int qp_status = 0; double tmp_time; mem->time_qp_sol = 0.0; mem->time_qp_solver_call = 0.0; mem->time_qp_xcond = 0.0; // embed initial value (this actually updates all bounds at stage 0...) 
ocp_nlp_embed_initial_value(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // update QP rhs for SQP (step prim var, abs dual var) ocp_nlp_approximate_qp_vectors_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // regularize Hessian acados_tic(&timer1); config->regularize->regularize_hessian(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); if (opts->print_level > 0) { printf("\n------- qp_in --------\n"); print_ocp_qp_in(nlp_mem->qp_in); } if (!opts->warm_start_first_qp) { int tmp_int = 0; config->qp_solver->opts_set(config->qp_solver, opts->nlp_opts->qp_solver_opts, "warm_start", &tmp_int); } // solve qp acados_tic(&timer1); qp_status = qp_solver->evaluate(qp_solver, dims->qp_solver, nlp_mem->qp_in, nlp_mem->qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); mem->time_qp_sol += acados_toc(&timer1); qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_solver_call", &tmp_time); mem->time_qp_solver_call += tmp_time; qp_solver->memory_get(qp_solver, nlp_mem->qp_solver_mem, "time_qp_xcond", &tmp_time); mem->time_qp_xcond += tmp_time; // compute correct dual solution in case of Hessian regularization acados_tic(&timer1); config->regularize->correct_dual_sol(config->regularize, dims->regularize, opts->nlp_opts->regularize, nlp_mem->regularize_mem); mem->time_reg += acados_toc(&timer1); // TODO move into QP solver memory ??? 
qp_info *qp_info_; ocp_qp_out_get(nlp_mem->qp_out, "qp_info", &qp_info_); nlp_out->qp_iter = qp_info_->num_iter; qp_iter = qp_info_->num_iter; // compute external QP residuals (for debugging) if (opts->ext_qp_res) { ocp_qp_res_compute(nlp_mem->qp_in, nlp_mem->qp_out, work->qp_res, work->qp_res_ws); ocp_qp_res_compute_nrm_inf(work->qp_res, mem->stat+(mem->stat_n*1+2)); // printf("\nsqp_iter %d, res %e %e %e %e\n", sqp_iter, // inf_norm_qp_res[0], inf_norm_qp_res[1], // inf_norm_qp_res[2], inf_norm_qp_res[3]); } // printf("\n------- qp_out (sqp iter %d) ---------\n", sqp_iter); // print_ocp_qp_out(nlp_mem->qp_out); // exit(1); // save statistics mem->stat[mem->stat_n*1+0] = qp_status; mem->stat[mem->stat_n*1+1] = qp_iter; if ((qp_status!=ACADOS_SUCCESS) & (qp_status!=ACADOS_MAXITER)) { // print_ocp_qp_in(mem->qp_in); #ifndef ACADOS_SILENT printf("QP solver returned error status %d\n", qp_status); #endif mem->status = ACADOS_QP_FAILURE; return; } ocp_nlp_update_variables_sqp(config, dims, nlp_in, nlp_out, nlp_opts, nlp_mem, nlp_work); // ocp_nlp_dims_print(nlp_out->dims); // ocp_nlp_out_print(nlp_out); // exit(1); // print_ocp_qp_in(mem->qp_in); mem->status = ACADOS_SUCCESS; } int ocp_nlp_sqp_rti_precompute(void *config_, void *dims_, void *nlp_in_, void *nlp_out_, void *opts_, void *mem_, void *work_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_in *nlp_in = nlp_in_; // ocp_nlp_out *nlp_out = nlp_out_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; int N = dims->N; int status = ACADOS_SUCCESS; int ii; // TODO(giaf) flag to enable/disable checks for (ii = 0; ii <= N; ii++) { int module_val; config->constraints[ii]->dims_get(config->constraints[ii], dims->constraints[ii], "ns", &module_val); if (dims->ns[ii] != module_val) { 
printf("ocp_nlp_sqp_rti_precompute: inconsistent dimension ns \ for stage %d with constraint module, got %d, module: %d.", ii, dims->ns[ii], module_val); exit(1); } } // precompute for (ii = 0; ii < N; ii++) { // set T config->dynamics[ii]->model_set(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], "T", nlp_in->Ts+ii); // dynamics precompute status = config->dynamics[ii]->precompute(config->dynamics[ii], dims->dynamics[ii], nlp_in->dynamics[ii], opts->nlp_opts->dynamics[ii], nlp_mem->dynamics[ii], nlp_work->dynamics[ii]); if (status != ACADOS_SUCCESS) return status; } return status; } void ocp_nlp_sqp_rti_eval_param_sens(void *config_, void *dims_, void *opts_, void *mem_, void *work_, char *field, int stage, int index, void *sens_nlp_out_) { ocp_nlp_dims *dims = dims_; ocp_nlp_config *config = config_; ocp_nlp_sqp_rti_opts *opts = opts_; ocp_nlp_sqp_rti_memory *mem = mem_; ocp_nlp_memory *nlp_mem = mem->nlp_mem; ocp_nlp_out *sens_nlp_out = sens_nlp_out_; ocp_nlp_sqp_rti_workspace *work = work_; ocp_nlp_sqp_rti_cast_workspace(config, dims, opts, mem, work); ocp_nlp_workspace *nlp_work = work->nlp_work; d_ocp_qp_copy_all(nlp_mem->qp_in, work->tmp_qp_in); d_ocp_qp_set_rhs_zero(work->tmp_qp_in); double one = 1.0; if ((!strcmp("ex", field)) & (stage==0)) { d_ocp_qp_set_el("lbx", stage, index, &one, work->tmp_qp_in); d_ocp_qp_set_el("ubx", stage, index, &one, work->tmp_qp_in); // d_ocp_qp_print(work->tmp_qp_in->dim, work->tmp_qp_in); config->qp_solver->eval_sens(config->qp_solver, dims->qp_solver, work->tmp_qp_in, work->tmp_qp_out, opts->nlp_opts->qp_solver_opts, nlp_mem->qp_solver_mem, nlp_work->qp_work); // d_ocp_qp_sol_print(work->tmp_qp_out->dim, work->tmp_qp_out); // exit(1); /* copy tmp_qp_out into sens_nlp_out */ int i; int N = dims->N; int *nv = dims->nv; int *nx = dims->nx; // int *nu = dims->nu; int *ni = dims->ni; // int *nz = dims->nz; for (i = 0; i <= N; i++) { blasfeo_dveccp(nv[i], work->tmp_qp_out->ux + i, 0, sens_nlp_out->ux + i, 0); if (i 
< N) blasfeo_dveccp(nx[i + 1], work->tmp_qp_out->pi + i, 0, sens_nlp_out->pi + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->lam + i, 0, sens_nlp_out->lam + i, 0); blasfeo_dveccp(2 * ni[i], work->tmp_qp_out->t + i, 0, sens_nlp_out->t + i, 0); } } else { printf("\nerror: field %s at stage %d not available in \ ocp_nlp_sqp_rti_eval_param_sens\n", field, stage); exit(1); } return; } // TODO rename memory_get ??? void ocp_nlp_sqp_rti_get(void *config_, void *dims_, void *mem_, const char *field, void *return_value_) { ocp_nlp_config *config = config_; ocp_nlp_dims *dims = dims_; ocp_nlp_sqp_rti_memory *mem = mem_; if (!strcmp("sqp_iter", field)) { int *value = return_value_; *value = 1; } else if (!strcmp("status", field)) { int *value = return_value_; *value = mem->status; } else if (!strcmp("time_tot", field) || !strcmp("tot_time", field)) { double *value = return_value_; *value = mem->time_tot; } else if (!strcmp("time_qp_sol", field) || !strcmp("time_qp", field)) { double *value = return_value_; *value = mem->time_qp_sol; } else if (!strcmp("time_qp_solver", field) || !strcmp("time_qp_solver_call", field)) { double *value = return_value_; *value = mem->time_qp_solver_call; } else if (!strcmp("time_qp_xcond", field)) { double *value = return_value_; *value = mem->time_qp_xcond; } else if (!strcmp("time_lin", field)) { double *value = return_value_; *value = mem->time_lin; } else if (!strcmp("time_reg", field)) { double *value = return_value_; *value = mem->time_reg; } else if (!strcmp("time_sim", field) || !strcmp("time_sim_ad", field) || !strcmp("time_sim_la", field)) { double tmp = 0.0; double *ptr = return_value_; int N = dims->N; int ii; for (ii=0; ii<N; ii++) { config->dynamics[ii]->memory_get(config->dynamics[ii], dims->dynamics[ii], mem->nlp_mem->dynamics[ii], field, &tmp); *ptr += tmp; } } else if (!strcmp("stat", field)) { double **value = return_value_; *value = mem->stat; } else if (!strcmp("statistics", field)) { int n_row = 2; double *value = 
/* NOTE(review): this span begins inside ocp_nlp_sqp_rti_get(); the function's
 * opening and most of its field dispatch precede this chunk.  It picks up in
 * the "statistics" branch, where `n_row` and `double *value =` were declared
 * just above. */
return_value_;
        /* "statistics": copy mem->stat (row-major, stat_m rows of stat_n
         * entries) into the caller's buffer as a column-major table whose
         * first column is the row index. */
        for (int ii=0; ii<n_row; ii++)
        {
            value[ii+0] = ii;
            for (int jj=0; jj<mem->stat_n; jj++)
                value[ii+(jj+1)*n_row] = mem->stat[jj+ii*mem->stat_n];
        }
    }
    else if (!strcmp("stat_m", field))
    {
        /* number of rows of the statistics table */
        int *value = return_value_;
        *value = mem->stat_m;
    }
    else if (!strcmp("stat_n", field))
    {
        /* number of columns of the statistics table */
        int *value = return_value_;
        *value = mem->stat_n;
    }
    else if (!strcmp("nlp_mem", field))
    {
        /* the remaining fields hand out internal pointers (no copies);
         * callers must not free them */
        void **value = return_value_;
        *value = mem->nlp_mem;
    }
    else if (!strcmp("qp_xcond_dims", field))
    {
        void **value = return_value_;
        *value = dims->qp_solver->xcond_dims;
    }
    else if (!strcmp("nlp_res", field))
    {
        ocp_nlp_res **value = return_value_;
        *value = mem->nlp_mem->nlp_res;
    }
    else if (!strcmp("qp_xcond_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_in;
    }
    else if (!strcmp("qp_xcond_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_solver_mem->xcond_qp_out;
    }
    else if (!strcmp("qp_in", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_in;
    }
    else if (!strcmp("qp_out", field))
    {
        void **value = return_value_;
        *value = mem->nlp_mem->qp_out;
    }
    else if (!strcmp("qp_iter", field))
    {
        /* delegated to the QP solver's own memory getter */
        config->qp_solver->memory_get(config->qp_solver,
            mem->nlp_mem->qp_solver_mem, "iter", return_value_);
    }
    else if (!strcmp("res_stat", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_stat;
    }
    else if (!strcmp("res_eq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_eq;
    }
    else if (!strcmp("res_ineq", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_ineq;
    }
    else if (!strcmp("res_comp", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->nlp_res->inf_norm_res_comp;
    }
    else if (!strcmp("cost_value", field))
    {
        double *value = return_value_;
        *value = mem->nlp_mem->cost_value;
    }
    else
    {
        /* unknown field names are a hard error */
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_get\n", field);
        exit(1);
    }
}


/* Getter for options-level sub-structures; currently only exposes the
 * embedded ocp_nlp_opts.  Unknown fields terminate the process. */
void ocp_nlp_sqp_rti_opts_get(void *config_, void *dims_,
    void *opts_, const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_opts *opts = opts_;

    if (!strcmp("nlp_opts", field))
    {
        void **value = return_value_;
        *value = opts->nlp_opts;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_opts_get\n", field);
        exit(1);
    }
}


/* Getter for workspace-level sub-structures; currently only exposes the
 * embedded nlp workspace.  Unknown fields terminate the process. */
void ocp_nlp_sqp_rti_work_get(void *config_, void *dims_,
    void *work_, const char *field, void *return_value_)
{
    // ocp_nlp_config *config = config_;
    ocp_nlp_sqp_rti_workspace *work = work_;

    if (!strcmp("nlp_work", field))
    {
        void **value = return_value_;
        *value = work->nlp_work;
    }
    else
    {
        printf("\nerror: field %s not available in ocp_nlp_sqp_rti_work_get\n", field);
        exit(1);
    }
}


/* Populate the generic ocp_nlp_config vtable with the SQP-RTI
 * implementations defined in this file. */
void ocp_nlp_sqp_rti_config_initialize_default(void *config_)
{
    ocp_nlp_config *config = (ocp_nlp_config *) config_;

    config->opts_calculate_size = &ocp_nlp_sqp_rti_opts_calculate_size;
    config->opts_assign = &ocp_nlp_sqp_rti_opts_assign;
    config->opts_initialize_default = &ocp_nlp_sqp_rti_opts_initialize_default;
    config->opts_update = &ocp_nlp_sqp_rti_opts_update;
    config->opts_set = &ocp_nlp_sqp_rti_opts_set;
    config->opts_set_at_stage = &ocp_nlp_sqp_rti_opts_set_at_stage;
    config->memory_calculate_size = &ocp_nlp_sqp_rti_memory_calculate_size;
    config->memory_assign = &ocp_nlp_sqp_rti_memory_assign;
    config->workspace_calculate_size = &ocp_nlp_sqp_rti_workspace_calculate_size;
    /* main entry point: dispatches preparation/feedback by rti_phase */
    config->evaluate = &ocp_nlp_sqp_rti;
    config->eval_param_sens = &ocp_nlp_sqp_rti_eval_param_sens;
    config->config_initialize_default = &ocp_nlp_sqp_rti_config_initialize_default;
    config->precompute = &ocp_nlp_sqp_rti_precompute;
    config->get = &ocp_nlp_sqp_rti_get;
    config->opts_get = &ocp_nlp_sqp_rti_opts_get;
    config->work_get = &ocp_nlp_sqp_rti_work_get;

    return;
}
tree.h
#ifndef LIGHTGBM_TREE_H_ #define LIGHTGBM_TREE_H_ #include <LightGBM/meta.h> #include <LightGBM/dataset.h> #include <string> #include <vector> #include <memory> namespace LightGBM { #define kMaxTreeOutput (100) /*! * \brief Tree model */ class Tree { public: /*! * \brief Constructor * \param max_leaves The number of max leaves */ explicit Tree(int max_leaves); /*! * \brief Construtor, from a string * \param str Model string */ explicit Tree(const std::string& str); ~Tree(); /*! * \brief Performing a split on tree leaves. * \param leaf Index of leaf to be split * \param feature Index of feature; the converted index after removing useless features * \param bin_type type of this feature, numerical or categorical * \param threshold Threshold(bin) of split * \param real_feature Index of feature, the original index on data * \param threshold_double Threshold on feature value * \param left_value Model Left child output * \param right_value Model Right child output * \param left_cnt Count of left child * \param right_cnt Count of right child * \param gain Split gain * \param zero_bin bin value for value==0 (missing value) * \param default_bin default conversion for the missing value, in bin * \param default_value default conversion for the missing value, in float value * \return The index of new leaf. */ int Split(int leaf, int feature, BinType bin_type, uint32_t threshold, int real_feature, double threshold_double, double left_value, double right_value, data_size_t left_cnt, data_size_t right_cnt, double gain, uint32_t zero_bin, uint32_t default_bin_for_zero, double default_value); /*! \brief Get the output of one leaf */ inline double LeafOutput(int leaf) const { return leaf_value_[leaf]; } /*! \brief Set the output of one leaf */ inline void SetLeafOutput(int leaf, double output) { leaf_value_[leaf] = output; } /*! 
* \brief Adding prediction value of this tree model to scores * \param data The dataset * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, data_size_t num_data, double* score) const; /*! * \brief Adding prediction value of this tree model to scorese * \param data The dataset * \param used_data_indices Indices of used data * \param num_data Number of total data * \param score Will add prediction to score */ void AddPredictionToScore(const Dataset* data, const data_size_t* used_data_indices, data_size_t num_data, double* score) const; /*! * \brief Prediction on one record * \param feature_values Feature value of this record * \return Prediction result */ inline double Predict(const double* feature_values) const; inline int PredictLeafIndex(const double* feature_values) const; /*! \brief Get Number of leaves*/ inline int num_leaves() const { return num_leaves_; } /*! \brief Get depth of specific leaf*/ inline int leaf_depth(int leaf_idx) const { return leaf_depth_[leaf_idx]; } /*! \brief Get feature of specific split*/ inline int split_feature(int split_idx) const { return split_feature_[split_idx]; } inline double split_gain(int split_idx) const { return split_gain_[split_idx]; } /*! * \brief Shrinkage for the tree's output * shrinkage rate (a.k.a learning rate) is used to tune the traning process * \param rate The factor of shrinkage */ inline void Shrinkage(double rate) { #pragma omp parallel for schedule(static, 512) if (num_leaves_ >= 1024) for (int i = 0; i < num_leaves_; ++i) { leaf_value_[i] *= rate; if (leaf_value_[i] > kMaxTreeOutput) { leaf_value_[i] = kMaxTreeOutput; } else if (leaf_value_[i] < -kMaxTreeOutput) { leaf_value_[i] = -kMaxTreeOutput; } } shrinkage_ *= rate; } /*! \brief Serialize this object to string*/ std::string ToString(); /*! \brief Serialize this object to json*/ std::string ToJSON(); /*! 
\brief Serialize this object to if-else statement*/ std::string ToIfElse(int index, bool is_predict_leaf_index); template<typename T> static bool CategoricalDecision(T fval, T threshold) { if (static_cast<int>(fval) == static_cast<int>(threshold)) { return true; } else { return false; } } template<typename T> static bool NumericalDecision(T fval, T threshold) { if (fval <= threshold) { return true; } else { return false; } } static double DefaultValueForZero(double fval, double zero, double out) { if (fval > -zero && fval <= zero) { return out; } else { return fval; } } static uint32_t DefaultValueForZero(uint32_t fval, uint32_t zero, uint32_t out) { if (fval == zero) { return out; } else { return fval; } } static const char* GetDecisionTypeName(int8_t type) { if (type == 0) { return "no_greater"; } else { return "is"; } } static std::vector<bool(*)(uint32_t, uint32_t)> inner_decision_funs; static std::vector<bool(*)(double, double)> decision_funs; private: /*! * \brief Find leaf index of which record belongs by features * \param feature_values Feature value of this record * \return Leaf index */ inline int GetLeaf(const double* feature_values) const; /*! \brief Serialize one node to json*/ inline std::string NodeToJSON(int index); /*! \brief Serialize one node to if-else statement*/ inline std::string NodeToIfElse(int index, bool is_predict_leaf_index); /*! \brief Number of max leaves*/ int max_leaves_; /*! \brief Number of current levas*/ int num_leaves_; // following values used for non-leaf node /*! \brief A non-leaf node's left child */ std::vector<int> left_child_; /*! \brief A non-leaf node's right child */ std::vector<int> right_child_; /*! \brief A non-leaf node's split feature */ std::vector<int> split_feature_inner_; /*! \brief A non-leaf node's split feature, the original index */ std::vector<int> split_feature_; /*! \brief A non-leaf node's split threshold in bin */ std::vector<uint32_t> threshold_in_bin_; /*! 
\brief A non-leaf node's split threshold in feature value */ std::vector<double> threshold_; /*! \brief Decision type, 0 for '<='(numerical feature), 1 for 'is'(categorical feature) */ std::vector<int8_t> decision_type_; /*! \brief Default values for the na/0 feature values */ std::vector<double> default_value_; std::vector<uint32_t> zero_bin_; std::vector<uint32_t> default_bin_for_zero_; /*! \brief A non-leaf node's split gain */ std::vector<double> split_gain_; // used for leaf node /*! \brief The parent of leaf */ std::vector<int> leaf_parent_; /*! \brief Output of leaves */ std::vector<double> leaf_value_; /*! \brief DataCount of leaves */ std::vector<data_size_t> leaf_count_; /*! \brief Output of non-leaf nodes */ std::vector<double> internal_value_; /*! \brief DataCount of non-leaf nodes */ std::vector<data_size_t> internal_count_; /*! \brief Depth for leaves */ std::vector<int> leaf_depth_; double shrinkage_; bool has_categorical_; }; inline double Tree::Predict(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return LeafOutput(leaf); } else { return 0.0f; } } inline int Tree::PredictLeafIndex(const double* feature_values) const { if (num_leaves_ > 1) { int leaf = GetLeaf(feature_values); return leaf; } else { return 0; } } inline int Tree::GetLeaf(const double* feature_values) const { int node = 0; if (has_categorical_) { while (node >= 0) { double fval = DefaultValueForZero(feature_values[split_feature_[node]], kMissingValueRange, default_value_[node]); if (decision_funs[decision_type_[node]]( fval, threshold_[node])) { node = left_child_[node]; } else { node = right_child_[node]; } } } else { while (node >= 0) { double fval = DefaultValueForZero(feature_values[split_feature_[node]], kMissingValueRange, default_value_[node]); if (NumericalDecision<double>( fval, threshold_[node])) { node = left_child_[node]; } else { node = right_child_[node]; } } } return ~node; } } // namespace LightGBM #endif // 
LightGBM_TREE_H_
transform.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT RRRR AAA N N SSSSS FFFFF OOO RRRR M M % % T R R A A NN N SS F O O R R MM MM % % T RRRR AAAAA N N N SSS FFF O O RRRR M M M % % T R R A A N NN SS F O O R R M M % % T R R A A N N SSSSS F OOO R R M M % % % % % % MagickCore Image Transform Methods % % % % Software Design % % Cristy % % July 1992 % % % % % % Copyright 1999-2017 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/attribute.h" #include "MagickCore/cache.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/composite.h" #include "MagickCore/distort.h" #include "MagickCore/draw.h" #include "MagickCore/effect.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/geometry.h" #include "MagickCore/image.h" #include "MagickCore/memory_.h" #include "MagickCore/layer.h" #include "MagickCore/list.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/resource_.h" #include "MagickCore/resize.h" #include "MagickCore/statistic.h" #include "MagickCore/string_.h" #include "MagickCore/thread-private.h" #include "MagickCore/transform.h" #include "MagickCore/transform-private.h" /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A u t o O r i e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AutoOrientImage() adjusts an image so that its orientation is suitable for % viewing (i.e. top-left orientation). % % The format of the AutoOrientImage method is: % % Image *AutoOrientImage(const Image *image, % const OrientationType orientation,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: The image. % % o orientation: Current image orientation. % % o exception: Return any errors or warnings in this structure. 
%
*/
MagickExport Image *AutoOrientImage(const Image *image,
  const OrientationType orientation,ExceptionInfo *exception)
{
  Image
    *oriented_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Map each EXIF-style orientation onto the transform that normalizes it.
  */
  oriented_image=(Image *) NULL;
  switch(orientation)
  {
    case TopRightOrientation:
    {
      oriented_image=FlopImage(image,exception);
      break;
    }
    case BottomRightOrientation:
    {
      oriented_image=RotateImage(image,180.0,exception);
      break;
    }
    case BottomLeftOrientation:
    {
      oriented_image=FlipImage(image,exception);
      break;
    }
    case LeftTopOrientation:
    {
      oriented_image=TransposeImage(image,exception);
      break;
    }
    case RightTopOrientation:
    {
      oriented_image=RotateImage(image,90.0,exception);
      break;
    }
    case RightBottomOrientation:
    {
      oriented_image=TransverseImage(image,exception);
      break;
    }
    case LeftBottomOrientation:
    {
      oriented_image=RotateImage(image,270.0,exception);
      break;
    }
    case UndefinedOrientation:
    case TopLeftOrientation:
    default:
    {
      /*
        Already upright (or unknown): return an unmodified copy.
      */
      oriented_image=CloneImage(image,0,0,MagickTrue,exception);
      break;
    }
  }
  if (oriented_image != (Image *) NULL)
    oriented_image->orientation=TopLeftOrientation;
  return(oriented_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C h o p   I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ChopImage() removes a region of an image and collapses the image to occupy
%  the removed portion.
%
%  The format of the ChopImage method is:
%
%      Image *ChopImage(const Image *image,const RectangleInfo *chop_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o chop_info: Define the region of the image to chop.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *ChopImage(const Image *image,const RectangleInfo *chop_info, ExceptionInfo *exception) { #define ChopImageTag "Chop/Image" CacheView *chop_view, *image_view; Image *chop_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo extent; ssize_t y; /* Check chop geometry. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); assert(chop_info != (RectangleInfo *) NULL); if (((chop_info->x+(ssize_t) chop_info->width) < 0) || ((chop_info->y+(ssize_t) chop_info->height) < 0) || (chop_info->x > (ssize_t) image->columns) || (chop_info->y > (ssize_t) image->rows)) ThrowImageException(OptionWarning,"GeometryDoesNotContainImage"); extent=(*chop_info); if ((extent.x+(ssize_t) extent.width) > (ssize_t) image->columns) extent.width=(size_t) ((ssize_t) image->columns-extent.x); if ((extent.y+(ssize_t) extent.height) > (ssize_t) image->rows) extent.height=(size_t) ((ssize_t) image->rows-extent.y); if (extent.x < 0) { extent.width-=(size_t) (-extent.x); extent.x=0; } if (extent.y < 0) { extent.height-=(size_t) (-extent.y); extent.y=0; } chop_image=CloneImage(image,image->columns-extent.width,image->rows- extent.height,MagickTrue,exception); if (chop_image == (Image *) NULL) return((Image *) NULL); /* Extract chop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); chop_view=AcquireAuthenticCacheView(chop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,chop_image,1,1) #endif for (y=0; y < (ssize_t) extent.y; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,y,chop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } /* Extract chop image. 
*/ #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,chop_image,1,1) #endif for (y=0; y < (ssize_t) (image->rows-(extent.y+extent.height)); y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,extent.y+extent.height+y, image->columns,1,exception); q=QueueCacheViewAuthenticPixels(chop_view,0,extent.y+y,chop_image->columns, 1,exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { if ((x < extent.x) || (x >= (ssize_t) (extent.x+extent.width))) { register ssize_t i; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait chop_traits=GetPixelChannelTraits(chop_image,channel); if ((traits == UndefinedPixelTrait) || (chop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(chop_image,channel,p[i],q); } q+=GetPixelChannels(chop_image); } p+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(chop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_ChopImage) #endif proceed=SetImageProgress(image,ChopImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } chop_view=DestroyCacheView(chop_view); image_view=DestroyCacheView(image_view); chop_image->type=image->type; if (status == MagickFalse) chop_image=DestroyImage(chop_image); return(chop_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + C o n s o l i d a t e C M Y K I m a g e % % % % % % % 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ConsolidateCMYKImage() consolidates separate C, M, Y, and K planes into a % single image. % % The format of the ConsolidateCMYKImage method is: % % Image *ConsolidateCMYKImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ConsolidateCMYKImages(const Image *images, ExceptionInfo *exception) { CacheView *cmyk_view, *image_view; Image *cmyk_image, *cmyk_images; register ssize_t j; ssize_t y; /* Consolidate separate C, M, Y, and K planes into a single image. */ assert(images != (Image *) NULL); assert(images->signature == MagickCoreSignature); if (images->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); cmyk_images=NewImageList(); for (j=0; j < (ssize_t) GetImageListLength(images); j+=4) { register ssize_t i; assert(images != (Image *) NULL); cmyk_image=CloneImage(images,images->columns,images->rows,MagickTrue, exception); if (cmyk_image == (Image *) NULL) break; if (SetImageStorageClass(cmyk_image,DirectClass,exception) == MagickFalse) break; (void) SetImageColorspace(cmyk_image,CMYKColorspace,exception); for (i=0; i < 4; i++) { image_view=AcquireVirtualCacheView(images,exception); cmyk_view=AcquireAuthenticCacheView(cmyk_image,exception); for (y=0; y < (ssize_t) images->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; p=GetCacheViewVirtualPixels(image_view,0,y,images->columns,1,exception); q=QueueCacheViewAuthenticPixels(cmyk_view,0,y,cmyk_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) break; for (x=0; x < (ssize_t) images->columns; x++) { Quantum pixel; 
pixel=QuantumRange-GetPixelIntensity(images,p); switch (i) { case 0: SetPixelCyan(cmyk_image,pixel,q); break; case 1: SetPixelMagenta(cmyk_image,pixel,q); break; case 2: SetPixelYellow(cmyk_image,pixel,q); break; case 3: SetPixelBlack(cmyk_image,pixel,q); break; default: break; } p+=GetPixelChannels(images); q+=GetPixelChannels(cmyk_image); } if (SyncCacheViewAuthenticPixels(cmyk_view,exception) == MagickFalse) break; } cmyk_view=DestroyCacheView(cmyk_view); image_view=DestroyCacheView(image_view); images=GetNextImageInList(images); if (images == (Image *) NULL) break; } AppendImageToList(&cmyk_images,cmyk_image); } return(cmyk_images); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImage() extracts a region of the image starting at the offset defined % by geometry. Region must be fully defined, and no special handling of % geometry flags is performed. % % The format of the CropImage method is: % % Image *CropImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to crop with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *CropImage(const Image *image,const RectangleInfo *geometry, ExceptionInfo *exception) { #define CropImageTag "Crop/Image" CacheView *crop_view, *image_view; Image *crop_image; MagickBooleanType status; MagickOffsetType progress; OffsetInfo offset; RectangleInfo bounding_box, page; ssize_t y; /* Check crop geometry. 
*/ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); bounding_box=image->page; if ((bounding_box.width == 0) || (bounding_box.height == 0)) { bounding_box.width=image->columns; bounding_box.height=image->rows; } page=(*geometry); if (page.width == 0) page.width=bounding_box.width; if (page.height == 0) page.height=bounding_box.height; if (((bounding_box.x-page.x) >= (ssize_t) page.width) || ((bounding_box.y-page.y) >= (ssize_t) page.height) || ((page.x-bounding_box.x) > (ssize_t) image->columns) || ((page.y-bounding_box.y) > (ssize_t) image->rows)) { /* Crop is not within virtual canvas, return 1 pixel transparent image. */ (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); crop_image=CloneImage(image,1,1,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->background_color.alpha=(Quantum) TransparentAlpha; crop_image->alpha_trait=BlendPixelTrait; (void) SetImageBackgroundColor(crop_image,exception); crop_image->page=bounding_box; crop_image->page.x=(-1); crop_image->page.y=(-1); if (crop_image->dispose == BackgroundDispose) crop_image->dispose=NoneDispose; return(crop_image); } if ((page.x < 0) && (bounding_box.x >= 0)) { page.width+=page.x-bounding_box.x; page.x=0; } else { page.width-=bounding_box.x-page.x; page.x-=bounding_box.x; if (page.x < 0) page.x=0; } if ((page.y < 0) && (bounding_box.y >= 0)) { page.height+=page.y-bounding_box.y; page.y=0; } else { page.height-=bounding_box.y-page.y; page.y-=bounding_box.y; if (page.y < 0) page.y=0; } if ((page.x+(ssize_t) page.width) > (ssize_t) image->columns) page.width=image->columns-page.x; if ((geometry->width != 
0) && (page.width > geometry->width)) page.width=geometry->width; if ((page.y+(ssize_t) page.height) > (ssize_t) image->rows) page.height=image->rows-page.y; if ((geometry->height != 0) && (page.height > geometry->height)) page.height=geometry->height; bounding_box.x+=page.x; bounding_box.y+=page.y; if ((page.width == 0) || (page.height == 0)) { (void) ThrowMagickException(exception,GetMagickModule(),OptionWarning, "GeometryDoesNotContainImage","`%s'",image->filename); return((Image *) NULL); } /* Initialize crop image attributes. */ crop_image=CloneImage(image,page.width,page.height,MagickTrue,exception); if (crop_image == (Image *) NULL) return((Image *) NULL); crop_image->page.width=image->page.width; crop_image->page.height=image->page.height; offset.x=(ssize_t) (bounding_box.x+bounding_box.width); offset.y=(ssize_t) (bounding_box.y+bounding_box.height); if ((offset.x > (ssize_t) image->page.width) || (offset.y > (ssize_t) image->page.height)) { crop_image->page.width=bounding_box.width; crop_image->page.height=bounding_box.height; } crop_image->page.x=bounding_box.x; crop_image->page.y=bounding_box.y; /* Crop image. 
*/ status=MagickTrue; progress=0; image_view=AcquireVirtualCacheView(image,exception); crop_view=AcquireAuthenticCacheView(crop_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(status) \ magick_threads(image,crop_image,1,1) #endif for (y=0; y < (ssize_t) crop_image->rows; y++) { register const Quantum *magick_restrict p; register Quantum *magick_restrict q; register ssize_t x; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,page.x,page.y+y,crop_image->columns, 1,exception); q=QueueCacheViewAuthenticPixels(crop_view,0,y,crop_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) crop_image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { SetPixelBackgoundColor(crop_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait crop_traits=GetPixelChannelTraits(crop_image,channel); if ((traits == UndefinedPixelTrait) || (crop_traits == UndefinedPixelTrait)) continue; SetPixelChannel(crop_image,channel,p[i],q); } p+=GetPixelChannels(image); q+=GetPixelChannels(crop_image); } if (SyncCacheViewAuthenticPixels(crop_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_CropImage) #endif proceed=SetImageProgress(image,CropImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } crop_view=DestroyCacheView(crop_view); image_view=DestroyCacheView(image_view); crop_image->type=image->type; if (status == MagickFalse) crop_image=DestroyImage(crop_image); return(crop_image); } /* 
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C r o p I m a g e T o T i l e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CropImageToTiles() crops a single image, into a possible list of tiles. % This may include a single sub-region of the image. This basically applies % all the normal geometry flags for Crop. % % Image *CropImageToTiles(const Image *image, % const RectangleInfo *crop_geometry, ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. % % o exception: return any errors or warnings in this structure. % */ static inline double MagickRound(double x) { /* Round the fraction to nearest integer. */ if ((x-floor(x)) < (ceil(x)-x)) return(floor(x)); return(ceil(x)); } MagickExport Image *CropImageToTiles(const Image *image, const char *crop_geometry,ExceptionInfo *exception) { Image *next, *crop_image; MagickStatusType flags; RectangleInfo geometry; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); crop_image=NewImageList(); next=NewImageList(); flags=ParseGravityGeometry(image,crop_geometry,&geometry,exception); if ((flags & AreaValue) != 0) { PointInfo delta, offset; RectangleInfo crop; size_t height, width; /* Crop into NxM tiles (@ flag). */ width=image->columns; height=image->rows; if (geometry.width == 0) geometry.width=1; if (geometry.height == 0) geometry.height=1; if ((flags & AspectValue) == 0) { width-=(geometry.x < 0 ? -1 : 1)*geometry.x; height-=(geometry.y < 0 ? -1 : 1)*geometry.y; } else { width+=(geometry.x < 0 ? -1 : 1)*geometry.x; height+=(geometry.y < 0 ? 
-1 : 1)*geometry.y; } delta.x=(double) width/geometry.width; delta.y=(double) height/geometry.height; if (delta.x < 1.0) delta.x=1.0; if (delta.y < 1.0) delta.y=1.0; for (offset.y=0; offset.y < (double) height; ) { if ((flags & AspectValue) == 0) { crop.y=(ssize_t) MagickRound((double) (offset.y- (geometry.y > 0 ? 0 : geometry.y))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) MagickRound((double) (offset.y+ (geometry.y < 0 ? 0 : geometry.y))); } else { crop.y=(ssize_t) MagickRound((double) (offset.y- (geometry.y > 0 ? geometry.y : 0))); offset.y+=delta.y; /* increment now to find width */ crop.height=(size_t) MagickRound((double) (offset.y+(geometry.y < -1 ? geometry.y : 0))); } crop.height-=crop.y; crop.y+=image->page.y; for (offset.x=0; offset.x < (double) width; ) { if ((flags & AspectValue) == 0) { crop.x=(ssize_t) MagickRound((double) (offset.x- (geometry.x > 0 ? 0 : geometry.x))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) MagickRound((double) (offset.x+ (geometry.x < 0 ? 0 : geometry.x))); } else { crop.x=(ssize_t) MagickRound((double) (offset.x- (geometry.x > 0 ? geometry.x : 0))); offset.x+=delta.x; /* increment now to find height */ crop.width=(size_t) MagickRound((double) (offset.x+ (geometry.x < 0 ? geometry.x : 0))); } crop.width-=crop.x; crop.x+=image->page.x; next=CropImage(image,&crop,exception); if (next != (Image *) NULL) AppendImageToList(&crop_image,next); } } ClearMagickException(exception); return(crop_image); } if (((geometry.width == 0) && (geometry.height == 0)) || ((flags & XValue) != 0) || ((flags & YValue) != 0)) { /* Crop a single region at +X+Y. 
*/ crop_image=CropImage(image,&geometry,exception); if ((crop_image != (Image *) NULL) && ((flags & AspectValue) != 0)) { crop_image->page.width=geometry.width; crop_image->page.height=geometry.height; crop_image->page.x-=geometry.x; crop_image->page.y-=geometry.y; } return(crop_image); } if ((image->columns > geometry.width) || (image->rows > geometry.height)) { RectangleInfo page; size_t height, width; ssize_t x, y; /* Crop into tiles of fixed size WxH. */ page=image->page; if (page.width == 0) page.width=image->columns; if (page.height == 0) page.height=image->rows; width=geometry.width; if (width == 0) width=page.width; height=geometry.height; if (height == 0) height=page.height; next=NewImageList(); for (y=0; y < (ssize_t) page.height; y+=(ssize_t) height) { for (x=0; x < (ssize_t) page.width; x+=(ssize_t) width) { geometry.width=width; geometry.height=height; geometry.x=x; geometry.y=y; next=CropImage(image,&geometry,exception); if (next == (Image *) NULL) break; AppendImageToList(&crop_image,next); } if (next == (Image *) NULL) break; } return(crop_image); } return(CloneImage(image,0,0,MagickTrue,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % E x c e r p t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ExcerptImage() returns a excerpt of the image as defined by the geometry. % % The format of the ExcerptImage method is: % % Image *ExcerptImage(const Image *image,const RectangleInfo *geometry, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o geometry: Define the region of the image to extend with members % x, y, width, and height. % % o exception: return any errors or warnings in this structure. 
%
*/
MagickExport Image *ExcerptImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
#define ExcerptImageTag  "Excerpt/Image"

  CacheView
    *excerpt_view,
    *image_view;

  Image
    *excerpt_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  /*
    Allocate excerpt image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Destination has the geometry's extent; pixels are overwritten below. */
  excerpt_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (excerpt_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Excerpt each row.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  excerpt_view=AcquireAuthenticCacheView(excerpt_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,excerpt_image,excerpt_image->rows,1)
#endif
  for (y=0; y < (ssize_t) excerpt_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    /*
      Virtual reads at the geometry offset: regions outside the source image
      are synthesized by the image's virtual pixel method rather than failing.
    */
    p=GetCacheViewVirtualPixels(image_view,geometry->x,geometry->y+y,
      geometry->width,1,exception);
    q=GetCacheViewAuthenticPixels(excerpt_view,0,y,excerpt_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) excerpt_image->columns; x++)
    {
      register ssize_t
        i;

      /*
        Masked source pixels are replaced with the background color instead of
        being copied.
      */
      if (GetPixelWriteMask(image,p) == 0)
        {
          SetPixelBackgoundColor(excerpt_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(excerpt_image);
          continue;
        }
      /* Copy each channel both images define; skip undefined traits. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait excerpt_traits=GetPixelChannelTraits(excerpt_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (excerpt_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(excerpt_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(excerpt_image);
    }
    if (SyncCacheViewAuthenticPixels(excerpt_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ExcerptImage)
#endif
        proceed=SetImageProgress(image,ExcerptImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  excerpt_view=DestroyCacheView(excerpt_view);
  image_view=DestroyCacheView(image_view);
  excerpt_image->type=image->type;
  /* On any row failure the partial result is discarded and NULL returned. */
  if (status == MagickFalse)
    excerpt_image=DestroyImage(excerpt_image);
  return(excerpt_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t e n t I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtentImage() extends the image as defined by the geometry, gravity, and
%  image background color.  Set the (x,y) offset of the geometry to move the
%  original image relative to the extended image.
%
%  The format of the ExtentImage method is:
%
%      Image *ExtentImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to extend with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ExtentImage(const Image *image,
  const RectangleInfo *geometry,ExceptionInfo *exception)
{
  Image
    *extent_image;

  /*
    Allocate extent image.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(geometry != (const RectangleInfo *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Identity extent: return an unmodified clone. */
  if ((image->columns == geometry->width) &&
      (image->rows == geometry->height) &&
      (geometry->x == 0) && (geometry->y == 0))
    return(CloneImage(image,0,0,MagickTrue,exception));
  extent_image=CloneImage(image,geometry->width,geometry->height,MagickTrue,
    exception);
  if (extent_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Fill with the background color, then composite the original at the
    negated geometry offset (positive x/y shift the source left/up).
  */
  (void) SetImageBackgroundColor(extent_image,exception);
  (void) CompositeImage(extent_image,image,image->compose,MagickTrue,
    -geometry->x,-geometry->y,exception);
  return(extent_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l i p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlipImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis.
%
%  The format of the FlipImage method is:
%
%      Image *FlipImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlipImage(const Image *image,ExceptionInfo *exception)
{
#define FlipImageTag  "Flip/Image"

  CacheView
    *flip_view,
    *image_view;

  Image
    *flip_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flip_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flip_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flip image: source row y is written to destination row (rows-y-1).
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flip_view=AcquireAuthenticCacheView(flip_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flip_image,1,1)
#endif
  for (y=0; y < (ssize_t) flip_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /* Queue (not Get): every destination pixel in the row is overwritten. */
    q=QueueCacheViewAuthenticPixels(flip_view,0,(ssize_t) (flip_image->rows-y-
      1),flip_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) flip_image->columns; x++)
    {
      register ssize_t
        i;

      /* Masked source pixels become the background color. */
      if (GetPixelWriteMask(image,p) == 0)
        {
          SetPixelBackgoundColor(flip_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(flip_image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait flip_traits=GetPixelChannelTraits(flip_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flip_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flip_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(flip_image);
    }
    if (SyncCacheViewAuthenticPixels(flip_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlipImage)
#endif
        proceed=SetImageProgress(image,FlipImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flip_view=DestroyCacheView(flip_view);
  image_view=DestroyCacheView(image_view);
  flip_image->type=image->type;
  /* Mirror the page offset so the canvas placement flips with the pixels. */
  if (page.height != 0)
    page.y=(ssize_t) (page.height-flip_image->rows-page.y);
  flip_image->page=page;
  if (status == MagickFalse)
    flip_image=DestroyImage(flip_image);
  return(flip_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l o p I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlopImage() creates a horizontal mirror image by reflecting the pixels
%  around the central y-axis.
%
%  The format of the FlopImage method is:
%
%      Image *FlopImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlopImage(const Image *image,ExceptionInfo *exception)
{
#define FlopImageTag  "Flop/Image"

  CacheView
    *flop_view,
    *image_view;

  Image
    *flop_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  flop_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (flop_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Flop each row: p walks the source row forward while q walks the
    destination row backward, mirroring pixels around the vertical axis.
  */
  status=MagickTrue;
  progress=0;
  page=image->page;
  image_view=AcquireVirtualCacheView(image,exception);
  flop_view=AcquireAuthenticCacheView(flop_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(image,flop_image,1,1)
#endif
  for (y=0; y < (ssize_t) flop_image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    q=QueueCacheViewAuthenticPixels(flop_view,0,y,flop_image->columns,1,
      exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* Start q one past the row's end; it is decremented before each write. */
    q+=GetPixelChannels(flop_image)*flop_image->columns;
    for (x=0; x < (ssize_t) flop_image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(flop_image);
      /*
        NOTE(review): unlike FlipImage/ExcerptImage, a masked pixel is simply
        skipped here (the cloned destination value is left as-is) rather than
        replaced with the background color -- confirm this asymmetry is
        intentional.
      */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait flop_traits=GetPixelChannelTraits(flop_image,channel);
        if ((traits == UndefinedPixelTrait) ||
            (flop_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(flop_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(flop_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_FlopImage)
#endif
        proceed=SetImageProgress(image,FlopImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  flop_view=DestroyCacheView(flop_view);
  image_view=DestroyCacheView(image_view);
  flop_image->type=image->type;
  /* Mirror the page offset horizontally to match the flopped pixels. */
  if (page.width != 0)
    page.x=(ssize_t) (page.width-flop_image->columns-page.x);
  flop_image->page=page;
  if (status == MagickFalse)
    flop_image=DestroyImage(flop_image);
  return(flop_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   R o l l I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RollImage() offsets an image as defined by x_offset and y_offset.
%
%  The format of the RollImage method is:
%
%      Image *RollImage(const Image *image,const ssize_t x_offset,
%        const ssize_t y_offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x_offset: the number of columns to roll in the horizontal direction.
%
%    o y_offset: the number of rows to roll in the vertical direction.
%
%    o exception: return any errors or warnings in this structure.
%
*/
/*
  CopyImageRegion() copies a columns x rows block of pixels from (sx,sy) in
  source to (dx,dy) in destination, channel by channel.  Rows are processed
  in parallel; returns MagickFalse if any row fails to transfer.
*/
static MagickBooleanType CopyImageRegion(Image *destination,const Image *source,
  const size_t columns,const size_t rows,const ssize_t sx,const ssize_t sy,
  const ssize_t dx,const ssize_t dy,ExceptionInfo *exception)
{
  CacheView
    *source_view,
    *destination_view;

  MagickBooleanType
    status;

  ssize_t
    y;

  /* An empty region is trivially copied (zero rows falls out of the loop). */
  if (columns == 0)
    return(MagickTrue);
  status=MagickTrue;
  source_view=AcquireVirtualCacheView(source,exception);
  destination_view=AcquireAuthenticCacheView(destination,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(status) \
    magick_threads(source,destination,rows,1)
#endif
  for (y=0; y < (ssize_t) rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /*
      Transfer scanline.
    */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(source_view,sx,sy+y,columns,1,exception);
    q=GetCacheViewAuthenticPixels(destination_view,dx,dy+y,columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) columns; x++)
    {
      register ssize_t
        i;

      /* Masked source pixels are written as the background color. */
      if (GetPixelWriteMask(source,p) == 0)
        {
          SetPixelBackgoundColor(destination,q);
          p+=GetPixelChannels(source);
          q+=GetPixelChannels(destination);
          continue;
        }
      for (i=0; i < (ssize_t) GetPixelChannels(source); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(source,i);
        PixelTrait source_traits=GetPixelChannelTraits(source,channel);
        PixelTrait destination_traits=GetPixelChannelTraits(destination,
          channel);
        if ((source_traits == UndefinedPixelTrait) ||
            (destination_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(destination,channel,p[i],q);
      }
      p+=GetPixelChannels(source);
      q+=GetPixelChannels(destination);
    }
    sync=SyncCacheViewAuthenticPixels(destination_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
  }
  destination_view=DestroyCacheView(destination_view);
  source_view=DestroyCacheView(source_view);
  return(status);
}
MagickExport Image *RollImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,ExceptionInfo *exception)
{
#define RollImageTag  "Roll/Image"

  Image
    *roll_image;

  MagickStatusType
    status;

  RectangleInfo
    offset;

  /*
    Initialize roll image attributes.
  */
  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  roll_image=CloneImage(image,image->columns,image->rows,MagickTrue,exception);
  if (roll_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Normalize the roll offsets into [0,columns) x [0,rows); any multiple of
    the image size is a no-op, and negative offsets wrap around.
  */
  offset.x=x_offset;
  offset.y=y_offset;
  while (offset.x < 0)
    offset.x+=(ssize_t) image->columns;
  while (offset.x >= (ssize_t) image->columns)
    offset.x-=(ssize_t) image->columns;
  while (offset.y < 0)
    offset.y+=(ssize_t) image->rows;
  while (offset.y >= (ssize_t) image->rows)
    offset.y-=(ssize_t) image->rows;
  /*
    Roll image: the roll splits the image into four quadrants, each copied to
    its wrapped-around position in the destination.
  */
  status=CopyImageRegion(roll_image,image,(size_t) offset.x,
    (size_t) offset.y,(ssize_t) image->columns-offset.x,(ssize_t) image->rows-
    offset.y,0,0,exception);
  (void) SetImageProgress(image,RollImageTag,0,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,
    (size_t) offset.y,0,(ssize_t) image->rows-offset.y,offset.x,0,
    exception);
  (void) SetImageProgress(image,RollImageTag,1,3);
  status&=CopyImageRegion(roll_image,image,(size_t) offset.x,image->rows-
    offset.y,(ssize_t) image->columns-offset.x,0,0,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,2,3);
  status&=CopyImageRegion(roll_image,image,image->columns-offset.x,image->rows-
    offset.y,0,0,offset.x,offset.y,exception);
  (void) SetImageProgress(image,RollImageTag,3,3);
  roll_image->type=image->type;
  if (status == MagickFalse)
    roll_image=DestroyImage(roll_image);
  return(roll_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S h a v e I m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShaveImage() shaves pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the ShaveImage method is:
%
%      Image *ShaveImage(const Image *image,const RectangleInfo *shave_info,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o shave_image: Method ShaveImage returns a pointer to the shaved
%      image.  A null image is returned if there is a memory shortage or
%      if the image width or height is zero.
%
%    o image: the image.
%
%    o shave_info: Specifies a pointer to a RectangleInfo which defines the
%      region of the image to crop.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ShaveImage(const Image *image,
  const RectangleInfo *shave_info,ExceptionInfo *exception)
{
  Image
    *shave_image;

  RectangleInfo
    crop_region;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    Refuse a shave that would consume the entire width or height.
  */
  if (((2*shave_info->width) >= image->columns) ||
      ((2*shave_info->height) >= image->rows))
    ThrowImageException(OptionWarning,"GeometryDoesNotContainImage");
  /*
    Express the shave as a centered crop: trim shave_info->width columns from
    each side and shave_info->height rows from top and bottom.
  */
  SetGeometry(image,&crop_region);
  crop_region.x=(ssize_t) shave_info->width+image->page.x;
  crop_region.y=(ssize_t) shave_info->height+image->page.y;
  crop_region.width-=2*shave_info->width;
  crop_region.height-=2*shave_info->height;
  shave_image=CropImage(image,&crop_region,exception);
  if (shave_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Shrink the virtual canvas by the shaved amount so page geometry stays
    consistent with the smaller pixel area.
  */
  shave_image->page.x-=(ssize_t) shave_info->width;
  shave_image->page.y-=(ssize_t) shave_info->height;
  shave_image->page.width-=2*shave_info->width;
  shave_image->page.height-=2*shave_info->height;
  return(shave_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   S p l i c e I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImage() splices a solid color into the image as defined by the
%  geometry.
%
%  The format of the SpliceImage method is:
%
%      Image *SpliceImage(const Image *image,const RectangleInfo *geometry,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o geometry: Define the region of the image to splice with members
%      x, y, width, and height.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport Image *SpliceImage(const Image *image, const RectangleInfo *geometry,ExceptionInfo *exception) { #define SpliceImageTag "Splice/Image" CacheView *image_view, *splice_view; Image *splice_image; MagickBooleanType status; MagickOffsetType progress; RectangleInfo splice_geometry; ssize_t columns, y; /* Allocate splice image. */ assert(image != (const Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(geometry != (const RectangleInfo *) NULL); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); splice_geometry=(*geometry); splice_image=CloneImage(image,image->columns+splice_geometry.width, image->rows+splice_geometry.height,MagickTrue,exception); if (splice_image == (Image *) NULL) return((Image *) NULL); if (SetImageStorageClass(splice_image,DirectClass,exception) == MagickFalse) { splice_image=DestroyImage(splice_image); return((Image *) NULL); } if ((IsPixelInfoGray(&splice_image->background_color) == MagickFalse) && (IsGrayColorspace(splice_image->colorspace) != MagickFalse)) (void) SetImageColorspace(splice_image,sRGBColorspace,exception); if ((splice_image->background_color.alpha_trait != UndefinedPixelTrait) && (splice_image->alpha_trait == UndefinedPixelTrait)) (void) SetImageAlpha(splice_image,OpaqueAlpha,exception); (void) SetImageBackgroundColor(splice_image,exception); /* Respect image geometry. 
*/ switch (image->gravity) { default: case UndefinedGravity: case NorthWestGravity: break; case NorthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; break; } case NorthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; break; } case WestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.width/2; break; } case CenterGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case EastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height/2; break; } case SouthWestGravity: { splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width/2; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } case SouthEastGravity: { splice_geometry.x+=(ssize_t) splice_geometry.width; splice_geometry.y+=(ssize_t) splice_geometry.height; break; } } /* Splice image. 
*/ status=MagickTrue; progress=0; columns=MagickMin(splice_geometry.x,(ssize_t) splice_image->columns); image_view=AcquireVirtualCacheView(image,exception); splice_view=AcquireAuthenticCacheView(splice_image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=0; y < (ssize_t) splice_geometry.y; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; p=GetCacheViewVirtualPixels(image_view,0,y,splice_image->columns,1, exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,p) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); 
continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static,4) shared(progress,status) \ magick_threads(image,splice_image,1,1) #endif for (y=(ssize_t) (splice_geometry.y+splice_geometry.height); y < (ssize_t) splice_image->rows; y++) { register const Quantum *magick_restrict p; register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; if ((y < 0) || (y >= (ssize_t)splice_image->rows)) continue; p=GetCacheViewVirtualPixels(image_view,0,y-(ssize_t) splice_geometry.height, splice_image->columns,1,exception); q=QueueCacheViewAuthenticPixels(splice_view,0,y,splice_image->columns,1, exception); if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL)) { status=MagickFalse; continue; } for (x=0; x < columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); 
q+=GetPixelChannels(splice_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } for ( ; x < (ssize_t) (splice_geometry.x+splice_geometry.width); x++) q+=GetPixelChannels(splice_image); for ( ; x < (ssize_t) splice_image->columns; x++) { register ssize_t i; if (GetPixelWriteMask(image,q) == 0) { SetPixelBackgoundColor(splice_image,q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); continue; } for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { PixelChannel channel=GetPixelChannelChannel(image,i); PixelTrait traits=GetPixelChannelTraits(image,channel); PixelTrait splice_traits=GetPixelChannelTraits(splice_image,channel); if ((traits == UndefinedPixelTrait) || (splice_traits == UndefinedPixelTrait)) continue; SetPixelChannel(splice_image,channel,p[i],q); } SetPixelRed(splice_image,GetPixelRed(image,p),q); SetPixelGreen(splice_image,GetPixelGreen(image,p),q); SetPixelBlue(splice_image,GetPixelBlue(image,p),q); SetPixelAlpha(splice_image,GetPixelAlpha(image,p),q); p+=GetPixelChannels(image); q+=GetPixelChannels(splice_image); } if (SyncCacheViewAuthenticPixels(splice_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_TransposeImage) #endif 
proceed=SetImageProgress(image,SpliceImageTag,progress++, splice_image->rows); if (proceed == MagickFalse) status=MagickFalse; } } splice_view=DestroyCacheView(splice_view); image_view=DestroyCacheView(image_view); if (status == MagickFalse) splice_image=DestroyImage(splice_image); return(splice_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s f o r m I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransformImage() is a convenience method that behaves like ResizeImage() or % CropImage() but accepts scaling and/or cropping information as a region % geometry specification. If the operation fails, the original image handle % is left as is. % % This should only be used for single images. % % This function destroys what it assumes to be a single image list. % If the input image is part of a larger list, all other images in that list % will be simply 'lost', not destroyed. % % Also if the crop generates a list of images only the first image is resized. % And finally if the crop succeeds and the resize failed, you will get a % cropped image, as well as a 'false' or 'failed' report. % % This function and should probably be deprecated in favor of direct calls % to CropImageToTiles() or ResizeImage(), as appropriate. % % The format of the TransformImage method is: % % MagickBooleanType TransformImage(Image **image,const char *crop_geometry, % const char *image_geometry,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image The transformed image is returned as this parameter. % % o crop_geometry: A crop geometry string. This geometry defines a % subregion of the image to crop. % % o image_geometry: An image geometry string. This geometry defines the % final size of the image. % % o exception: return any errors or warnings in this structure. 
% */ MagickPrivate MagickBooleanType TransformImage(Image **image, const char *crop_geometry,const char *image_geometry,ExceptionInfo *exception) { Image *resize_image, *transform_image; RectangleInfo geometry; assert(image != (Image **) NULL); assert((*image)->signature == MagickCoreSignature); if ((*image)->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",(*image)->filename); transform_image=(*image); if (crop_geometry != (const char *) NULL) { Image *crop_image; /* Crop image to a user specified size. */ crop_image=CropImageToTiles(*image,crop_geometry,exception); if (crop_image == (Image *) NULL) transform_image=CloneImage(*image,0,0,MagickTrue,exception); else { transform_image=DestroyImage(transform_image); transform_image=GetFirstImageInList(crop_image); } *image=transform_image; } if (image_geometry == (const char *) NULL) return(MagickTrue); /* Scale image to a user specified size. */ (void) ParseRegionGeometry(transform_image,image_geometry,&geometry, exception); if ((transform_image->columns == geometry.width) && (transform_image->rows == geometry.height)) return(MagickTrue); resize_image=ResizeImage(transform_image,geometry.width,geometry.height, transform_image->filter,exception); if (resize_image == (Image *) NULL) return(MagickFalse); transform_image=DestroyImage(transform_image); transform_image=resize_image; *image=transform_image; return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % T r a n s p o s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % TransposeImage() creates a horizontal mirror image by reflecting the pixels % around the central y-axis while rotating them by 90 degrees. % % The format of the TransposeImage method is: % % Image *TransposeImage(const Image *image,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *TransposeImage(const Image *image,ExceptionInfo *exception)
{
#define TransposeImageTag  "Transpose/Image"

  CacheView
    *image_view,
    *transpose_view;

  Image
    *transpose_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Destination has swapped dimensions: columns x rows -> rows x columns. */
  transpose_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transpose_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transpose image.  Each source row y (read bottom-to-top) becomes the
    destination column (rows-y-1); rows are processed in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transpose_view=AcquireAuthenticCacheView(transpose_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transpose_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another iteration already failed; skip remaining work. */
    if (status == MagickFalse)
      continue;
    /* Read one full source row, counting from the bottom of the image. */
    p=GetCacheViewVirtualPixels(image_view,0,(ssize_t) image->rows-y-1,
      image->columns,1,exception);
    /* Queue one full destination column (width 1, height = source columns). */
    q=QueueCacheViewAuthenticPixels(transpose_view,(ssize_t) (image->rows-y-1),
      0,1,transpose_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /*
        NOTE(review): the write mask is tested through q, which addresses
        transpose_image's pixel buffer but is interpreted with image's
        channel map; sibling code (e.g. TransverseImage below) tests p
        instead.  Looks suspicious -- confirm against upstream before
        changing.
      */
      if (GetPixelWriteMask(image,q) == 0)
        {
          /* [sic] "Backgound" is the actual MagickCore API spelling. */
          SetPixelBackgoundColor(transpose_image,q);
          p+=GetPixelChannels(image);
          q+=GetPixelChannels(transpose_image);
          continue;
        }
      /* Copy every channel that is defined in both source and destination. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait transpose_traits=GetPixelChannelTraits(transpose_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transpose_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transpose_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(transpose_image);
    }
    if (SyncCacheViewAuthenticPixels(transpose_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ must stay inside the critical section: it is shared. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransposeImage)
#endif
        proceed=SetImageProgress(image,TransposeImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transpose_view=DestroyCacheView(transpose_view);
  image_view=DestroyCacheView(image_view);
  transpose_image->type=image->type;
  /* The page geometry rotates along with the pixels. */
  page=transpose_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  transpose_image->page=page;
  if (status == MagickFalse)
    transpose_image=DestroyImage(transpose_image);
  return(transpose_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r a n s v e r s e I m a g e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransverseImage() creates a vertical mirror image by reflecting the pixels
%  around the central x-axis while rotating them by 270 degrees.
%
%  The format of the TransverseImage method is:
%
%      Image *TransverseImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *TransverseImage(const Image *image,ExceptionInfo *exception)
{
#define TransverseImageTag  "Transverse/Image"

  CacheView
    *image_view,
    *transverse_view;

  Image
    *transverse_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  RectangleInfo
    page;

  ssize_t
    y;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /* Destination has swapped dimensions: columns x rows -> rows x columns. */
  transverse_image=CloneImage(image,image->rows,image->columns,MagickTrue,
    exception);
  if (transverse_image == (Image *) NULL)
    return((Image *) NULL);
  /*
    Transverse image.  Each source row y (read top-to-bottom) is written
    right-to-left into destination column (rows-y-1); rows in parallel.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireVirtualCacheView(image,exception);
  transverse_view=AcquireAuthenticCacheView(transverse_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4) shared(progress,status) \
    magick_threads(image,transverse_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    MagickBooleanType
      sync;

    register const Quantum
      *magick_restrict p;

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    /* Another iteration already failed; skip remaining work. */
    if (status == MagickFalse)
      continue;
    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    /* Queue one destination column (width 1, height = source columns). */
    q=QueueCacheViewAuthenticPixels(transverse_view,(ssize_t) (image->rows-y-1),
      0,1,transverse_image->rows,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /*
      Start one pixel past the end of the queued column; q is stepped
      backwards at the top of the loop, producing the vertical mirror.
    */
    q+=GetPixelChannels(transverse_image)*image->columns;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      q-=GetPixelChannels(transverse_image);
      /* Masked source pixels are skipped; the destination pixel at q is
         left untouched (q was already stepped back for this x). */
      if (GetPixelWriteMask(image,p) == 0)
        {
          p+=GetPixelChannels(image);
          continue;
        }
      /* Copy every channel that is defined in both source and destination. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel=GetPixelChannelChannel(image,i);
        PixelTrait traits=GetPixelChannelTraits(image,channel);
        PixelTrait transverse_traits=GetPixelChannelTraits(transverse_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (transverse_traits == UndefinedPixelTrait))
          continue;
        SetPixelChannel(transverse_image,channel,p[i],q);
      }
      p+=GetPixelChannels(image);
    }
    sync=SyncCacheViewAuthenticPixels(transverse_view,exception);
    if (sync == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

        /* progress++ must stay inside the critical section: it is shared. */
#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_TransverseImage)
#endif
        proceed=SetImageProgress(image,TransverseImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  transverse_view=DestroyCacheView(transverse_view);
  image_view=DestroyCacheView(image_view);
  transverse_image->type=image->type;
  /* Rotate the page geometry and mirror its offsets within the canvas. */
  page=transverse_image->page;
  Swap(page.width,page.height);
  Swap(page.x,page.y);
  if (page.width != 0)
    page.x=(ssize_t) (page.width-transverse_image->columns-page.x);
  if (page.height != 0)
    page.y=(ssize_t) (page.height-transverse_image->rows-page.y);
  transverse_image->page=page;
  if (status == MagickFalse)
    transverse_image=DestroyImage(transverse_image);
  return(transverse_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   T r i m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TrimImage() trims pixels from the image edges.  It allocates the memory
%  necessary for the new Image structure and returns a pointer to the new
%  image.
%
%  The format of the TrimImage method is:
%
%      Image *TrimImage(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport Image *TrimImage(const Image *image,ExceptionInfo *exception)
{
  Image
    *blank_image;

  RectangleInfo
    bounds;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  bounds=GetImageBoundingBox(image,exception);
  if ((bounds.width != 0) && (bounds.height != 0))
    {
      /*
        Crop to the smallest rectangle enclosing all non-border pixels,
        expressed in the image's page coordinate system.
      */
      bounds.x+=image->page.x;
      bounds.y+=image->page.y;
      return(CropImage(image,&bounds,exception));
    }
  /*
    Degenerate case: the whole image matches the border color.  Return a
    1x1 transparent canvas; page offsets of -1 mark it as fully trimmed.
  */
  blank_image=CloneImage(image,1,1,MagickTrue,exception);
  if (blank_image == (Image *) NULL)
    return((Image *) NULL);
  blank_image->background_color.alpha=(Quantum) TransparentAlpha;
  blank_image->alpha_trait=BlendPixelTrait;
  (void) SetImageBackgroundColor(blank_image,exception);
  blank_image->page=image->page;
  blank_image->page.x=(-1);
  blank_image->page.y=(-1);
  return(blank_image);
}
atomic_messages.c
// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp -ferror-limit 100 %s -Wuninitialized

// RUN: %clang_cc1 -verify=expected,omp45 -fopenmp-simd -fopenmp-version=45 -ferror-limit 100 %s -Wuninitialized
// RUN: %clang_cc1 -verify=expected,omp50 -fopenmp-simd -ferror-limit 100 %s -Wuninitialized

// Diagnostics test for '#pragma omp atomic'.  The 'expected-*' comments are
// clang -verify directives; '@+N' offsets are relative to the directive's own
// line, so new text must never be inserted between a directive and its target.

// -Wuninitialized still fires through an atomic read.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp atomic read
  argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

// Compound statements and branches into/out of the atomic region are invalid.
int foo() {
L1:
  foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
    goto L1;
  }
  goto L2;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
  {
    foo();
  L2:
    foo();
  }
  return 0;
}

// Non-scalar type used to exercise the "scalar type" requirement below.
struct S {
  int a;
};

// Diagnostics for the 'read' clause.
int readint() {
  int a = 0, b = 0;
// Test for atomic read
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic read
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected lvalue expression}}
  a = 0;
#pragma omp atomic read
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
  a = b;
  return 0;
}

int readS() {
  struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}} expected-error@+1 {{unexpected OpenMP clause 'allocate' in directive '#pragma omp atomic'}}
#pragma omp atomic read read allocate(a)
  // expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected expression of scalar type}}
  a = b;
  return a.a;
}

// Diagnostics for the 'write' clause.
int writeint() {
  int a = 0, b = 0;
// Test for atomic write
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  foo();
#pragma omp atomic write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in assignment operator}}
  a += b;
#pragma omp atomic write
  a = 0;
#pragma omp atomic write
  a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  a = b;
  return 0;
}

int writeS() {
  struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
  // expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
  // expected-note@+1 {{expected expression of scalar type}}
  a = b;
  return a.a;
}

// Diagnostics for the 'update' clause (also the default form of 'atomic').
int updateint() {
  int a = 0, b = 0;
// Test for atomic update
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected an expression statement}}
  ;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in binary or unary operator}}
  foo();
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected built-in binary operator}}
  a = b;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = b || a;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  a = a && b;
#pragma omp atomic update
  // expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = (float)a + b;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = 2 * b;
#pragma omp atomic
  // expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  a = b + *&a;
#pragma omp atomic update
  *&a = *&a + 2;
#pragma omp atomic update
  a++;
#pragma omp atomic
  ++a;
#pragma omp atomic update
  a--;
#pragma omp atomic
  --a;
#pragma omp atomic update
  a += b;
#pragma omp atomic
  a %= b;
#pragma omp atomic update
  a *= b;
#pragma omp atomic
  a -= b;
#pragma omp atomic update
  a /= b;
#pragma omp atomic
  a &= b;
#pragma omp atomic update
  a ^= b;
#pragma omp atomic
  a |= b;
#pragma omp atomic update
  a <<= b;
#pragma omp atomic
  a >>= b;
#pragma omp atomic update
  a = b + a;
#pragma omp atomic
  a = a * b;
#pragma omp atomic update
  a = b - a;
#pragma omp atomic
  a = a / b;
#pragma omp atomic update
  a = b & a;
#pragma omp atomic
  a = a ^ b;
#pragma omp atomic update
  a = b | a;
#pragma omp atomic
  a = a << b;
#pragma omp atomic
  a = b >> a;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}}
#pragma omp atomic update update
  a /= b;
  return 0;
}

// Diagnostics for the 'capture' clause (expression and compound forms).
int captureint() {
  int a = 0, b = 0, c = 0;
// Test for atomic capture
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected compound statement}}
  ;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  foo();
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected built-in binary or unary operator}}
  a = b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = b || a;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
  b = a = a && b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = (float)a + b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = 2 * b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected assignment expression}}
  a = b + *&a;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected exactly two expression statements}}
  { a = b; }
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected exactly two expression statements}}
  {}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b;a = b;}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an lvalue expression with scalar type}}
  // expected-note@+1 {{expected in right hand side of the first expression}}
  {a = b; a = b || a;}
#pragma omp atomic capture
  {b = a; a = a && b;}
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = (float)a + b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = 2 * b;
#pragma omp atomic capture
  // expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both lvalue expressions with scalar type}}
  // expected-note@+1 {{expected in right hand side of expression}}
  b = a = b + *&a;
#pragma omp atomic capture
  c = *&a = *&a + 2;
#pragma omp atomic capture
  c = a++;
#pragma omp atomic capture
  c = ++a;
#pragma omp atomic capture
  c = a--;
#pragma omp atomic capture
  c = --a;
#pragma omp atomic capture
  c = a += b;
#pragma omp atomic capture
  c = a %= b;
#pragma omp atomic capture
  c = a *= b;
#pragma omp atomic capture
  c = a -= b;
#pragma omp atomic capture
  c = a /= b;
#pragma omp atomic capture
  c = a &= b;
#pragma omp atomic capture
  c = a ^= b;
#pragma omp atomic capture
  c = a |= b;
#pragma omp atomic capture
  c = a <<= b;
#pragma omp atomic capture
  c = a >>= b;
#pragma omp atomic capture
  c = a = b + a;
#pragma omp atomic capture
  c = a = a * b;
#pragma omp atomic capture
  c = a = b - a;
#pragma omp atomic capture
  c = a = a / b;
#pragma omp atomic capture
  c = a = b & a;
#pragma omp atomic capture
  c = a = a ^ b;
#pragma omp atomic capture
  c = a = b | a;
#pragma omp atomic capture
  c = a = a << b;
#pragma omp atomic capture
  c = a = b >> a;
#pragma omp atomic capture
  { c = *&a; *&a = *&a + 2;}
#pragma omp atomic capture
  { *&a = *&a + 2; c = *&a;}
#pragma omp atomic capture
  {c = a; a++;}
#pragma omp atomic capture
  {c = a; (a)++;}
#pragma omp atomic capture
  {++a;c = a;}
#pragma omp atomic capture
  {c = a;a--;}
#pragma omp atomic capture
  {--a;c = a;}
#pragma omp atomic capture
  {c = a; a += b;}
#pragma omp atomic capture
  {c = a; (a) += b;}
#pragma omp atomic capture
  {a %= b; c = a;}
#pragma omp atomic capture
  {c = a; a *= b;}
#pragma omp atomic capture
  {a -= b;c = a;}
#pragma omp atomic capture
  {c = a; a /= b;}
#pragma omp atomic capture
  {a &= b; c = a;}
#pragma omp atomic capture
  {c = a; a ^= b;}
#pragma omp atomic capture
  {a |= b; c = a;}
#pragma omp atomic capture
  {c = a; a <<= b;}
#pragma omp atomic capture
  {a >>= b; c = a;}
#pragma omp atomic capture
  {c = a; a = b + a;}
#pragma omp atomic capture
  {a = a * b; c = a;}
#pragma omp atomic capture
  {c = a; a = b - a;}
#pragma omp atomic capture
  {a = a / b; c = a;}
#pragma omp atomic capture
  {c = a; a = b & a;}
#pragma omp atomic capture
  {a = a ^ b; c = a;}
#pragma omp atomic capture
  {c = a; a = b | a;}
#pragma omp atomic capture
  {a = a << b; c = a;}
#pragma omp atomic capture
  {c = a; a = b >> a;}
#pragma omp atomic capture
  {c = a; a = foo();}
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}}
#pragma omp atomic capture capture
  b = a /= b;
  return 0;
}

// Parse/semantic diagnostics for the 'hint' clause (OpenMP 5.0; rejected
// under -fopenmp-version=45, hence the omp45-/omp50- prefixed directives).
void hint() {
  int a = 0;
#pragma omp atomic hint // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected '(' after 'hint'}}
  a += 1;
#pragma omp atomic hint( // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(+ // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected expression}} expected-error {{expected ')'}} expected-note {{to match this '('}}
  a += 1;
#pragma omp atomic hint(a // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{expected ')'}} expected-note {{to match this '('}} omp50-error {{integer constant expression}}
  a += 1;
#pragma omp atomic hint(a) // omp45-error {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} omp50-error {{integer constant expression}}
  a += 1;
#pragma omp atomic hint(1) hint(1) // omp45-error 2 {{unexpected OpenMP clause 'hint' in directive '#pragma omp atomic'}} expected-error {{directive '#pragma omp atomic' cannot contain more than one 'hint' clause}}
  a += 1;
}
GB_bitmap_select_template.c
//------------------------------------------------------------------------------
// GB_bitmap_select_template: C=select(A,thunk) if A is bitmap or full
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// Ab and Cb can be aliased, if A is bitmap and the selection is done in-place.
// Ax and Cx are not aliased.

// TODO: If done in-place, Cx can be passed as NULL.  Then if A is not bitmap,
// C->b needs to be allocated, but not C->x.

// TODO: use a single GB_memcpy for the values, regardless of selectop,
// if no typecasting is being done.

// Template body: included with exactly one GB_*_SELECTOR macro defined, which
// picks the predicate compiled into the loop.  Free names (Cb, Cx, ithunk,
// nthreads, cnvals_handle, GB_ATYPE, ...) are bound by the including file.
{

    // A's bitmap (NULL when A is full -- GBB treats that as all-present),
    // values, and dimensions.
    int8_t *Ab = A->b ;
    GB_ATYPE *restrict Ax = (GB_ATYPE *) A->x ;
    const int64_t avlen = A->vlen ;
    const int64_t avdim = A->vdim ;
    const size_t asize = A->type->size ;
    const int64_t anz = avlen * avdim ;

    // cnvals counts the entries kept in C; reduced across threads.
    int64_t pA, cnvals = 0 ;
    #pragma omp parallel for num_threads(nthreads) schedule(static) \
        reduction(+:cnvals)
    for (pA = 0 ; pA < anz ; pA++)
    {
        // A is held by column: pA = i + j*avlen for entry A(i,j).
        int64_t i = pA % avlen ;
        int64_t j = pA / avlen ;

        #if defined ( GB_ENTRY_SELECTOR )
        // test the existence and value of A(i,j)
        // NOTE(review): the value test runs even when GBB(Ab,pA) is 0, i.e.
        // on a vacant slot of the bitmap -- presumably benign because the
        // result is only used after the && below; confirm for user-defined
        // select operators.
        GB_TEST_VALUE_OF_ENTRY (keep, pA) ;
        #endif

        // C(i,j) exists iff A(i,j) exists and the selector predicate holds.
        int8_t cb = GBB (Ab, pA) &&
            #if defined ( GB_ENTRY_SELECTOR )
            keep ;
            #elif defined ( GB_TRIL_SELECTOR )
            (j-i <= ithunk) ;
            #elif defined ( GB_TRIU_SELECTOR )
            (j-i >= ithunk) ;
            #elif defined ( GB_DIAG_SELECTOR )
            (j-i == ithunk) ;
            #elif defined ( GB_OFFDIAG_SELECTOR )
            (j-i != ithunk) ;
            #elif defined ( GB_ROWINDEX_SELECTOR )
            (i+ithunk != 0) ;
            #elif defined ( GB_COLINDEX_SELECTOR )
            (j+ithunk != 0) ;
            #elif defined ( GB_COLLE_SELECTOR )
            (j <= ithunk) ;
            #elif defined ( GB_COLGT_SELECTOR )
            (j > ithunk) ;
            #elif defined ( GB_ROWLE_SELECTOR )
            (i <= ithunk) ;
            #elif defined ( GB_ROWGT_SELECTOR )
            (i > ithunk) ;
            #endif

        Cb [pA] = cb ;
        cnvals += cb ;

        // The value is copied unconditionally, even when cb is 0; Cb marks
        // the entry as absent, so the stale value is never observed (see the
        // single-GB_memcpy TODO above).
        {
            // Cx [pA] = Ax [pA]
            GB_SELECT_ENTRY (Cx, pA, Ax, pA) ;
        }
    }
    (*cnvals_handle) = cnvals ;
}
stats.c
//----------------------------------------------------------------------------- // stats.c // // Project: EPA SWMM5 // Version: 5.1 // Date: 03/20/14 (Build 5.1.001) // 09/15/14 (Build 5.1.007) // 03/19/15 (Build 5.1.008) // 08/01/16 (Build 5.1.011) // 03/14/17 (Build 5.1.012) // 05/10/18 (Build 5.1.013) // Author: L. Rossman (EPA) // R. Dickinson (CDM) // // Simulation statistics functions. // // Build 5.1.007: // - Exfiltration losses added to storage node statistics. // // Build 5.1.008: // - Support for updating groundwater statistics added. // - Support for updating maximum reported nodal depths added. // - OpenMP parallelization applied to updating node and link flow statistics. // - Updating of time that conduit is upstrm/dnstrm full was modified. // // Build 5.1.011: // - Surcharging is now evaluated only under dynamic wave flow routing and // storage nodes cannot be classified as surcharged. // // Build 5.1.012: // - Time step statistics now evaluated only in non-steady state periods. // - Check for full conduit flow now accounts for number of barrels. // // Build 5.1.013: // - Include omp.h protected against lack of compiler support for OpenMP. // - Statistics on impervious and pervious runoff totals added. // - Storage nodes with a non-zero surcharge depth (e.g. enclosed tanks) // can now be classified as being surcharged. 
//-----------------------------------------------------------------------------
#define _CRT_SECURE_NO_DEPRECATE

#include <stdlib.h>
#include <string.h>
#include <math.h>
#include "headers.h"
#include "swmm5.h"
#if defined(_OPENMP)                                                           //(5.1.013)
#include <omp.h>
#endif

//-----------------------------------------------------------------------------
//  Shared variables
//-----------------------------------------------------------------------------
#define MAX_STATS 5                    // number of "most critical" objects kept
static TSysStats       SysStats;                   // system-wide time step stats
static TMaxStats       MaxMassBalErrs[MAX_STATS];  // nodes with worst mass balance
static TMaxStats       MaxCourantCrit[MAX_STATS];  // objects most often Courant-critical
static TMaxStats       MaxFlowTurns[MAX_STATS];    // links with most flow reversals
static double          SysOutfallFlow;             // total system outfall flow in
                                                   // the current routing period

//-----------------------------------------------------------------------------
//  Exportable variables (shared with statsrpt.c)
//-----------------------------------------------------------------------------
TSubcatchStats* SubcatchStats;
TNodeStats*     NodeStats;
TLinkStats*     LinkStats;
TStorageStats*  StorageStats;
TOutfallStats*  OutfallStats;
TPumpStats*     PumpStats;
double          MaxOutfallFlow;
double          MaxRunoffFlow;

//-----------------------------------------------------------------------------
//  Imported variables
//-----------------------------------------------------------------------------
extern double*  NodeInflow;            // defined in massbal.c
extern double*  NodeOutflow;           // defined in massbal.c

//-----------------------------------------------------------------------------
//  External functions (declared in funcs.h)
//-----------------------------------------------------------------------------
//  stats_open                    (called from swmm_start in swmm5.c)
//  stats_close                   (called from swmm_end in swmm5.c)
//  stats_report                  (called from swmm_end in swmm5.c)
//  stats_updateSubcatchStats     (called from subcatch_getRunoff)
//  stats_updateGwaterStats       (called from gwater_getGroundwater)
//  stats_updateFlowStats         (called from routing_execute)
//  stats_updateCriticalTimeCount (called from getVariableStep in dynwave.c)
//  stats_updateMaxNodeDepth      (called from output_saveNodeResults)

//-----------------------------------------------------------------------------
//  Local functions
//-----------------------------------------------------------------------------
static void stats_updateNodeStats(int node, double tStep, DateTime aDate);
static void stats_updateLinkStats(int link, double tStep, DateTime aDate);
static void stats_findMaxStats(void);
static void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x);

//=============================================================================

int stats_open()
//
//  Input:   none
//  Output:  returns an error code (0 on success, ErrorCode on memory failure)
//  Purpose: opens the simulation statistics system by allocating and
//           zero-initializing all per-object statistics arrays.
//
{
    int j, k;

    // --- set all pointers to NULL
    NodeStats = NULL;
    LinkStats = NULL;
    StorageStats = NULL;
    OutfallStats = NULL;
    PumpStats = NULL;

    // --- allocate memory for & initialize subcatchment statistics
    SubcatchStats = NULL;
    if ( Nobjects[SUBCATCH] > 0 )
    {
        SubcatchStats = (TSubcatchStats *) calloc(Nobjects[SUBCATCH],
                                                  sizeof(TSubcatchStats));
        if ( !SubcatchStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            SubcatchStats[j].precip  = 0.0;
            SubcatchStats[j].runon   = 0.0;
            SubcatchStats[j].evap    = 0.0;
            SubcatchStats[j].infil   = 0.0;
            SubcatchStats[j].runoff  = 0.0;
            SubcatchStats[j].maxFlow = 0.0;
            SubcatchStats[j].impervRunoff = 0.0;                               //(5.1.013)
            SubcatchStats[j].pervRunoff   = 0.0;                               //
        }

        // --- initialize each subcatchment's groundwater statistics
        for (j=0; j<Nobjects[SUBCATCH]; j++)
        {
            if ( Subcatch[j].groundwater == NULL ) continue;
            Subcatch[j].groundwater->stats.avgUpperMoist = 0.0;
            Subcatch[j].groundwater->stats.avgWaterTable = 0.0;
            Subcatch[j].groundwater->stats.infil = 0.0;
            Subcatch[j].groundwater->stats.latFlow = 0.0;
            Subcatch[j].groundwater->stats.deepFlow = 0.0;
            Subcatch[j].groundwater->stats.evap = 0.0;
            Subcatch[j].groundwater->stats.maxFlow = 0.0;
        }
    }

    // --- allocate memory for node & link stats
    //     (only allocated when links exist, i.e. flow routing is performed)
    if ( Nobjects[LINK] > 0 )
    {
        NodeStats = (TNodeStats *) calloc(Nobjects[NODE], sizeof(TNodeStats));
        LinkStats = (TLinkStats *) calloc(Nobjects[LINK], sizeof(TLinkStats));
        if ( !NodeStats || !LinkStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
    }

    // --- initialize node stats
    if ( NodeStats ) for ( j = 0; j < Nobjects[NODE]; j++ )
    {
        NodeStats[j].avgDepth = 0.0;
        NodeStats[j].maxDepth = 0.0;
        NodeStats[j].maxDepthDate = StartDateTime;
        NodeStats[j].maxRptDepth = 0.0;
        NodeStats[j].volFlooded = 0.0;
        NodeStats[j].timeFlooded = 0.0;
        NodeStats[j].timeSurcharged = 0.0;
        NodeStats[j].timeCourantCritical = 0.0;
        NodeStats[j].totLatFlow = 0.0;
        NodeStats[j].maxLatFlow = 0.0;
        NodeStats[j].maxInflow = 0.0;
        NodeStats[j].maxOverflow = 0.0;
        NodeStats[j].maxPondedVol = 0.0;
        NodeStats[j].maxInflowDate = StartDateTime;
        NodeStats[j].maxOverflowDate = StartDateTime;
    }

    // --- initialize link stats
    if ( LinkStats ) for ( j = 0; j < Nobjects[LINK]; j++ )
    {
        LinkStats[j].maxFlow = 0.0;
        LinkStats[j].maxVeloc = 0.0;
        LinkStats[j].maxDepth = 0.0;
        LinkStats[j].timeSurcharged = 0.0;
        LinkStats[j].timeFullUpstream = 0.0;
        LinkStats[j].timeFullDnstream = 0.0;
        LinkStats[j].timeFullFlow = 0.0;
        LinkStats[j].timeCapacityLimited = 0.0;
        LinkStats[j].timeCourantCritical = 0.0;
        for (k=0; k<MAX_FLOW_CLASSES; k++)
            LinkStats[j].timeInFlowClass[k] = 0.0;
        LinkStats[j].flowTurns = 0;
        LinkStats[j].flowTurnSign = 0;
    }

    // --- allocate memory for & initialize storage unit statistics
    if ( Nnodes[STORAGE] > 0 )
    {
        StorageStats = (TStorageStats *) calloc(Nnodes[STORAGE],
                           sizeof(TStorageStats));
        if ( !StorageStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        // --- scan all nodes, mapping each storage node onto its sub-index
        else for ( k = 0; k < Nobjects[NODE]; k++ )
        {
            if ( Node[k].type != STORAGE ) continue;
            j = Node[k].subIndex;
            StorageStats[j].initVol = Node[k].newVolume;
            StorageStats[j].avgVol = 0.0;
            StorageStats[j].maxVol = 0.0;
            StorageStats[j].maxFlow = 0.0;
            StorageStats[j].evapLosses = 0.0;
            StorageStats[j].exfilLosses = 0.0;
            StorageStats[j].maxVolDate = StartDateTime;
        }
    }

    // --- allocate memory for & initialize outfall statistics
    if ( Nnodes[OUTFALL] > 0 )
    {
        OutfallStats = (TOutfallStats *) calloc(Nnodes[OUTFALL],
                           sizeof(TOutfallStats));
        if ( !OutfallStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nnodes[OUTFALL]; j++ )
        {
            OutfallStats[j].avgFlow = 0.0;
            OutfallStats[j].maxFlow = 0.0;
            OutfallStats[j].totalPeriods = 0;

            // --- each outfall carries a per-pollutant load array
            if ( Nobjects[POLLUT] > 0 )
            {
                OutfallStats[j].totalLoad =
                    (double *) calloc(Nobjects[POLLUT], sizeof(double));
                if ( !OutfallStats[j].totalLoad )
                {
                    report_writeErrorMsg(ERR_MEMORY, "");
                    return ErrorCode;
                }
                for (k=0; k<Nobjects[POLLUT]; k++)
                    OutfallStats[j].totalLoad[k] = 0.0;
            }
            else OutfallStats[j].totalLoad = NULL;
        }
    }

    // --- allocate memory & initialize pumping statistics
    if ( Nlinks[PUMP] > 0 )
    {
        PumpStats = (TPumpStats *) calloc(Nlinks[PUMP], sizeof(TPumpStats));
        if ( !PumpStats )
        {
            report_writeErrorMsg(ERR_MEMORY, "");
            return ErrorCode;
        }
        else for ( j = 0; j < Nlinks[PUMP]; j++ )
        {
            PumpStats[j].utilized = 0.0;
            PumpStats[j].minFlow  = 0.0;
            PumpStats[j].avgFlow  = 0.0;
            PumpStats[j].maxFlow  = 0.0;
            PumpStats[j].volume   = 0.0;
            PumpStats[j].energy   = 0.0;
            PumpStats[j].startUps = 0;
            PumpStats[j].offCurveLow = 0.0;
            PumpStats[j].offCurveHigh = 0.0;
        }
    }

    // --- initialize system stats
    MaxRunoffFlow = 0.0;
    MaxOutfallFlow = 0.0;
    SysStats.maxTimeStep = 0.0;
    SysStats.minTimeStep = RouteStep;    // min. starts at the nominal step
    SysStats.avgTimeStep = 0.0;
    SysStats.avgStepCount = 0.0;
    SysStats.steadyStateCount = 0.0;
    return 0;
}

//=============================================================================

void stats_close()
//
//  Input:   none
//  Output:  none
//  Purpose: closes the simulation statistics system, freeing all arrays
//           allocated in stats_open (including per-outfall pollutant loads).
//
{
    int j;

    FREE(SubcatchStats);
    FREE(NodeStats);
    FREE(LinkStats);
    FREE(StorageStats);
    if ( OutfallStats )
    {
        // --- free each outfall's pollutant load array before the array itself
        for ( j=0; j<Nnodes[OUTFALL]; j++ )
            FREE(OutfallStats[j].totalLoad);
        FREE(OutfallStats);
    }
    FREE(PumpStats);
}

//=============================================================================

void stats_report()
//
//  Input:   none
//  Output:  none
//  Purpose: reports simulation statistics.
// { // --- report flow routing accuracy statistics if ( Nobjects[LINK] > 0 && RouteModel != NO_ROUTING ) { stats_findMaxStats(); report_writeMaxStats(MaxMassBalErrs, MaxCourantCrit, MAX_STATS); report_writeMaxFlowTurns(MaxFlowTurns, MAX_STATS); report_writeSysStats(&SysStats); } // --- report summary statistics statsrpt_writeReport(); } //============================================================================= void stats_updateSubcatchStats(int j, double rainVol, double runonVol, double evapVol, double infilVol, double impervVol, double pervVol, double runoffVol, double runoff) // // Input: j = subcatchment index // rainVol = rainfall + snowfall volume (ft3) // runonVol = runon volume from other subcatchments (ft3) // evapVol = evaporation volume (ft3) // infilVol = infiltration volume (ft3) // impervVol = impervious runoff volume (ft3) // pervVol = pervious runoff volume (ft3) // runoffVol = runoff volume (ft3) // runoff = runoff rate (cfs) // Output: none // Purpose: updates totals of runoff components for a specific subcatchment. 
// { SubcatchStats[j].precip += rainVol; SubcatchStats[j].runon += runonVol; SubcatchStats[j].evap += evapVol; SubcatchStats[j].infil += infilVol; SubcatchStats[j].runoff += runoffVol; SubcatchStats[j].maxFlow = MAX(SubcatchStats[j].maxFlow, runoff); SubcatchStats[j].impervRunoff += impervVol; //(5.1.013) SubcatchStats[j].pervRunoff += pervVol; // } //============================================================================= void stats_updateGwaterStats(int j, double infil, double evap, double latFlow, double deepFlow, double theta, double waterTable, double tStep) { Subcatch[j].groundwater->stats.infil += infil * tStep; Subcatch[j].groundwater->stats.evap += evap * tStep; Subcatch[j].groundwater->stats.latFlow += latFlow * tStep; Subcatch[j].groundwater->stats.deepFlow += deepFlow * tStep; Subcatch[j].groundwater->stats.avgUpperMoist += theta * tStep; Subcatch[j].groundwater->stats.avgWaterTable += waterTable * tStep; Subcatch[j].groundwater->stats.finalUpperMoist = theta; Subcatch[j].groundwater->stats.finalWaterTable = waterTable; if ( fabs(latFlow) > fabs(Subcatch[j].groundwater->stats.maxFlow) ) { Subcatch[j].groundwater->stats.maxFlow = latFlow; } } //============================================================================= void stats_updateMaxRunoff() // // Input: none // Output: updates global variable MaxRunoffFlow // Purpose: updates value of maximum system runoff rate. // { int j; double sysRunoff = 0.0; for (j=0; j<Nobjects[SUBCATCH]; j++) sysRunoff += Subcatch[j].newRunoff; MaxRunoffFlow = MAX(MaxRunoffFlow, sysRunoff); } //============================================================================= void stats_updateMaxNodeDepth(int j, double depth) // // Input: j = node index // depth = water depth at node at current reporting time (ft) // Output: none // Purpose: updates a node's maximum depth recorded at reporting times. 
// { if ( NodeStats != NULL ) NodeStats[j].maxRptDepth = MAX(NodeStats[j].maxRptDepth, depth); } //============================================================================= void stats_updateFlowStats(double tStep, DateTime aDate, int stepCount, int steadyState) // // Input: tStep = routing time step (sec) // aDate = current date/time // stepCount = # steps required to solve routing at current time period // steadyState = TRUE if steady flow conditions exist // Output: none // Purpose: updates various flow routing statistics at current time period. // { int j; // --- update stats only after reporting period begins if ( aDate < ReportStart ) return; SysOutfallFlow = 0.0; // --- update node & link stats #pragma omp parallel num_threads(NumThreads) { #pragma omp for for ( j=0; j<Nobjects[NODE]; j++ ) stats_updateNodeStats(j, tStep, aDate); #pragma omp for for ( j=0; j<Nobjects[LINK]; j++ ) stats_updateLinkStats(j, tStep, aDate); } // --- update count of times in steady state SysStats.steadyStateCount += steadyState; // --- update time step stats if not in steady state if ( steadyState == FALSE ) { // --- skip initial time step for min. value) if ( OldRoutingTime > 0 ) { SysStats.minTimeStep = MIN(SysStats.minTimeStep, tStep); } SysStats.avgTimeStep += tStep; SysStats.maxTimeStep = MAX(SysStats.maxTimeStep, tStep); // --- update iteration step count stats SysStats.avgStepCount += stepCount; } // --- update max. system outfall flow MaxOutfallFlow = MAX(MaxOutfallFlow, SysOutfallFlow); } //============================================================================= void stats_updateCriticalTimeCount(int node, int link) // // Input: node = node index // link = link index // Output: none // Purpose: updates count of times a node or link was time step-critical. 
//
{
    // --- node takes precedence when both indices are valid
    if ( node >= 0 ) NodeStats[node].timeCourantCritical += 1.0;
    else if ( link >= 0 ) LinkStats[link].timeCourantCritical += 1.0;
}

//=============================================================================

void stats_updateNodeStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = node index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a node.
//
{
    int    k, p;
    double newVolume = Node[j].newVolume;
    double newDepth = Node[j].newDepth;
    double yCrown = Node[j].crownElev - Node[j].invertElev;   // NOTE(review): not
                             // referenced below (surcharge test uses crownElev
                             // directly) — candidate for removal
    int    canPond = (AllowPonding && Node[j].pondedArea > 0.0);

    // --- update depth statistics
    NodeStats[j].avgDepth += newDepth;
    if ( newDepth > NodeStats[j].maxDepth )
    {
        NodeStats[j].maxDepth = newDepth;
        NodeStats[j].maxDepthDate = aDate;
    }

    // --- update flooding, ponding, and surcharge statistics
    if ( Node[j].type != OUTFALL )
    {
        if ( newVolume > Node[j].fullVolume || Node[j].overflow > 0.0 )
        {
            NodeStats[j].timeFlooded += tStep;
            NodeStats[j].volFlooded += Node[j].overflow * tStep;
            if ( canPond ) NodeStats[j].maxPondedVol =
                MAX(NodeStats[j].maxPondedVol,
                    (newVolume - Node[j].fullVolume));
        }

        // --- for dynamic wave routing, classify a node as                    //(5.1.013)
        //     surcharged if its water level exceeds its crown elev.
        if (RouteModel == DW)                                                  //(5.1.013)
        {
            // --- storage nodes only count when they have a surcharge depth
            if ((Node[j].type != STORAGE || Node[j].surDepth > 0.0) &&         //(5.1.013)
                newDepth + Node[j].invertElev + FUDGE >= Node[j].crownElev)
            {
                NodeStats[j].timeSurcharged += tStep;
            }
        }
    }

    // --- update storage statistics
    if ( Node[j].type == STORAGE )
    {
        k = Node[j].subIndex;
        StorageStats[k].avgVol += newVolume;
        StorageStats[k].evapLosses +=
            Storage[Node[j].subIndex].evapLoss;
        StorageStats[k].exfilLosses +=
            Storage[Node[j].subIndex].exfilLoss;

        // --- cap volume at full volume before testing for a new maximum
        newVolume = MIN(newVolume, Node[j].fullVolume);
        if ( newVolume > StorageStats[k].maxVol )
        {
            StorageStats[k].maxVol = newVolume;
            StorageStats[k].maxVolDate = aDate;
        }
        StorageStats[k].maxFlow = MAX(StorageStats[k].maxFlow, Node[j].outflow);
    }

    // --- update outfall statistics
    if ( Node[j].type == OUTFALL )
    {
        k = Node[j].subIndex;

        // --- only flows above the runoff threshold count toward averages
        if ( Node[j].inflow >= MIN_RUNOFF_FLOW )
        {
            OutfallStats[k].avgFlow += Node[j].inflow;
            OutfallStats[k].maxFlow = MAX(OutfallStats[k].maxFlow, Node[j].inflow);
            OutfallStats[k].totalPeriods++;
        }
        for (p=0; p<Nobjects[POLLUT]; p++)
        {
            OutfallStats[k].totalLoad[p] += Node[j].inflow *
                Node[j].newQual[p] * tStep;
        }
        SysOutfallFlow += Node[j].inflow;
    }

    // --- update inflow statistics
    //     (lateral flow volume uses the trapezoidal average of old & new rates)
    NodeStats[j].totLatFlow += ( (Node[j].oldLatFlow + Node[j].newLatFlow) *
                                 0.5 * tStep );
    if ( fabs(Node[j].newLatFlow) > fabs(NodeStats[j].maxLatFlow) )
        NodeStats[j].maxLatFlow = Node[j].newLatFlow;
    if ( Node[j].inflow > NodeStats[j].maxInflow )
    {
        NodeStats[j].maxInflow = Node[j].inflow;
        NodeStats[j].maxInflowDate = aDate;
    }

    // --- update overflow statistics
    if ( Node[j].overflow > NodeStats[j].maxOverflow )
    {
        NodeStats[j].maxOverflow = Node[j].overflow;
        NodeStats[j].maxOverflowDate = aDate;
    }
}

//=============================================================================

void stats_updateLinkStats(int j, double tStep, DateTime aDate)
//
//  Input:   j = link index
//           tStep = routing time step (sec)
//           aDate = current date/time
//  Output:  none
//  Purpose: updates flow statistics for a link.
//
{
    int    k;
    double q, v;
    double dq;

    // --- update max. flow
    dq = Link[j].newFlow - Link[j].oldFlow;
    q = fabs(Link[j].newFlow);
    if ( q > LinkStats[j].maxFlow )
    {
        LinkStats[j].maxFlow = q;
        LinkStats[j].maxFlowDate = aDate;
    }

    // --- update max. velocity
    v = link_getVelocity(j, q, Link[j].newDepth);
    if ( v > LinkStats[j].maxVeloc )
    {
        LinkStats[j].maxVeloc = v;
    }

    // --- update max. depth
    if ( Link[j].newDepth > LinkStats[j].maxDepth )
    {
        LinkStats[j].maxDepth = Link[j].newDepth;
    }

    if ( Link[j].type == PUMP )
    {
        if ( q >= Link[j].qFull )
            LinkStats[j].timeFullFlow += tStep;

        // --- accumulate pump stats only while actually pumping
        if ( q > MIN_RUNOFF_FLOW )
        {
            k = Link[j].subIndex;
            PumpStats[k].minFlow = MIN(PumpStats[k].minFlow, q);
            PumpStats[k].maxFlow = LinkStats[j].maxFlow;
            PumpStats[k].avgFlow += q;
            PumpStats[k].volume += q*tStep;
            PumpStats[k].utilized += tStep;
            PumpStats[k].energy += link_getPower(j)*tStep/3600.0;
            if ( Link[j].flowClass == DN_DRY )
                PumpStats[k].offCurveLow += tStep;
            if ( Link[j].flowClass == UP_DRY )
                PumpStats[k].offCurveHigh += tStep;

            // --- a transition from (near) zero flow counts as a start-up
            if ( Link[j].oldFlow < MIN_RUNOFF_FLOW )
                PumpStats[k].startUps++;
            PumpStats[k].totalPeriods++;
            LinkStats[j].timeSurcharged += tStep;
            LinkStats[j].timeFullUpstream += tStep;
            LinkStats[j].timeFullDnstream += tStep;
        }
    }
    else if ( Link[j].type == CONDUIT )
    {
        // --- update time under normal flow & inlet control
        if ( Link[j].normalFlow ) LinkStats[j].timeNormalFlow += tStep;
        if ( Link[j].inletControl ) LinkStats[j].timeInletControl += tStep;

        // --- update flow classification distribution
        //     (counts periods, not time; normalized when reported)
        k = Link[j].flowClass;
        if ( k >= 0 && k < MAX_FLOW_CLASSES )
        {
            ++LinkStats[j].timeInFlowClass[k];
        }

        // --- update time conduit is full
        k = Link[j].subIndex;
        if ( q >= Link[j].qFull * (double)Conduit[k].barrels )
            LinkStats[j].timeFullFlow += tStep;
        if ( Conduit[k].capacityLimited )
            LinkStats[j].timeCapacityLimited += tStep;

        switch (Conduit[k].fullState)
        {
        case ALL_FULL:
            LinkStats[j].timeSurcharged += tStep;
            LinkStats[j].timeFullUpstream += tStep;
            LinkStats[j].timeFullDnstream += tStep;
            break;
        case UP_FULL:
            LinkStats[j].timeFullUpstream += tStep;
            break;
        case DN_FULL:
            LinkStats[j].timeFullDnstream += tStep;
        }
    }

    // --- update flow turn count
    //     (a "turn" is a sign reversal of the flow change dq between periods)
    k = LinkStats[j].flowTurnSign;
    LinkStats[j].flowTurnSign = SGN(dq);
    if ( fabs(dq) > 0.001 && k * LinkStats[j].flowTurnSign < 0 )
        LinkStats[j].flowTurns++;
}

//=============================================================================

void stats_findMaxStats()
//
//  Input:   none
//  Output:  none
//  Purpose: finds nodes & links with highest mass balance errors
//           & highest times Courant time-step critical.
//
{
    int    j;
    double x;

    // --- initialize max. stats arrays
    for (j=0; j<MAX_STATS; j++)
    {
        MaxMassBalErrs[j].objType = NODE;
        MaxMassBalErrs[j].index   = -1;
        MaxMassBalErrs[j].value   = -1.0;
        MaxCourantCrit[j].index   = -1;
        MaxCourantCrit[j].value   = -1.0;
        MaxFlowTurns[j].index     = -1;
        MaxFlowTurns[j].value     = -1.0;
    }

    // --- find links with most flow turns
    //     (expressed as a % of the max. possible turns, 2/3 of step count)
    if ( StepCount > 2 )
    {
        for (j=0; j<Nobjects[LINK]; j++)
        {
            x = 100.0 * LinkStats[j].flowTurns / (2./3.*(StepCount-2));
            stats_updateMaxStats(MaxFlowTurns, LINK, j, x);
        }
    }

    // --- find nodes with largest mass balance errors
    for (j=0; j<Nobjects[NODE]; j++)
    {
        // --- skip terminal nodes and nodes with negligible inflow
        if ( Node[j].degree <= 0  ) continue;
        if ( NodeInflow[j] <= 0.1 ) continue;

        // --- evaluate mass balance error
        //     (Note: NodeInflow & NodeOutflow include any initial and final
        //            stored volumes, respectively).
        if ( NodeInflow[j]  > 0.0 )
            x = 1.0 - NodeOutflow[j] / NodeInflow[j];
        else if ( NodeOutflow[j] > 0.0 ) x = -1.0;
        else x = 0.0;
        stats_updateMaxStats(MaxMassBalErrs, NODE, j, 100.0*x);
    }

    // --- stop if not using a variable time step
    if ( RouteModel != DW || CourantFactor == 0.0 ) return;

    // --- find nodes most frequently Courant critical
    if ( StepCount == 0 ) return;    // guard against division by zero
    for (j=0; j<Nobjects[NODE]; j++)
    {
        x = NodeStats[j].timeCourantCritical / StepCount;
        stats_updateMaxStats(MaxCourantCrit, NODE, j, 100.0*x);
    }

    // --- find links most frequently Courant critical
    for (j=0; j<Nobjects[LINK]; j++)
    {
        x = LinkStats[j].timeCourantCritical / StepCount;
        stats_updateMaxStats(MaxCourantCrit, LINK, j, 100.0*x);
    }
}

//=============================================================================

void stats_updateMaxStats(TMaxStats maxStats[], int i, int j, double x)
//
//  Input:   maxStats[] = array of critical statistics values
//           i = object category (NODE or LINK)
//           j = object index
//           x = value of statistic for the object
//  Output:  none
//  Purpose: updates the collection of most critical statistics
//
{
    int   k;
    TMaxStats maxStats1, maxStats2;

    // --- insertion sort: the new entry displaces the first stored entry
    //     with a smaller absolute value; displaced entries shift down
    maxStats1.objType = i;
    maxStats1.index   = j;
    maxStats1.value   = x;
    for (k=0; k<MAX_STATS; k++)
    {
        if ( fabs(maxStats1.value) > fabs(maxStats[k].value) )
        {
            maxStats2 = maxStats[k];
            maxStats[k] = maxStats1;
            maxStats1 = maxStats2;
        }
    }
}

//

int stats_getNodeStat(int index, TNodeStats *nodeStats)
//
//  Input:   index = node index
//           nodeStats = destination struct for the copied statistics
//  Return:  error code (0 on success)
//  Purpose: Gets a Node Stat for toolkitAPI
//
{
    int errorcode = 0;

    // Check if Open
    if (swmm_IsOpenFlag() == FALSE)
    {
        errorcode = ERR_API_INPUTNOTOPEN;
    }
    // Check if Simulation is Running
    else if (swmm_IsStartedFlag() == FALSE)
    {
        errorcode = ERR_API_SIM_NRUNNING;
    }
    // Check if object index is within bounds
    else if (index < 0 || index >= Nobjects[NODE])
    {
        errorcode = ERR_API_OBJECT_INDEX;
    }
    else
    {
        memcpy(nodeStats, &NodeStats[index], sizeof(TNodeStats));
    }
    return errorcode;
}

int
stats_getStorageStat(int index, TStorageStats *storageStats)
//
//  Input:   index = node index (must be a STORAGE node)
//           storageStats = destination struct for the copied statistics
//  Return:  error code (0 on success)
//  Purpose: Gets a Storage Stat for toolkitAPI
//
{
    int errorcode = 0;

    // Check if Open
    if (swmm_IsOpenFlag() == FALSE)
    {
        errorcode = ERR_API_INPUTNOTOPEN;
    }
    // Check if Simulation is Running
    else if (swmm_IsStartedFlag() == FALSE)
    {
        errorcode = ERR_API_SIM_NRUNNING;
    }
    // Check if object index is within bounds
    else if (index < 0 || index >= Nobjects[NODE])
    {
        errorcode = ERR_API_OBJECT_INDEX;
    }
    // Check Node Type is storage
    else if (Node[index].type != STORAGE)
    {
        errorcode = ERR_API_WRONG_TYPE;
    }
    else
    {
        // fetch sub index
        int k = Node[index].subIndex;
        // Copy Structure
        memcpy(storageStats, &StorageStats[k], sizeof(TStorageStats));
    }
    return errorcode;
}

int stats_getOutfallStat(int index, TOutfallStats *outfallStats)
//
//  Input:   index = node index (must be an OUTFALL node)
//           outfallStats = destination struct for the copied statistics
//  Return:  error code (0 on success)
//  Purpose: Gets a Outfall Stat for toolkitAPI
//
{
    int errorcode = 0;
    int p;

    // Check if Open
    if (swmm_IsOpenFlag() == FALSE)
    {
        errorcode = ERR_API_INPUTNOTOPEN;
    }
    // Check if Simulation is Running
    else if (swmm_IsStartedFlag() == FALSE)
    {
        errorcode = ERR_API_SIM_NRUNNING;
    }
    // Check if object index is within bounds
    else if (index < 0 || index >= Nobjects[NODE])
    {
        errorcode = ERR_API_OBJECT_INDEX;
    }
    // Check Node Type is outfall
    else if (Node[index].type != OUTFALL)
    {
        errorcode = ERR_API_WRONG_TYPE;
    }
    else
    {
        // fetch sub index
        int k = Node[index].subIndex;
        // Copy Structure
        memcpy(outfallStats, &OutfallStats[k], sizeof(TOutfallStats));

        // Perform Deep Copy of Pollutants Results
        // NOTE(review): the copied totalLoad array is freshly allocated here;
        // ownership transfers to the caller, who must free it.
        if (Nobjects[POLLUT] > 0)
        {
            outfallStats->totalLoad =
                (double *)calloc(Nobjects[POLLUT], sizeof(double));
            if (!outfallStats->totalLoad)
            {
                errorcode = ERR_MEMORY;
            }
            if (errorcode == 0)
            {
                for (p = 0; p < Nobjects[POLLUT]; p++)
                    outfallStats->totalLoad[p] = OutfallStats[k].totalLoad[p];
            }
        }
        else
            outfallStats->totalLoad = NULL;
    }
    return errorcode;
}

int stats_getLinkStat(int index, TLinkStats *linkStats)
//
//  Input:   index = link index
//           linkStats = destination struct for the copied statistics
//  Return:  error code (0 on success)
//  Purpose: Gets a Link Stat for toolkitAPI
//
{
    int errorcode = 0;

    // Check if Open
    if (swmm_IsOpenFlag() == FALSE)
    {
        errorcode = ERR_API_INPUTNOTOPEN;
    }
    // Check if Simulation is Running
    else if (swmm_IsStartedFlag() == FALSE)
    {
        errorcode = ERR_API_SIM_NRUNNING;
    }
    // Check if object index is within bounds
    else if (index < 0 || index >= Nobjects[LINK])
    {
        errorcode = ERR_API_OBJECT_INDEX;
    }
    else
    {
        // Copy Structure
        memcpy(linkStats, &LinkStats[index], sizeof(TLinkStats));
    }
    return errorcode;
}

int stats_getPumpStat(int index, TPumpStats *pumpStats)
//
//  Input:   index = link index (must be a PUMP link)
//           pumpStats = destination struct for the copied statistics
//  Return:  error code (0 on success)
//  Purpose: Gets a Pump Stat for toolkitAPI
//
{
    int errorcode = 0;

    // Check if Open
    if (swmm_IsOpenFlag() == FALSE)
    {
        errorcode = ERR_API_INPUTNOTOPEN;
    }
    // Check if Simulation is Running
    else if (swmm_IsStartedFlag() == FALSE)
    {
        errorcode = ERR_API_SIM_NRUNNING;
    }
    // Check if object index is within bounds
    else if (index < 0 || index >= Nobjects[LINK])
    {
        errorcode = ERR_API_OBJECT_INDEX;
    }
    // Check if pump
    else if (Link[index].type != PUMP)
    {
        errorcode = ERR_API_WRONG_TYPE;
    }
    else
    {
        // fetch sub index
        int k = Link[index].subIndex;
        // Copy Structure
        memcpy(pumpStats, &PumpStats[k], sizeof(TPumpStats));
    }
    return errorcode;
}

TSubcatchStats *stats_getSubcatchStat(int index)
//
//  Input:   index = subcatchment index
//  Return:  pointer to the subcatchment's statistics
//  Purpose: Gets a Subcatchment Stat for toolkitAPI
//
{
    // NOTE(review): unlike the other stats_get* accessors, this performs no
    // open/started/bounds checks and returns a pointer into the internal
    // array rather than a copy — callers must not free it and must pass a
    // valid index.
    return &SubcatchStats[index];
}
program_evaluator.h
// Ceres Solver - A fast non-linear least squares minimizer // Copyright 2015 Google Inc. All rights reserved. // http://ceres-solver.org/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the name of Google Inc. nor the names of its contributors may be // used to endorse or promote products derived from this software without // specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. // // Author: keir@google.com (Keir Mierle) // // The ProgramEvaluator runs the cost functions contained in each residual block // and stores the result into a jacobian. 
The particular type of jacobian is // abstracted out using two template parameters: // // - An "EvaluatePreparer" that is responsible for creating the array with // pointers to the jacobian blocks where the cost function evaluates to. // - A "JacobianWriter" that is responsible for storing the resulting // jacobian blocks in the passed sparse matrix. // // This abstraction affords an efficient evaluator implementation while still // supporting writing to multiple sparse matrix formats. For example, when the // ProgramEvaluator is parameterized for writing to block sparse matrices, the // residual jacobians are written directly into their final position in the // block sparse matrix by the user's CostFunction; there is no copying. // // The evaluation is threaded with OpenMP or TBB. // // The EvaluatePreparer and JacobianWriter interfaces are as follows: // // class EvaluatePreparer { // // Prepare the jacobians array for use as the destination of a call to // // a cost function's evaluate method. // void Prepare(const ResidualBlock* residual_block, // int residual_block_index, // SparseMatrix* jacobian, // double** jacobians); // } // // class JacobianWriter { // // Create a jacobian that this writer can write. Same as // // Evaluator::CreateJacobian. // SparseMatrix* CreateJacobian() const; // // // Create num_threads evaluate preparers. Caller owns result which must // // be freed with delete[]. Resulting preparers are valid while *this is. // EvaluatePreparer* CreateEvaluatePreparers(int num_threads); // // // Write the block jacobians from a residual block evaluation to the // // larger sparse jacobian. // void Write(int residual_id, // int residual_offset, // double** jacobians, // SparseMatrix* jacobian); // } // // Note: The ProgramEvaluator is not thread safe, since internally it maintains // some per-thread scratch space. 
#ifndef CERES_INTERNAL_PROGRAM_EVALUATOR_H_ #define CERES_INTERNAL_PROGRAM_EVALUATOR_H_ // This include must come before any #ifndef check on Ceres compile options. #include "ceres/internal/port.h" #include <map> #include <string> #include <vector> #include "ceres/evaluation_callback.h" #include "ceres/execution_summary.h" #include "ceres/internal/eigen.h" #include "ceres/internal/scoped_ptr.h" #include "ceres/parameter_block.h" #include "ceres/program.h" #include "ceres/residual_block.h" #include "ceres/scoped_thread_token.h" #include "ceres/small_blas.h" #include "ceres/thread_token_provider.h" #if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS) #include <atomic> #include "ceres/parallel_for.h" #endif namespace ceres { namespace internal { struct NullJacobianFinalizer { void operator()(SparseMatrix* jacobian, int num_parameters) {} }; template<typename EvaluatePreparer, typename JacobianWriter, typename JacobianFinalizer = NullJacobianFinalizer> class ProgramEvaluator : public Evaluator { public: ProgramEvaluator(const Evaluator::Options &options, Program* program) : options_(options), program_(program), jacobian_writer_(options, program), evaluate_preparers_( jacobian_writer_.CreateEvaluatePreparers(options.num_threads)) { #ifdef CERES_NO_THREADS if (options_.num_threads > 1) { LOG(WARNING) << "Neither OpenMP nor TBB support is compiled into this binary; " << "only options.num_threads = 1 is supported. Switching " << "to single threaded mode."; options_.num_threads = 1; } #endif // CERES_NO_THREADS BuildResidualLayout(*program, &residual_layout_); evaluate_scratch_.reset(CreateEvaluatorScratch(*program, options.num_threads)); } // Implementation of Evaluator interface. 
  // Creates a jacobian with the sparsity structure this program requires;
  // the concrete matrix type is chosen by the JacobianWriter policy.
  SparseMatrix* CreateJacobian() const {
    return jacobian_writer_.CreateJacobian();
  }

  // Evaluates the cost and, when the corresponding pointers are non-NULL,
  // the residuals, gradient, and jacobian at the point 'state'. Residual
  // blocks are processed in parallel (OpenMP, TBB/C++11 threads, or
  // serially, depending on compile-time options); each thread accumulates
  // into its own scratch, and the partial costs/gradients are reduced at
  // the end. Returns false if applying the state fails or any residual
  // block fails to evaluate.
  bool Evaluate(const Evaluator::EvaluateOptions& evaluate_options,
                const double* state,
                double* cost,
                double* residuals,
                double* gradient,
                SparseMatrix* jacobian) {
    ScopedExecutionTimer total_timer("Evaluator::Total", &execution_summary_);
    ScopedExecutionTimer call_type_timer(gradient == NULL && jacobian == NULL
                                         ? "Evaluator::Residual"
                                         : "Evaluator::Jacobian",
                                         &execution_summary_);

    // The parameters are stateful, so set the state before evaluating.
    if (!program_->StateVectorToParameterBlocks(state)) {
      return false;
    }

    // Notify the user about a new evaluation point if they are interested.
    if (options_.evaluation_callback != NULL) {
      program_->CopyParameterBlockStateToUserState();
      options_.evaluation_callback->PrepareForEvaluation(
          /*jacobians=*/(gradient != NULL || jacobian != NULL),
          evaluate_options.new_evaluation_point);
    }

    if (residuals != NULL) {
      VectorRef(residuals, program_->NumResiduals()).setZero();
    }

    if (jacobian != NULL) {
      jacobian->SetZero();
    }

    // Each thread gets its own cost and evaluate scratch space.
    for (int i = 0; i < options_.num_threads; ++i) {
      evaluate_scratch_[i].cost = 0.0;
      if (gradient != NULL) {
        VectorRef(evaluate_scratch_[i].gradient.get(),
                  program_->NumEffectiveParameters()).setZero();
      }
    }

    const int num_residual_blocks = program_->NumResidualBlocks();

#if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
    // OpenMP / no-threads backends hand out thread ids via tokens.
    ThreadTokenProvider thread_token_provider(options_.num_threads);
#endif // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))

#ifdef CERES_USE_OPENMP
    // This bool is used to disable the loop if an error is encountered
    // without breaking out of it. The remaining loop iterations are still
    // run, but with an empty body, and so will finish quickly.
    bool abort = false;
#pragma omp parallel for num_threads(options_.num_threads)
    for (int i = 0; i < num_residual_blocks; ++i) {
// Disable the loop instead of breaking, as required by OpenMP.
#pragma omp flush(abort)
#endif // CERES_USE_OPENMP

#ifdef CERES_NO_THREADS
    bool abort = false;
    for (int i = 0; i < num_residual_blocks; ++i) {
#endif // CERES_NO_THREADS

#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
    // These backends have no flush pragma, so the abort flag is atomic and
    // the loop body becomes a lambda handed to ParallelFor.
    std::atomic_bool abort(false);
    ParallelFor(options_.context,
                0,
                num_residual_blocks,
                options_.num_threads,
                [&](int thread_id, int i) {
#endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)

      if (abort) {
#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
        return;
#else
        continue;
#endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
      }

#if !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))
      const ScopedThreadToken scoped_thread_token(&thread_token_provider);
      const int thread_id = scoped_thread_token.token();
#endif // !(defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS))

      EvaluatePreparer* preparer = &evaluate_preparers_[thread_id];
      EvaluateScratch* scratch = &evaluate_scratch_[thread_id];

      // Prepare block residuals if requested.
      const ResidualBlock* residual_block = program_->residual_blocks()[i];
      double* block_residuals = NULL;
      if (residuals != NULL) {
        block_residuals = residuals + residual_layout_[i];
      } else if (gradient != NULL) {
        // The gradient computation below needs the residuals even though
        // the caller did not ask for them; use per-thread scratch storage.
        block_residuals = scratch->residual_block_residuals.get();
      }

      // Prepare block jacobians if requested.
      double** block_jacobians = NULL;
      if (jacobian != NULL || gradient != NULL) {
        preparer->Prepare(residual_block,
                          i,
                          jacobian,
                          scratch->jacobian_block_ptrs.get());
        block_jacobians = scratch->jacobian_block_ptrs.get();
      }

      // Evaluate the cost, residuals, and jacobians.
      double block_cost;
      if (!residual_block->Evaluate(
              evaluate_options.apply_loss_function,
              &block_cost,
              block_residuals,
              block_jacobians,
              scratch->residual_block_evaluate_scratch.get())) {
        abort = true;
#ifdef CERES_USE_OPENMP
        // This ensures that the OpenMP threads have a consistent view of
        // 'abort'. Do the flush inside the failure case so that there is
        // usually only one synchronization point per loop iteration instead
        // of two.
#pragma omp flush(abort)
#endif // CERES_USE_OPENMP
#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
        return;
#else
        continue;
#endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
      }

      scratch->cost += block_cost;

      // Store the jacobians, if they were requested.
      if (jacobian != NULL) {
        jacobian_writer_.Write(i,
                               residual_layout_[i],
                               block_jacobians,
                               jacobian);
      }

      // Compute and store the gradient, if it was requested.
      if (gradient != NULL) {
        int num_residuals = residual_block->NumResiduals();
        int num_parameter_blocks = residual_block->NumParameterBlocks();
        for (int j = 0; j < num_parameter_blocks; ++j) {
          const ParameterBlock* parameter_block =
              residual_block->parameter_blocks()[j];
          if (parameter_block->IsConstant()) {
            continue;
          }

          // Accumulate this block's J_j^T * r into the thread-local
          // gradient at the parameter block's delta offset.
          MatrixTransposeVectorMultiply<Eigen::Dynamic, Eigen::Dynamic, 1>(
              block_jacobians[j],
              num_residuals,
              parameter_block->LocalSize(),
              block_residuals,
              scratch->gradient.get() + parameter_block->delta_offset());
        }
      }
    }
#if defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)
    );  // Closes the ParallelFor call around the lambda above.
#endif // defined(CERES_USE_TBB) || defined(CERES_USE_CXX11_THREADS)

    if (!abort) {
      const int num_parameters = program_->NumEffectiveParameters();

      // Sum the cost and gradient (if requested) from each thread.
      (*cost) = 0.0;
      if (gradient != NULL) {
        VectorRef(gradient, num_parameters).setZero();
      }
      for (int i = 0; i < options_.num_threads; ++i) {
        (*cost) += evaluate_scratch_[i].cost;
        if (gradient != NULL) {
          VectorRef(gradient, num_parameters) +=
              VectorRef(evaluate_scratch_[i].gradient.get(), num_parameters);
        }
      }

      // Finalize the Jacobian if it is available.
      //
      // `num_parameters` is passed to the finalizer so that additional
      // storage can be reserved for additional diagonal elements if
      // necessary.
      if (jacobian != NULL) {
        JacobianFinalizer f;
        f(jacobian, num_parameters);
      }
    }
    return !abort;
  }

  // state_plus_delta = Plus(state, delta) in the local parameterization;
  // delegates to the program.
  bool Plus(const double* state,
            const double* delta,
            double* state_plus_delta) const {
    return program_->Plus(state, delta, state_plus_delta);
  }

  int NumParameters() const {
    return program_->NumParameters();
  }
  int NumEffectiveParameters() const {
    return program_->NumEffectiveParameters();
  }
  int NumResiduals() const {
    return program_->NumResiduals();
  }

  // Timing statistics gathered by the ScopedExecutionTimers in Evaluate().
  virtual std::map<std::string, CallStatistics> Statistics() const {
    return execution_summary_.statistics();
  }

 private:
  // Per-thread scratch space needed to evaluate and store each residual
  // block; sized once in Init() for the largest block in the program.
  struct EvaluateScratch {
    void Init(int max_parameters_per_residual_block,
              int max_scratch_doubles_needed_for_evaluate,
              int max_residuals_per_residual_block,
              int num_parameters) {
      residual_block_evaluate_scratch.reset(
          new double[max_scratch_doubles_needed_for_evaluate]);
      gradient.reset(new double[num_parameters]);
      VectorRef(gradient.get(), num_parameters).setZero();
      residual_block_residuals.reset(
          new double[max_residuals_per_residual_block]);
      jacobian_block_ptrs.reset(
          new double*[max_parameters_per_residual_block]);
    }

    // Partial cost accumulated by this thread.
    double cost;
    scoped_array<double> residual_block_evaluate_scratch;
    // The gradient in the local parameterization.
    scoped_array<double> gradient;
    // Enough space to store the residual for the largest residual block.
    scoped_array<double> residual_block_residuals;
    // Pointer array handed to ResidualBlock::Evaluate for block jacobians.
    scoped_array<double*> jacobian_block_ptrs;
  };

  // Fills residual_layout with the offset of each residual block's first
  // residual in the flat residual vector (prefix sums of block sizes).
  static void BuildResidualLayout(const Program& program,
                                  std::vector<int>* residual_layout) {
    const std::vector<ResidualBlock*>& residual_blocks =
        program.residual_blocks();
    residual_layout->resize(program.NumResidualBlocks());
    int residual_pos = 0;
    for (int i = 0; i < residual_blocks.size(); ++i) {
      const int num_residuals = residual_blocks[i]->NumResiduals();
      (*residual_layout)[i] = residual_pos;
      residual_pos += num_residuals;
    }
  }

  // Create scratch space for each thread evaluating the program.
  // Allocates one EvaluateScratch per thread, each sized for the largest
  // residual block in the program. The returned array is owned by the
  // caller (stored in evaluate_scratch_).
  static EvaluateScratch* CreateEvaluatorScratch(const Program& program,
                                                 int num_threads) {
    int max_parameters_per_residual_block =
        program.MaxParametersPerResidualBlock();
    int max_scratch_doubles_needed_for_evaluate =
        program.MaxScratchDoublesNeededForEvaluate();
    int max_residuals_per_residual_block =
        program.MaxResidualsPerResidualBlock();
    int num_parameters = program.NumEffectiveParameters();

    EvaluateScratch* evaluate_scratch = new EvaluateScratch[num_threads];
    for (int i = 0; i < num_threads; i++) {
      evaluate_scratch[i].Init(max_parameters_per_residual_block,
                               max_scratch_doubles_needed_for_evaluate,
                               max_residuals_per_residual_block,
                               num_parameters);
    }
    return evaluate_scratch;
  }

  Evaluator::Options options_;
  Program* program_;                       // Not owned.
  JacobianWriter jacobian_writer_;
  scoped_array<EvaluatePreparer> evaluate_preparers_;   // One per thread.
  scoped_array<EvaluateScratch> evaluate_scratch_;      // One per thread.
  std::vector<int> residual_layout_;       // Residual offsets per block.
  ::ceres::internal::ExecutionSummary execution_summary_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_INTERNAL_PROGRAM_EVALUATOR_H_
/* ==== matrix_multiplication2.c ==== */
/******************************************************************************
 * FILE: omp_mm.c
 * DESCRIPTION:
 *   OpenMP Example - Matrix Multiply - C Version
 *   Demonstrates a matrix multiply using OpenMP. Threads share row iterations
 *   according to an auto chunk size.
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

#define NRA 62 /* number of rows in matrix A */
#define NCA 15 /* number of columns in matrix A (== rows in matrix B) */
#define NCB 7  /* number of columns in matrix B */

int main (int argc, char *argv[])
{
  int tid, nthreads, i, j, k;
  double a[NRA][NCA]; /* matrix A to be multiplied */
  double b[NCA][NCB]; /* matrix B to be multiplied */
  double c[NRA][NCB]; /* result matrix C */
  double t1, t2;      /* wall-clock timestamps around the parallel region */

  t1 = omp_get_wtime();

  /*** Spawn a parallel region explicitly scoping all variables ***/
#pragma omp parallel shared(a, b, c, nthreads) private(tid, i, j, k)
  {
    tid = omp_get_thread_num();
    if (tid == 0) {
      /* Only the master thread reports the team size. */
      nthreads = omp_get_num_threads();
      printf("Starting matrix multiple example with %d threads\n", nthreads);
      printf("Initializing matrices...\n");
    }

    /*** Initialize matrices ***/
    /* BUG FIX: the original used `#pragma omp sections nowait`, which
     * removed the barrier at the end of the sections construct. Threads
     * that were not assigned a section could then enter the multiply loop
     * below while a, b, and c were still being initialized — a data race
     * that produces nondeterministic, wrong results. The implicit barrier
     * (no `nowait`) is required so initialization completes before any
     * thread starts multiplying. */
#pragma omp sections
    {
#pragma omp section
      for (i = 0; i < NRA; i++) {
        for (j = 0; j < NCA; j++) {
          a[i][j] = i + j;
        }
      }
#pragma omp section
      for (i = 0; i < NCA; i++) {
        for (j = 0; j < NCB; j++) {
          b[i][j] = i * j;
        }
      }
#pragma omp section
      for (i = 0; i < NRA; i++) {
        for (j = 0; j < NCB; j++) {
          c[i][j] = 0;
        }
      }
    }

    /*** Do matrix multiply sharing iterations on outer loop ***/
    /*** Display who does which iterations for demonstration purposes ***/
    printf("Thread %d starting matrix multiply...\n", tid);
#pragma omp for schedule (auto)
    for (i = 0; i < NRA; i++) {
      printf("Thread=%d did row=%d\n", tid, i);
      for(j = 0; j < NCB; j++) {
        /* c[i][j] = dot product of row i of A with column j of B. */
        for (k = 0; k < NCA; k++) {
          c[i][j] += a[i][k] * b[k][j];
        }
      }
    }
  } /*** End of parallel region ***/

  t2 = omp_get_wtime();

  /*** Print results ***/
  printf("******************************************************\n");
  printf("Result Matrix:\n");
  for (i = 0; i < NRA; i++) {
    for (j = 0; j < NCB; j++) {
      printf("%6.2f ", c[i][j]);
    }
    printf("\n");
  }
  printf("******************************************************\n");
  printf("Execution time %g\n", t2 - t1);
  printf ("Done.\n");
  return 0;
}