source
stringlengths
3
92
c
stringlengths
26
2.25M
GB_bitmap_assign_IxJ_template.c
//------------------------------------------------------------------------------ // GB_bitmap_assign_IxJ_template: iterate over all of C(I,J) //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // Iterate over all positions in the IxJ Cartesian product. This is all // entries C(i,j) where i is in the list I and j is in the list J. This // traversal occurs whether or not C(i,j) is an entry present in C. // The C matrix is accessed at C(I,J). The A matrix is size |I|-by-|J|. // For bitmap assignent, C(I,J)=A is being computed. For bitmap extraction, // C=A(I,J) so the roles of A and C are swapped (see GB_bitmap_subref.c). { //-------------------------------------------------------------------------- // create the tasks to iterate over IxJ //-------------------------------------------------------------------------- int ntasks = 0, nthreads ; GB_task_struct *TaskList = NULL ; size_t TaskList_size = 0 ; GB_OK (GB_subassign_IxJ_slice (&TaskList, &TaskList_size, &ntasks, &nthreads, /* I, */ nI, /* Ikind, Icolon, J, */ nJ, /* Jkind, Jcolon, */ Context)) ; //-------------------------------------------------------------------------- // iterate over all IxJ //-------------------------------------------------------------------------- int taskid ; #pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \ reduction(+:cnvals) for (taskid = 0 ; taskid < ntasks ; taskid++) { //---------------------------------------------------------------------- // get the task descriptor //---------------------------------------------------------------------- int64_t kfirst = TaskList [taskid].kfirst ; int64_t klast = TaskList [taskid].klast ; int64_t task_cnvals = 0 ; bool fine_task = (klast == -1) ; int64_t iA_start = 0, iA_end = nI ; if (fine_task) { // a 
fine task operates on a slice of a single vector klast = kfirst ; iA_start = TaskList [taskid].pA ; iA_end = TaskList [taskid].pA_end ; } //---------------------------------------------------------------------- // compute all vectors in this task //---------------------------------------------------------------------- for (int64_t jA = kfirst ; jA <= klast ; jA++) { //------------------------------------------------------------------ // get jC, the corresponding vector of C //------------------------------------------------------------------ int64_t jC = GB_ijlist (J, jA, Jkind, Jcolon) ; int64_t pC0 = jC * vlen ; // first entry in C(:,jC) int64_t pA0 = jA * nI ; // first entry in A(:,jA) //------------------------------------------------------------------ // operate on C (I(iA_start,iA_end-1),jC) //------------------------------------------------------------------ for (int64_t iA = iA_start ; iA < iA_end ; iA++) { int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ; int64_t pC = iC + pC0 ; int64_t pA = iA + pA0 ; // operate on C(iC,jC) at pC (if C is bitmap or full) // and A(iA,jA) or M(iA,jA) at pA, if A and/or M are // bitmap or full. M(iA,jA) is accessed only for the // subassign method when M is bitmap or full. GB_IXJ_WORK (pC, pA) ; } } cnvals += task_cnvals ; } //-------------------------------------------------------------------------- // free workpace //-------------------------------------------------------------------------- GB_FREE_WORK (&TaskList, TaskList_size) ; }
GB_binop__ge_bool.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_mkl.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB_AaddB__ge_bool // A.*B function (eWiseMult): GB_AemultB__ge_bool // A*D function (colscale): GB_AxD__ge_bool // D*A function (rowscale): GB_DxB__ge_bool // C+=B function (dense accum): GB_Cdense_accumB__ge_bool // C+=b function (dense accum): GB_Cdense_accumb__ge_bool // C+=A+B function (dense ewise3): (none) // C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_bool // C=scalar+B GB_bind1st__ge_bool // C=scalar+B' GB_bind1st_tran__ge_bool // C=A+scalar GB_bind2nd__ge_bool // C=A'+scalar GB_bind2nd_tran__ge_bool // C type: bool // A type: bool // B,b type: bool // BinaryOp: cij = (aij >= bij) #define GB_ATYPE \ bool #define GB_BTYPE \ bool #define GB_CTYPE \ bool // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ bool aij = Ax [pA] // bij = Bx [pB] #define GB_GETB(bij,Bx,pB) \ bool bij = Bx [pB] // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ bool t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA) \ cij = Ax [pA] // cij = Bx [pB] #define 
GB_COPY_B_TO_C(cij,Bx,pB) \ cij = Bx [pB] #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z, x, y) \ z = (x >= y) ; // op is second #define GB_OP_IS_SECOND \ 0 // op is plus_fp32 or plus_fp64 #define GB_OP_IS_PLUS_REAL \ 0 // op is minus_fp32 or minus_fp64 #define GB_OP_IS_MINUS_REAL \ 0 // GB_cblas_*axpy gateway routine, if it exists for this operator and type: #define GB_CBLAS_AXPY \ (none) // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_GE || GxB_NO_BOOL || GxB_NO_GE_BOOL) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ #if 0 // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. void (none) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } #endif //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ GrB_Info GB_Cdense_ewise3_noaccum__ge_bool ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_dense_ewise3_noaccum_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumB__ge_bool ( GrB_Matrix C, const GrB_Matrix B, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, 
const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB_Cdense_accumb__ge_bool ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type bool bool bwork = (*((bool *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_AxD__ge_bool ( GrB_Matrix C, const GrB_Matrix A, bool A_is_pattern, const GrB_Matrix D, bool D_is_pattern, const int64_t *GB_RESTRICT kfirst_slice, const int64_t *GB_RESTRICT klast_slice, const int64_t *GB_RESTRICT pstart_slice, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_colscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB_DxB__ge_bool ( GrB_Matrix C, const GrB_Matrix D, bool D_is_pattern, const GrB_Matrix B, bool B_is_pattern, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *GB_RESTRICT Cx = (bool *) C->x ; #include "GB_AxB_rowscale_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C = A+B or C<M> = A+B 
//------------------------------------------------------------------------------ GrB_Info GB_AaddB__ge_bool ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const bool Ch_is_Mh, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_add_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C = A.*B or C<M> = A.*B //------------------------------------------------------------------------------ GrB_Info GB_AemultB__ge_bool ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *GB_RESTRICT C_to_M, const int64_t *GB_RESTRICT C_to_A, const int64_t *GB_RESTRICT C_to_B, const GB_task_struct *GB_RESTRICT TaskList, const int ntasks, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB_bind1st__ge_bool ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool *Cx = (bool *) Cx_output ; bool x = (*((bool *) x_input)) ; bool *Bx = (bool *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool bij = Bx [p] ; Cx [p] = (x >= bij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a 
matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB_bind2nd__ge_bool ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; bool *Cx = (bool *) Cx_output ; bool *Ax = (bool *) Ax_input ; bool y = (*((bool *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { bool aij = Ax [p] ; Cx [p] = (aij >= y) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (x >= aij) ; \ } GrB_Info GB_bind1st_tran__ge_bool ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ bool #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool x = (*((const bool *) x_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ bool } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typcasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ bool aij = Ax [pA] ; \ Cx [pC] = (aij >= y) ; \ } GrB_Info GB_bind2nd_tran__ge_bool ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *GB_RESTRICT *Rowcounts, GBI_single_iterator Iter, const int64_t *GB_RESTRICT A_slice, int naslice ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else bool y = (*((const bool *) y_input)) ; #define GB_PHASE_2_OF_2 #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
flow.c
#include "flow.h"
#include "../../comms.h"
#include "../../params.h"
#include "../flow_interface.h"
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

// Solve a single timestep on the given mesh: EOS, Lagrangian phase
// (pressure + viscous acceleration, shock heating/work), timestep control,
// then directionally-split advection of mass, energy, and momentum.
void solve_hydro_2d(Mesh* mesh, int tt, double* pressure, double* density,
                    double* density_old, double* energy, double* velocity_x,
                    double* velocity_y, double* momentum_x, double* momentum_y,
                    double* Qxx, double* Qyy, double* mass_flux_x,
                    double* mass_flux_y, double* momentum_x_flux_x,
                    double* momentum_x_flux_y, double* momentum_y_flux_x,
                    double* momentum_y_flux_y, double* reduce_array) {
  if (mesh->rank == MASTER) {
    printf("Timestep: %.12e\n", mesh->dt);
  }

  equation_of_state(mesh->local_nx, mesh->local_ny, pressure, density, energy);

  pressure_acceleration(mesh->local_nx, mesh->local_ny, mesh, mesh->dt,
                        momentum_x, momentum_y, velocity_x, velocity_y,
                        pressure, density, mesh->edgedx, mesh->edgedy,
                        mesh->celldx, mesh->celldy);

  artificial_viscosity(mesh->local_nx, mesh->local_ny, mesh, mesh->dt, Qxx, Qyy,
                       velocity_x, velocity_y, momentum_x, momentum_y, density,
                       mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy);

  shock_heating_and_work(mesh->local_nx, mesh->local_ny, mesh, mesh->dt_h,
                         energy, pressure, velocity_x, velocity_y, density, Qxx,
                         Qyy, mesh->celldx, mesh->celldy);

  set_timestep(mesh->local_nx, mesh->local_ny, Qxx, Qyy, density, energy, mesh,
               reduce_array, tt == 0, mesh->celldx, mesh->celldy);

  // Perform advection
  advect_mass_and_energy(mesh->local_nx, mesh->local_ny, mesh, tt, mesh->dt,
                         mesh->dt_h, density, energy, density_old, mass_flux_x,
                         mass_flux_y, momentum_x_flux_x, momentum_x_flux_y,
                         velocity_x, velocity_y, mesh->edgedx, mesh->edgedy,
                         mesh->celldx, mesh->celldy);

  advect_momentum(mesh->local_nx, mesh->local_ny, tt, mesh, mesh->dt_h,
                  mesh->dt, velocity_x, velocity_y, momentum_x_flux_x,
                  momentum_x_flux_y, momentum_y_flux_x, momentum_y_flux_y,
                  momentum_x, momentum_y, density, mass_flux_x, mass_flux_y,
                  mesh->edgedx, mesh->edgedy, mesh->celldx, mesh->celldy);
}

// Calculate the pressure from the gamma-law (GAM) equation of state
void equation_of_state(const int nx, const int ny, double* pressure,
                       const double* density, const double* energy) {
  START_PROFILING(&compute_profile);

#pragma omp parallel for
  for (int ii = 0; ii < ny; ++ii) {
#pragma omp simd
    for (int jj = 0; jj < nx; ++jj) {
      // Only invoke simple gamma law at the moment
      pressure[(ii * nx + jj)] =
          (GAM - 1.0) * density[(ii * nx + jj)] * energy[(ii * nx + jj)];
    }
  }

  STOP_PROFILING(&compute_profile, __func__);
}

// Calculates the timestep from the current state (CFL-like constraint on
// sound speed plus artificial viscosity), globally reduced across ranks.
void set_timestep(const int nx, const int ny, double* Qxx, double* Qyy,
                  const double* density, const double* energy, Mesh* mesh,
                  double* reduce_array, const int first_step,
                  const double* celldx, const double* celldy) {
  const int pad = mesh->pad;
  double local_min_dt = mesh->max_dt;

  START_PROFILING(&compute_profile);

  // Check the minimum timestep from the sound speed in the nx and ny directions
#pragma omp parallel for reduction(min : local_min_dt)
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Constrain based on the sound speed within the system
      const double c_s = sqrt(GAM * (GAM - 1.0) * energy[(ii * nx + jj)]);

      const double thread_min_dt_x =
          celldx[jj] / sqrt(c_s * c_s +
                            2.0 * Qxx[(ii * nx + jj)] / density[(ii * nx + jj)]);
      const double thread_min_dt_y =
          celldy[ii] / sqrt(c_s * c_s +
                            2.0 * Qyy[(ii * nx + jj)] / density[(ii * nx + jj)]);
      const double thread_min_dt = min(thread_min_dt_x, thread_min_dt_y);
      local_min_dt = min(local_min_dt, thread_min_dt);
    }
  }

  STOP_PROFILING(&compute_profile, __func__);

  double global_min_dt = reduce_all_min(local_min_dt);

  // Ensure that the timestep does not jump too far from one step to the next
  const double final_min_dt = min(global_min_dt, C_M * mesh->dt_h);
  mesh->dt = 0.5 * (C_T * final_min_dt + mesh->dt_h);
  mesh->dt_h = (first_step) ? mesh->dt : C_T * final_min_dt;
}

// Calculate change in momentum caused by pressure gradients, and then extract
// the velocities using edge centered density approximations
void pressure_acceleration(const int nx, const int ny, Mesh* mesh,
                           const double dt, double* momentum_x,
                           double* momentum_y, double* velocity_x,
                           double* velocity_y, const double* pressure,
                           const double* density, const double* edgedx,
                           const double* edgedy, const double* celldx,
                           const double* celldy) {
  START_PROFILING(&compute_profile);

  const int pad = mesh->pad;

#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      // Update the momenta using the pressure gradients
      momentum_x[(ii * (nx + 1) + jj)] -=
          dt * (pressure[(ii * nx + jj)] - pressure[(ii * nx + jj) - 1]) /
          edgedx[jj];
      momentum_y[(ii * nx + jj)] -=
          dt * (pressure[(ii * nx + jj)] - pressure[(ii * nx + jj) - nx]) /
          edgedy[ii];

      // Calculate the zone edge centered density
      const double density_edge_x =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) /
          (2.0 * edgedx[jj] * celldy[ii]);
      const double density_edge_y =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) /
          (2.0 * celldx[jj] * edgedy[ii]);

      // Find the velocities from the momenta and edge centered mass densities
      velocity_x[(ii * (nx + 1) + jj)] =
          (density_edge_x == 0.0)
              ? 0.0
              : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x;
      velocity_y[(ii * nx + jj)] =
          (density_edge_y == 0.0)
              ? 0.0
              : momentum_y[(ii * nx + jj)] / density_edge_y;
    }
  }

  STOP_PROFILING(&compute_profile, __func__);

  handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK);
  handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK);
}

// Compute the artificial viscous stresses (PLPC hydro paper formulation),
// apply them to the momenta, and re-derive the velocities.
void artificial_viscosity(const int nx, const int ny, Mesh* mesh,
                          const double dt, double* Qxx, double* Qyy,
                          double* velocity_x, double* velocity_y,
                          double* momentum_x, double* momentum_y,
                          const double* density, const double* edgedx,
                          const double* edgedy, const double* celldx,
                          const double* celldy) {
  START_PROFILING(&compute_profile);

  const int pad = mesh->pad;

  // Calculate the artificial viscous stresses
  // PLPC Hydro Paper
#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // u_i: compressive velocity jump across the cell in x; u_ii is a
      // second-difference measure of that jump
      const double u_i = min(0.0, velocity_x[(ii * (nx + 1) + jj) + 1] -
                                      velocity_x[(ii * (nx + 1) + jj)]);
      const double u_ii =
          0.5 *
          (fabs(min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 2] -
                          velocity_x[(ii * (nx + 1) + jj) + 1])) -
                min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 1] -
                          velocity_x[(ii * (nx + 1) + jj)]))) +
           fabs(min(0.0, (velocity_x[(ii * (nx + 1) + jj) + 1] -
                          velocity_x[(ii * (nx + 1) + jj)])) -
                min(0.0, (velocity_x[(ii * (nx + 1) + jj)] -
                          velocity_x[(ii * (nx + 1) + jj) - 1]))));
      const double v_i = min(0.0, velocity_y[(ii * nx + jj) + nx] -
                                      velocity_y[(ii * nx + jj)]);
      const double v_ii =
          0.5 *
          (fabs(min(0.0, (velocity_y[(ii * nx + jj) + 2 * nx] -
                          velocity_y[(ii * nx + jj) + nx])) -
                min(0.0, (velocity_y[(ii * nx + jj) + nx] -
                          velocity_y[(ii * nx + jj)]))) +
           fabs(min(0.0, (velocity_y[(ii * nx + jj) + nx] -
                          velocity_y[(ii * nx + jj)])) -
                min(0.0, (velocity_y[(ii * nx + jj)] -
                          velocity_y[(ii * nx + jj) - nx]))));
      Qxx[(ii * nx + jj)] = -C_Q * density[(ii * nx + jj)] * u_i * u_ii;
      Qyy[(ii * nx + jj)] = -C_Q * density[(ii * nx + jj)] * v_i * v_ii;
    }
  }

  STOP_PROFILING(&compute_profile, __func__);

  handle_boundary_2d(nx, ny, mesh, Qxx, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, Qyy, NO_INVERT, PACK);

  START_PROFILING(&compute_profile);

  // Update the momenta by the artificial viscous stresses
#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      momentum_x[(ii * (nx + 1) + jj)] -=
          dt * (Qxx[(ii * nx + jj)] - Qxx[(ii * nx + jj) - 1]) / celldx[jj];
      momentum_y[(ii * nx + jj)] -=
          dt * (Qyy[(ii * nx + jj)] - Qyy[(ii * nx + jj) - nx]) / celldy[ii];

      // Calculate the zone edge centered density
      const double density_edge_x =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) /
          (2.0 * edgedx[jj] * celldy[ii]);
      const double density_edge_y =
          (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
           density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) /
          (2.0 * celldx[jj] * edgedy[ii]);

      // Find the velocities from the momenta and edge centered mass densities
      velocity_x[(ii * (nx + 1) + jj)] =
          (density_edge_x == 0.0)
              ? 0.0
              : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x;
      velocity_y[(ii * nx + jj)] =
          (density_edge_y == 0.0)
              ? 0.0
              : momentum_y[(ii * nx + jj)] / density_edge_y;
    }
  }

  STOP_PROFILING(&compute_profile, __func__);

  handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK);
  handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK);
}

// Calculates the work done due to forces within the element
void shock_heating_and_work(const int nx, const int ny, Mesh* mesh,
                            const double dt_h, double* energy,
                            const double* pressure, const double* velocity_x,
                            const double* velocity_y, const double* density,
                            const double* Qxx, const double* Qyy,
                            const double* celldx, const double* celldy) {
  START_PROFILING(&compute_profile);

  const int pad = mesh->pad;

#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      const double div_vel_x = (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                velocity_x[(ii * (nx + 1) + jj)]) /
                               celldx[jj];
      const double div_vel_y =
          (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]) /
          celldy[ii];
      const double div_vel_dt = (div_vel_x + div_vel_y) * dt_h;
      // Shock-heated energy from the viscous stresses
      const double e_q = energy[(ii * nx + jj)] -
                         dt_h * (Qxx[(ii * nx + jj)] * div_vel_x +
                                 Qyy[(ii * nx + jj)] * div_vel_y) /
                             density[(ii * nx + jj)];

      /// A working formulation that is second order in time for Pressure!?
      const double density_c = density[(ii * nx + jj)] / (1.0 + div_vel_dt);
      const double e_c =
          e_q - (pressure[(ii * nx + jj)] * div_vel_dt) / density[(ii * nx + jj)];
      const double work = 0.5 * div_vel_dt *
                          (pressure[(ii * nx + jj)] +
                           (GAM - 1.0) * e_c * density_c) /
                          density[(ii * nx + jj)];
      energy[(ii * nx + jj)] =
          (density[(ii * nx + jj)] == 0.0) ? 0.0 : e_q - work;
    }
  }

  STOP_PROFILING(&compute_profile, __func__);

  handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK);
}

// Perform advection with monotonicity improvement; the x/y sweep order
// alternates each timestep (Strang-style splitting).
void advect_mass_and_energy(const int nx, const int ny, Mesh* mesh,
                            const int tt, const double dt, const double dt_h,
                            double* density, double* energy,
                            double* density_old, double* mass_flux_x,
                            double* mass_flux_y, double* eF_x, double* eF_y,
                            const double* velocity_x, const double* velocity_y,
                            const double* edgedx, const double* edgedy,
                            const double* celldx, const double* celldy) {
  START_PROFILING(&compute_profile);

#pragma omp parallel for
  for (int ii = 0; ii < nx * ny; ++ii) {
    density_old[ii] = density[ii];
  }

  STOP_PROFILING(&compute_profile, "storing_old_density");

  if (tt % 2 == 0) {
    x_mass_and_energy_flux(nx, ny, 1, mesh, dt, dt_h, density, density_old,
                           energy, velocity_x, mass_flux_x, eF_x, celldx,
                           edgedx, celldy, edgedy);
    y_mass_and_energy_flux(nx, ny, 0, mesh, dt, dt_h, density, density_old,
                           energy, velocity_y, mass_flux_y, eF_y, celldx,
                           edgedx, celldy, edgedy);
  } else {
    y_mass_and_energy_flux(nx, ny, 1, mesh, dt, dt_h, density, density_old,
                           energy, velocity_y, mass_flux_y, eF_y, celldx,
                           edgedx, celldy, edgedy);
    x_mass_and_energy_flux(nx, ny, 0, mesh, dt, dt_h, density, density_old,
                           energy, velocity_x, mass_flux_x, eF_x, celldx,
                           edgedx, celldy, edgedy);
  }
}

// Calculate the flux in the x direction
void x_mass_and_energy_flux(const int nx, const int ny, const int first,
                            Mesh* mesh, const double dt, const double dt_h,
                            double* density, double* density_old,
                            double* energy, const double* velocity_x,
                            double* mass_flux_x, double* eF_x,
                            const double* celldx, const double* edgedx,
                            const double* celldy, const double* edgedy) {
  const int pad = mesh->pad;

  // Compute the mass fluxes along the x edges
  // In the ghost cells flux is left as 0.0
  START_PROFILING(&compute_profile);

#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
      // Interpolate to make second order in time
      const double invdx = 1.0 / edgedx[jj];
      const double suc0 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                         velocity_x[(ii * (nx + 1) + jj) - 1]);
      const double sur0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj)] -
                                         velocity_x[(ii * (nx + 1) + jj) - 1]);
      const double sul0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] -
                                         velocity_x[(ii * (nx + 1) + jj)]);
      const double u_tc = velocity_x[(ii * (nx + 1) + jj)] -
                          0.5 * velocity_x[(ii * (nx + 1) + jj)] * dt *
                              minmod(suc0, minmod(sur0, sul0));

      // Van leer limiter
      double limiter = 0.0;
      const double density_diff =
          (density[(ii * nx + jj)] - density[(ii * nx + jj) - 1]);
      if (density_diff) {
        const double smoothness =
            (u_tc >= 0.0)
                ? (density[(ii * nx + jj) - 1] - density[(ii * nx + jj) - 2]) /
                      density_diff
                : (density[(ii * nx + jj) + 1] - density[(ii * nx + jj)]) /
                      density_diff;
        limiter = (smoothness + fabs(smoothness)) / (1.0 + fabs(smoothness));
      }

      // Calculate the flux
      const double density_upwind = (u_tc >= 0.0)
                                        ? density[(ii * nx + jj) - 1]
                                        : density[(ii * nx + jj)];
      mass_flux_x[(ii * (nx + 1) + jj)] =
          (u_tc * density_upwind +
           0.5 * fabs(u_tc) * (1.0 - fabs((u_tc * dt_h) / celldx[jj])) *
               limiter * density_diff);

      // Use MC limiter to get slope of energy
      const double a_x_0 =
          0.5 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 2]);
      const double b_x_0 = 2.0 * invdx * (energy[(ii * nx + jj) - 1] -
                                          energy[(ii * nx + jj) - 2]);
      const double c_x_0 =
          2.0 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 1]);
      const double a_x_1 = 0.5 * invdx * (energy[(ii * nx + jj) + 1] -
                                          energy[(ii * nx + jj) - 1]);
      const double b_x_1 =
          2.0 * invdx * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - 1]);
      const double c_x_1 =
          2.0 * invdx * (energy[(ii * nx + jj) + 1] - energy[(ii * nx + jj)]);

      // Calculate the interpolated densities
      const double edge_e_x =
          (u_tc > 0.0)
              ? energy[(ii * nx + jj) - 1] +
                    0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) *
                        (celldx[jj - 1] - u_tc * dt_h)
              : energy[(ii * nx + jj)] -
                    0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) *
                        (celldx[jj] + u_tc * dt_h);

      // Update the fluxes to now include the contribution from energy
      eF_x[(ii * (nx + 1) + jj)] =
          edgedy[ii] * edge_e_x * mass_flux_x[(ii * (nx + 1) + jj)];
    }
  }

  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");

  handle_boundary_2d(nx + 1, ny, mesh, mass_flux_x, INVERT_X, PACK);

  // Calculate the new density values
  START_PROFILING(&compute_profile);

#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // NOTE(review): the two faces use edgedy[ii + 1] vs edgedy[ii] even
      // though both are x-faces of row ii — confirm this is intentional.
      density[(ii * nx + jj)] -=
          dt_h *
          (edgedy[ii + 1] * mass_flux_x[(ii * (nx + 1) + jj) + 1] -
           edgedy[ii] * mass_flux_x[(ii * (nx + 1) + jj)]) /
          (celldx[jj] * celldy[ii]);
      const double density_e =
          (density_old[(ii * nx + jj)] * energy[(ii * nx + jj)] -
           (dt_h * (eF_x[(ii * (nx + 1) + jj) + 1] -
                    eF_x[(ii * (nx + 1) + jj)])) /
               (celldx[jj] * celldy[ii]));
      // First sweep of the split step divides by the pre-advection density
      energy[(ii * nx + jj)] =
          (first) ? (density_old[(ii * nx + jj)] == 0.0)
                        ? 0.0
                        : density_e / density_old[(ii * nx + jj)]
                  : (density[(ii * nx + jj)] == 0.0)
                        ? 0.0
                        : density_e / density[(ii * nx + jj)];
    }
  }

  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");

  handle_boundary_2d(nx, ny, mesh, density, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK);
}

// Calculate the flux in the y direction
void y_mass_and_energy_flux(const int nx, const int ny, const int first,
                            Mesh* mesh, const double dt, const double dt_h,
                            double* density, double* density_old,
                            double* energy, const double* velocity_y,
                            double* mass_flux_y, double* eF_y,
                            const double* celldx, const double* edgedx,
                            const double* celldy, const double* edgedy) {
  const int pad = mesh->pad;

  // Compute the mass flux along the y edges
  // In the ghost cells flux is left as 0.0
  START_PROFILING(&compute_profile);

#pragma omp parallel for
  for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // Interpolate the velocity to make second order in time
      const double invdy = 1.0 / edgedy[ii];
      const double svc0 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + nx] -
                                         velocity_y[(ii * nx + jj) - nx]);
      const double svr0 = 2.0 * invdy * (velocity_y[(ii * nx + jj)] -
                                         velocity_y[(ii * nx + jj) - nx]);
      const double svl0 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] -
                                         velocity_y[(ii * nx + jj)]);
      const double v_tc = velocity_y[(ii * nx + jj)] -
                          0.5 * velocity_y[(ii * nx + jj)] * dt *
                              minmod(svc0, minmod(svr0, svl0));

      // Van leer limiter
      const double density_diff =
          (density[(ii * nx + jj)] - density[(ii * nx + jj) - nx]);
      double limiter = 0.0;
      if (density_diff) {
        // NOTE(review): the upwind test here uses velocity_y, while the x
        // sweep uses the time-centred u_tc — confirm this asymmetry.
        const double smoothness =
            (velocity_y[(ii * nx + jj)] >= 0.0)
                ? (density[(ii * nx + jj) - nx] -
                   density[(ii * nx + jj) - 2 * nx]) /
                      density_diff
                : (density[(ii * nx + jj) + nx] - density[(ii * nx + jj)]) /
                      density_diff;
        limiter = (smoothness + fabs(smoothness)) / (1.0 + fabs(smoothness));
      }

      // Calculate the flux
      const double density_upwind = (v_tc >= 0.0)
                                        ? density[(ii * nx + jj) - nx]
                                        : density[(ii * nx + jj)];
      mass_flux_y[(ii * nx + jj)] =
          (v_tc * density_upwind +
           0.5 * fabs(v_tc) * (1.0 - fabs((v_tc * dt_h) / celldy[ii])) *
               limiter * density_diff);

      // Use MC limiter to get slope of energy
      const double a_y_0 = 0.5 * invdy * (energy[(ii * nx + jj)] -
                                          energy[(ii * nx + jj) - 2 * nx]);
      const double b_y_0 = 2.0 * invdy * (energy[(ii * nx + jj) - nx] -
                                          energy[(ii * nx + jj) - 2 * nx]);
      const double c_y_0 =
          2.0 * invdy * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - nx]);
      const double a_y_1 = 0.5 * invdy * (energy[(ii * nx + jj) + nx] -
                                          energy[(ii * nx + jj) - nx]);
      const double b_y_1 =
          2.0 * invdy * (energy[(ii * nx + jj)] - energy[(ii * nx + jj) - nx]);
      const double c_y_1 = 2.0 * invdy * (energy[(ii * nx + jj) + nx] -
                                          energy[(ii * nx + jj)]);

      const double edge_e_y =
          (v_tc > 0.0)
              ? energy[(ii * nx + jj) - nx] +
                    0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) *
                        (celldy[ii - 1] - v_tc * dt_h)
              : energy[(ii * nx + jj)] -
                    0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) *
                        (celldy[ii] + v_tc * dt_h);

      // Update the fluxes to now include the contribution from energy
      eF_y[(ii * nx + jj)] =
          edgedx[jj] * edge_e_y * mass_flux_y[(ii * nx + jj)];
    }
  }

  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");

  handle_boundary_2d(nx, ny + 1, mesh, mass_flux_y, INVERT_Y, PACK);

  // Calculate the new density values
  START_PROFILING(&compute_profile);

#pragma omp parallel for
  for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
    for (int jj = pad; jj < nx - pad; ++jj) {
      // NOTE(review): edgedx[jj + 1] vs edgedx[jj] on the two y-faces of
      // column jj — mirrors the x sweep; confirm intentional.
      density[(ii * nx + jj)] -=
          dt_h *
          (edgedx[jj + 1] * mass_flux_y[(ii * nx + jj) + nx] -
           edgedx[jj] * mass_flux_y[(ii * nx + jj)]) /
          (celldx[jj] * celldy[ii]);
      const double density_e =
          (density_old[(ii * nx + jj)] * energy[(ii * nx + jj)] -
           (dt_h * (eF_y[(ii * nx + jj) + nx] - eF_y[(ii * nx + jj)])) /
               (celldx[jj] * celldy[ii]));
      // First sweep of the split step divides by the pre-advection density
      energy[(ii * nx + jj)] =
          (first) ? (density_old[(ii * nx + jj)] == 0.0)
                        ? 0.0
                        : density_e / density_old[(ii * nx + jj)]
                  : (density[(ii * nx + jj)] == 0.0)
                        ? 0.0
                        : density_e / density[(ii * nx + jj)];
    }
  }

  STOP_PROFILING(&compute_profile, "advect_mass_and_energy");

  handle_boundary_2d(nx, ny, mesh, density, NO_INVERT, PACK);
  handle_boundary_2d(nx, ny, mesh, energy, NO_INVERT, PACK);
}

// Advect momentum according to the velocity
// NOTE(review): this function is TRUNCATED in the provided chunk; the text
// below is reproduced verbatim up to the cut — the remainder must be taken
// from the original file.
void advect_momentum(const int nx, const int ny, const int tt, Mesh* mesh,
                     const double dt_h, const double dt, double* velocity_x,
                     double* velocity_y, double* momentum_x_flux_x,
                     double* momentum_x_flux_y, double* momentum_y_flux_x,
                     double* momentum_y_flux_y, double* momentum_x,
                     double* momentum_y, const double* density,
                     const double* mass_flux_x, const double* mass_flux_y,
                     const double* edgedx, const double* edgedy,
                     const double* celldx, const double* celldy) {
  const int pad = mesh->pad;

  if (tt % 2) {
    START_PROFILING(&compute_profile);
    momentum_x_flux_in_x(nx, ny, mesh, dt_h, velocity_x, momentum_x_flux_x,
                         mass_flux_x, edgedx, edgedy, celldx);
    STOP_PROFILING(&compute_profile, __func__);

    handle_boundary_2d(nx, ny, mesh, momentum_x_flux_x, NO_INVERT, PACK);

    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
        momentum_x[(ii * (nx + 1) + jj)] -=
            dt_h *
            (momentum_x_flux_x[(ii * nx + jj)] -
             momentum_x_flux_x[(ii * nx + jj) - 1]) /
            (edgedx[jj] * celldy[ii]);
        const double density_edge_x =
            (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
             density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) /
            (2.0 * edgedx[jj] * celldy[ii]);
        velocity_x[(ii * (nx + 1) + jj)] =
            (density_edge_x == 0.0)
                ? 0.0
                : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x;
      }
    }
    STOP_PROFILING(&compute_profile, __func__);

    handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK);

    START_PROFILING(&compute_profile);
    momentum_x_flux_in_y(nx, ny, mesh, dt_h, velocity_x, velocity_y,
                         momentum_x_flux_y, mass_flux_y, edgedx, edgedy,
                         celldy);
    STOP_PROFILING(&compute_profile, __func__);

    handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_x_flux_y, NO_INVERT,
                       PACK);

    START_PROFILING(&compute_profile);
    // Calculate the axial momentum
#pragma omp parallel for
    for (int ii = pad; ii < ny - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < (nx + 1) - pad; ++jj) {
        momentum_x[(ii * (nx + 1) + jj)] -=
            dt_h *
            (momentum_x_flux_y[(ii * (nx + 1) + jj) + (nx + 1)] -
             momentum_x_flux_y[(ii * (nx + 1) + jj)]) /
            (celldx[jj] * edgedy[ii]);
      }
    }

    momentum_y_flux_in_x(nx, ny, mesh, dt_h, velocity_x, velocity_y,
                         momentum_y_flux_x, mass_flux_x, edgedx, celldy,
                         celldx);
    STOP_PROFILING(&compute_profile, __func__);

    handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_y_flux_x, NO_INVERT,
                       PACK);

    START_PROFILING(&compute_profile);
#pragma omp parallel for
    for (int ii = pad; ii < (ny + 1) - pad; ++ii) {
#pragma omp simd
      for (int jj = pad; jj < nx - pad; ++jj) {
        momentum_y[(ii * nx + jj)] -=
            dt_h *
            (momentum_y_flux_x[(ii * (nx + 1) + jj) + 1] -
             momentum_y_flux_x[(ii * (nx + 1) + jj)]) /
            (edgedx[jj] * celldy[ii]);
        const double density_edge_y =
            (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] +
             density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) /
            (2.0 * celldx[jj] * edgedy[ii]);
        velocity_y[(ii * nx + jj)] = (density_edge_y == 0.0) ?
0.0 : momentum_y[(ii * nx + jj)] / density_edge_y; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK); START_PROFILING(&compute_profile); momentum_y_flux_in_y(nx, ny, mesh, dt_h, velocity_y, momentum_y_flux_y, mass_flux_y, edgedy, celldx, celldy); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, momentum_y_flux_y, NO_INVERT, PACK); START_PROFILING(&compute_profile); #pragma omp parallel for for (int ii = pad; ii < (ny + 1) - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < nx - pad; ++jj) { momentum_y[(ii * nx + jj)] -= dt_h * (momentum_y_flux_y[(ii * nx + jj)] - momentum_y_flux_y[(ii * nx + jj) - nx]) / (celldx[jj] * edgedy[ii]); } } STOP_PROFILING(&compute_profile, __func__); } else { START_PROFILING(&compute_profile); momentum_x_flux_in_y(nx, ny, mesh, dt_h, velocity_x, velocity_y, momentum_x_flux_y, mass_flux_y, edgedx, edgedy, celldy); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_x_flux_y, NO_INVERT, PACK); START_PROFILING(&compute_profile); // Calculate the axial momentum #pragma omp parallel for for (int ii = pad; ii < ny - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < (nx + 1) - pad; ++jj) { momentum_x[(ii * (nx + 1) + jj)] -= dt_h * (momentum_x_flux_y[(ii * (nx + 1) + jj) + (nx + 1)] - momentum_x_flux_y[(ii * (nx + 1) + jj)]) / (celldx[jj] * edgedy[ii]); const double density_edge_x = (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density[(ii * nx + jj) - 1] * celldx[jj - 1] * celldy[ii]) / (2.0 * edgedx[jj] * celldy[ii]); velocity_x[(ii * (nx + 1) + jj)] = (density_edge_x == 0.0) ? 
0.0 : momentum_x[(ii * (nx + 1) + jj)] / density_edge_x; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny, mesh, velocity_x, INVERT_X, PACK); START_PROFILING(&compute_profile); momentum_x_flux_in_x(nx, ny, mesh, dt_h, velocity_x, momentum_x_flux_x, mass_flux_x, edgedx, edgedy, celldx); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, momentum_x_flux_x, NO_INVERT, PACK); START_PROFILING(&compute_profile); #pragma omp parallel for for (int ii = pad; ii < ny - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < (nx + 1) - pad; ++jj) { momentum_x[(ii * (nx + 1) + jj)] -= dt_h * (momentum_x_flux_x[(ii * nx + jj)] - momentum_x_flux_x[(ii * nx + jj) - 1]) / (edgedx[jj] * celldy[ii]); } } momentum_y_flux_in_y(nx, ny, mesh, dt_h, velocity_y, momentum_y_flux_y, mass_flux_y, edgedy, celldx, celldy); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny, mesh, momentum_y_flux_y, NO_INVERT, PACK); START_PROFILING(&compute_profile); #pragma omp parallel for for (int ii = pad; ii < (ny + 1) - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < nx - pad; ++jj) { momentum_y[(ii * nx + jj)] -= dt_h * (momentum_y_flux_y[(ii * nx + jj)] - momentum_y_flux_y[(ii * nx + jj) - nx]) / (celldx[jj] * edgedy[ii]); const double density_edge_y = (density[(ii * nx + jj)] * celldx[jj] * celldy[ii] + density[(ii * nx + jj) - nx] * celldx[jj] * celldy[ii - 1]) / (2.0 * celldx[jj] * edgedy[ii]); velocity_y[(ii * nx + jj)] = (density_edge_y == 0.0) ? 
0.0 : momentum_y[(ii * nx + jj)] / density_edge_y; } } STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx, ny + 1, mesh, velocity_y, INVERT_Y, PACK); START_PROFILING(&compute_profile); momentum_y_flux_in_x(nx, ny, mesh, dt_h, velocity_x, velocity_y, momentum_y_flux_x, mass_flux_x, edgedx, celldy, celldx); STOP_PROFILING(&compute_profile, __func__); handle_boundary_2d(nx + 1, ny + 1, mesh, momentum_y_flux_x, NO_INVERT, PACK); START_PROFILING(&compute_profile); #pragma omp parallel for for (int ii = pad; ii < (ny + 1) - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < nx - pad; ++jj) { momentum_y[(ii * nx + jj)] -= dt_h * (momentum_y_flux_x[(ii * (nx + 1) + jj) + 1] - momentum_y_flux_x[(ii * (nx + 1) + jj)]) / (edgedx[jj] * celldy[ii]); } } STOP_PROFILING(&compute_profile, __func__); } } // Calculates the x momentum flux along the x dimension void momentum_x_flux_in_x(const int nx, const int ny, Mesh* mesh, const double dt_h, double* velocity_x, double* momentum_x_flux_x, const double* mass_flux_x, const double* edgedx, const double* edgedy, const double* celldx) { const int pad = mesh->pad; // Calculate the cell centered x momentum fluxes in the x direction #pragma omp parallel for for (int ii = pad; ii < ny - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < nx - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdx = 1.0 / edgedx[jj]; const double a_x_0 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj) - 1]); const double b_x_0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - 1]); const double c_x_0 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]); const double a_x_1 = 0.5 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 2] - velocity_x[(ii * (nx + 1) + jj)]); const double b_x_1 = 2.0 * invdx * (velocity_x[(ii * (nx + 1) + jj) + 1] - velocity_x[(ii * (nx + 1) + jj)]); const double c_x_1 = 2.0 * invdx * 
(velocity_x[(ii * (nx + 1) + jj) + 2] - velocity_x[(ii * (nx + 1) + jj) + 1]); // Calculate the interpolated densities const double u_cell_x = 0.5 * (velocity_x[(ii * (nx + 1) + jj)] + velocity_x[(ii * (nx + 1) + jj) + 1]); const double F_x = edgedy[ii] * 0.5 * (mass_flux_x[(ii * (nx + 1) + jj)] + mass_flux_x[(ii * (nx + 1) + jj) + 1]); const double u_cell_x_interp = (u_cell_x > 0.0) ? velocity_x[(ii * (nx + 1) + jj)] + 0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) * (celldx[jj - 1] - u_cell_x * dt_h) : velocity_x[(ii * (nx + 1) + jj) + 1] - 0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) * (celldx[jj] + u_cell_x * dt_h); momentum_x_flux_x[(ii * nx + jj)] = F_x * u_cell_x_interp; } } } // Calculates the x momentum flux in the y dimension void momentum_x_flux_in_y(const int nx, const int ny, Mesh* mesh, const double dt_h, double* velocity_x, double* velocity_y, double* momentum_x_flux_y, const double* mass_flux_y, const double* edgedx, const double* edgedy, const double* celldy) { const int pad = mesh->pad; #pragma omp parallel for for (int ii = pad; ii < (ny + 1) - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < (nx + 1) - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdy = 1.0 / edgedy[ii]; const double a_y_0 = 0.5 * invdy * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - 2 * (nx + 1)]); const double b_y_0 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj) - (nx + 1)] - velocity_x[(ii * (nx + 1) + jj) - 2 * (nx + 1)]); const double c_y_0 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double a_y_1 = 0.5 * invdy * (velocity_x[(ii * (nx + 1) + jj) + (nx + 1)] - velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double b_y_1 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj)] - velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double c_y_1 = 2.0 * invdy * (velocity_x[(ii * (nx + 1) + jj) + (nx + 1)] - velocity_x[(ii * (nx + 1) + jj)]); const double v_cell_y = 0.5 * 
(velocity_y[(ii * nx + jj) - 1] + velocity_y[(ii * nx + jj)]); const double F_y = edgedx[jj] * 0.5 * (mass_flux_y[(ii * nx + jj)] + mass_flux_y[(ii * nx + jj) - 1]); const double u_corner_y = (v_cell_y > 0.0) ? velocity_x[(ii * (nx + 1) + jj) - (nx + 1)] + 0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) * (celldy[ii - 1] - v_cell_y * dt_h) : velocity_x[(ii * (nx + 1) + jj)] - 0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) * (celldy[ii] + v_cell_y * dt_h); momentum_x_flux_y[(ii * (nx + 1) + jj)] = F_y * u_corner_y; } } } // Calculates the y momentum flux in the x dimension void momentum_y_flux_in_x(const int nx, const int ny, Mesh* mesh, const double dt_h, const double* velocity_x, double* velocity_y, double* momentum_y_flux_x, const double* mass_flux_x, const double* edgedx, const double* celldy, const double* celldx) { const int pad = mesh->pad; // Calculate the corner centered y momentum fluxes in the x direction // Calculate the cell centered y momentum fluxes in the y direction #pragma omp parallel for for (int ii = pad; ii < (ny + 1) - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < (nx + 1) - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdx = 1.0 / edgedx[jj]; const double a_x_0 = 0.5 * invdx * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - 2]); const double b_x_0 = 2.0 * invdx * (velocity_y[(ii * nx + jj) - 1] - velocity_y[(ii * nx + jj) - 2]); const double c_x_0 = 2.0 * invdx * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - 1]); const double a_x_1 = 0.5 * invdx * (velocity_y[(ii * nx + jj) + 1] - velocity_y[(ii * nx + jj) - 1]); const double b_x_1 = 2.0 * invdx * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - 1]); const double c_x_1 = 2.0 * invdx * (velocity_y[(ii * nx + jj) + 1] - velocity_y[(ii * nx + jj)]); // Calculate the interpolated densities const double F_x = celldy[ii] * 0.5 * (mass_flux_x[(ii * (nx + 1) + jj)] + mass_flux_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double u_cell_x = 0.5 * 
(velocity_x[(ii * (nx + 1) + jj)] + velocity_x[(ii * (nx + 1) + jj) - (nx + 1)]); const double v_cell_x_interp = (u_cell_x > 0.0) ? velocity_y[(ii * nx + jj) - 1] + 0.5 * minmod(minmod(a_x_0, b_x_0), c_x_0) * (celldx[jj - 1] - u_cell_x * dt_h) : velocity_y[(ii * nx + jj)] - 0.5 * minmod(minmod(a_x_1, b_x_1), c_x_1) * (celldx[jj] + u_cell_x * dt_h); momentum_y_flux_x[(ii * (nx + 1) + jj)] = F_x * v_cell_x_interp; } } } // Calculates the y momentum flux in the y dimension void momentum_y_flux_in_y(const int nx, const int ny, Mesh* mesh, const double dt_h, double* velocity_y, double* momentum_y_flux_y, const double* mass_flux_y, const double* edgedy, const double* celldx, const double* celldy) { const int pad = mesh->pad; #pragma omp parallel for for (int ii = pad; ii < ny - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < nx - pad; ++jj) { // Use MC limiter to get slope of velocity const double invdy = 1.0 / edgedy[ii]; const double a_y_0 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj) - nx]); const double b_y_0 = 2.0 * invdy * (velocity_y[(ii * nx + jj)] - velocity_y[(ii * nx + jj) - nx]); const double c_y_0 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]); const double a_y_1 = 0.5 * invdy * (velocity_y[(ii * nx + jj) + 2 * nx] - velocity_y[(ii * nx + jj)]); const double b_y_1 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + nx] - velocity_y[(ii * nx + jj)]); const double c_y_1 = 2.0 * invdy * (velocity_y[(ii * nx + jj) + 2 * nx] - velocity_y[(ii * nx + jj) + nx]); const double F_y = celldx[jj] * 0.5 * (mass_flux_y[(ii * nx + jj)] + mass_flux_y[(ii * nx + jj) + nx]); const double v_cell_y = 0.5 * (velocity_y[(ii * nx + jj)] + velocity_y[(ii * nx + jj) + nx]); const double v_cell_y_interp = (v_cell_y > 0.0) ? 
velocity_y[(ii * nx + jj)] + 0.5 * minmod(minmod(a_y_0, b_y_0), c_y_0) * (celldy[ii - 1] - v_cell_y * dt_h) : velocity_y[(ii * nx + jj) + nx] - 0.5 * minmod(minmod(a_y_1, b_y_1), c_y_1) * (celldy[ii] + v_cell_y * dt_h); momentum_y_flux_y[(ii * nx + jj)] = F_y * v_cell_y_interp; } } } // Prints some conservation values void print_conservation(const int nx, const int ny, double* density, double* energy, double* reduce_array, Mesh* mesh) { double mass_tot = 0.0; double energy_tot = 0.0; const int pad = mesh->pad; #pragma omp parallel for reduction(+ : mass_tot, energy_tot) for (int ii = pad; ii < ny - pad; ++ii) { #pragma omp simd for (int jj = pad; jj < nx - pad; ++jj) { mass_tot += density[(ii * nx + jj)]; energy_tot += density[(ii * nx + jj)] * energy[(ii * nx + jj)]; } } double global_mass_tot = reduce_to_master(mass_tot); double global_energy_tot = reduce_to_master(energy_tot); if (mesh->rank == MASTER) { printf("Total mass: %.12e\n", global_mass_tot); printf("Total energy: %.12e\n", global_energy_tot); } }
GB_unop__asinh_fc32_fc32.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop_apply__asinh_fc32_fc32
// op(A') function:  GB_unop_tran__asinh_fc32_fc32

// C type:   GxB_FC32_t
// A type:   GxB_FC32_t
// cast:     GxB_FC32_t cij = aij
// unaryop:  cij = casinhf (aij)

#define GB_ATYPE \
    GxB_FC32_t

#define GB_CTYPE \
    GxB_FC32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    GxB_FC32_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: complex single-precision inverse hyperbolic sine
#define GB_OP(z, x) \
    z = casinhf (x) ;

// casting (identity here: A and C are both GxB_FC32_t)
#define GB_CAST(z, aij) \
    GxB_FC32_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GxB_FC32_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GxB_FC32_t z = aij ; \
    Cx [pC] = casinhf (z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ASINH || GxB_NO_FC32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies casinhf elementwise to the anz entries of Ax, writing into Cx.
// Aliasing Cx == Ax is safe because each entry is read before it is written
// and iterations are independent.
GrB_Info GB_unop_apply__asinh_fc32_fc32
(
    GxB_FC32_t *Cx,       // Cx and Ax may be aliased
    const GxB_FC32_t *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GxB_FC32_t aij = Ax [p] ;
        GxB_FC32_t z = aij ;
        Cx [p] = casinhf (z) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is generated by including GB_unop_transpose.c,
// which expands in terms of the GB_* macros defined above.
GrB_Info GB_unop_tran__asinh_fc32_fc32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
filter.h
#ifndef OPENMC_TALLIES_FILTER_H
#define OPENMC_TALLIES_FILTER_H

#include <cstdint>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>

#include <gsl/gsl>

#include "openmc/hdf5_interface.h"
#include "openmc/particle.h"
#include "pugixml.hpp"

namespace openmc {

//==============================================================================
//! Stores bins and weights for filtered tally events.
//==============================================================================

class FilterMatch {
public:
  std::vector<int> bins_;       //!< filter bins matched by the current event
  std::vector<double> weights_; //!< weight corresponding to each matched bin
  int i_bin_;                   //!< index of the bin currently being scored
  bool bins_present_ {false};   //!< true once bins/weights have been filled
};

} // namespace openmc

// Without an explicit instantiation of vector<FilterMatch>, the Intel compiler
// will complain about the threadprivate directive on filter_matches. Note that
// this has to happen *outside* of the openmc namespace
extern template class std::vector<openmc::FilterMatch>;

namespace openmc {

//==============================================================================
//! Modifies tally score events.
//!
//! Abstract base class: concrete filters implement from_xml, type,
//! get_all_bins, and text_label, and must set n_bins_.
//==============================================================================

class Filter {
public:
  //----------------------------------------------------------------------------
  // Constructors, destructors, factory functions

  Filter();
  virtual ~Filter();

  //! Create a new tally filter
  //
  //! \param[in] type  Type of the filter
  //! \param[in] id  Unique ID for the filter. If none is passed, an ID is
  //!   automatically assigned
  //! \return Pointer to the new filter object
  static Filter* create(const std::string& type, int32_t id = -1);

  //! Create a new tally filter from an XML node
  //
  //! \param[in] node  XML node
  //! \return Pointer to the new filter object
  static Filter* create(pugi::xml_node node);

  //! Uses an XML input to fill the filter's data fields.
  virtual void from_xml(pugi::xml_node node) = 0;

  //----------------------------------------------------------------------------
  // Methods

  virtual std::string type() const = 0;

  //! Matches a tally event to a set of filter bins and weights.
  //!
  //! \param[out] match will contain the matching bins and corresponding
  //!   weights; note that there may be zero matching bins
  virtual void get_all_bins(const Particle* p, int estimator,
                            FilterMatch& match) const = 0;

  //! Writes data describing this filter to an HDF5 statepoint group.
  //! Subclasses that override this should still write "type" and "n_bins".
  virtual void to_statepoint(hid_t filter_group) const
  {
    write_dataset(filter_group, "type", type());
    write_dataset(filter_group, "n_bins", n_bins_);
  }

  //! Return a string describing a filter bin for the tallies.out file.
  //
  //! For example, an `EnergyFilter` might return the string
  //! "Incoming Energy [0.625E-6, 20.0)".
  virtual std::string text_label(int bin) const = 0;

  //----------------------------------------------------------------------------
  // Accessors

  //! Get unique ID of filter
  //! \return Unique ID
  int32_t id() const { return id_; }

  //! Assign a unique ID to the filter
  //! \param[in] Unique ID to assign. A value of -1 indicates that an ID should
  //!   be automatically assigned
  void set_id(int32_t id);

  //! Get number of bins
  //! \return Number of bins
  int n_bins() const { return n_bins_; }

  gsl::index index() const { return index_; }

  //----------------------------------------------------------------------------
  // Data members

protected:
  int n_bins_; // number of bins; must be set by the concrete filter

private:
  int32_t id_ {-1};   // user-visible unique ID (-1 means unassigned)
  gsl::index index_;  // position of this filter in model::tally_filters
};

//==============================================================================
// Global variables
//==============================================================================

namespace simulation {
  // One FilterMatch per filter; threadprivate so each OpenMP thread scores
  // into its own copy.
  extern std::vector<FilterMatch> filter_matches;
#pragma omp threadprivate(filter_matches)
} // namespace simulation

namespace model {
  extern "C" int32_t n_filters;
  extern std::vector<std::unique_ptr<Filter>> tally_filters;
  extern std::unordered_map<int, int> filter_map;
}

//==============================================================================
// Non-member functions
//==============================================================================

//! Make sure index corresponds to a valid filter
int verify_filter(int32_t index);

} // namespace openmc
#endif // OPENMC_TALLIES_FILTER_H
DRB026-targetparallelfor-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ /* Race condition due to anti-dependence within a loop offloaded to accelerators. Data race pair: a[i]@64:5 vs. a[i+1]@64:10 */ #include <omp.h> int main(int argc,char *argv[]) { int i; int len = 1000; int a[1000]; #pragma omp parallel for private (i) for (i = 0; i <= len - 1; i += 1) { a[i] = i; } for (i = 0; i <= len - 1 - 1; i += 1) { a[i] = a[i + 1] + 1; } for (i = 0; i <= len - 1; i += 1) { printf("%d\n",a[i]); } return 0; }
Stmt.h
//===--- Stmt.h - Classes for representing statements -----------*- C++ -*-===// // // The LLVM Compiler Infrastructure // // This file is distributed under the University of Illinois Open Source // License. See LICENSE.TXT for details. // //===----------------------------------------------------------------------===// // // This file defines the Stmt interface and subclasses. // //===----------------------------------------------------------------------===// #ifndef LLVM_CLANG_AST_STMT_H #define LLVM_CLANG_AST_STMT_H #include "clang/AST/DeclGroup.h" #include "clang/AST/StmtIterator.h" #include "clang/Basic/CapturedStmt.h" #include "clang/Basic/IdentifierTable.h" #include "clang/Basic/LLVM.h" #include "clang/Basic/SourceLocation.h" #include "llvm/ADT/ArrayRef.h" #include "llvm/ADT/PointerIntPair.h" #include "llvm/Support/Compiler.h" #include "llvm/Support/ErrorHandling.h" #include <string> namespace llvm { class FoldingSetNodeID; } namespace clang { class ASTContext; class Attr; class CapturedDecl; class Decl; class Expr; class IdentifierInfo; class LabelDecl; class ParmVarDecl; class PrinterHelper; struct PrintingPolicy; class QualType; class RecordDecl; class SourceManager; class StringLiteral; class SwitchStmt; class Token; class VarDecl; //===--------------------------------------------------------------------===// // ExprIterator - Iterators for iterating over Stmt* arrays that contain // only Expr*. This is needed because AST nodes use Stmt* arrays to store // references to children (to be compatible with StmtIterator). 
//===--------------------------------------------------------------------===// class Stmt; class Expr; class ExprIterator : public std::iterator<std::forward_iterator_tag, Expr *&, ptrdiff_t, Expr *&, Expr *&> { Stmt** I; public: ExprIterator(Stmt** i) : I(i) {} ExprIterator() : I(nullptr) {} ExprIterator& operator++() { ++I; return *this; } ExprIterator operator-(size_t i) { return I-i; } ExprIterator operator+(size_t i) { return I+i; } Expr* operator[](size_t idx); // FIXME: Verify that this will correctly return a signed distance. signed operator-(const ExprIterator& R) const { return I - R.I; } Expr* operator*() const; Expr* operator->() const; bool operator==(const ExprIterator& R) const { return I == R.I; } bool operator!=(const ExprIterator& R) const { return I != R.I; } bool operator>(const ExprIterator& R) const { return I > R.I; } bool operator>=(const ExprIterator& R) const { return I >= R.I; } }; class ConstExprIterator : public std::iterator<std::forward_iterator_tag, const Expr *&, ptrdiff_t, const Expr *&, const Expr *&> { const Stmt * const *I; public: ConstExprIterator(const Stmt * const *i) : I(i) {} ConstExprIterator() : I(nullptr) {} ConstExprIterator& operator++() { ++I; return *this; } ConstExprIterator operator+(size_t i) const { return I+i; } ConstExprIterator operator-(size_t i) const { return I-i; } const Expr * operator[](size_t idx) const; signed operator-(const ConstExprIterator& R) const { return I - R.I; } const Expr * operator*() const; const Expr * operator->() const; bool operator==(const ConstExprIterator& R) const { return I == R.I; } bool operator!=(const ConstExprIterator& R) const { return I != R.I; } bool operator>(const ConstExprIterator& R) const { return I > R.I; } bool operator>=(const ConstExprIterator& R) const { return I >= R.I; } }; //===----------------------------------------------------------------------===// // AST classes for statements. 
// //
///////////////////////////////////////////////////////////////////////////////

/// Stmt - This represents one statement.
///
/// Stmt is the root of the AST statement hierarchy. Per-subclass data that
/// fits in bits is packed into the anonymous union of *Bitfields classes
/// below; every derived view starts with an unnamed field of NumStmtBits
/// (or NumExprBits) so it does not clobber the base class's bits.
class LLVM_ALIGNAS(LLVM_PTR_SIZE) Stmt {
public:
  // Enumerators for every concrete statement class, generated from
  // StmtNodes.inc; the *_RANGE constants bracket each abstract base's
  // subclasses for fast classof range checks.
  enum StmtClass {
    NoStmtClass = 0,
#define STMT(CLASS, PARENT) CLASS##Class,
#define STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class,
#define LAST_STMT_RANGE(BASE, FIRST, LAST) \
        first##BASE##Constant=FIRST##Class, last##BASE##Constant=LAST##Class
#define ABSTRACT_STMT(STMT)
#include "clang/AST/StmtNodes.inc"
  };

  // Make vanilla 'new' and 'delete' illegal for Stmts.
protected:
  void* operator new(size_t bytes) throw() {
    llvm_unreachable("Stmts cannot be allocated with regular 'new'.");
  }
  void operator delete(void* data) throw() {
    llvm_unreachable("Stmts cannot be released with regular 'delete'.");
  }

  class StmtBitfields {
    friend class Stmt;

    /// \brief The statement class.
    unsigned sClass : 8;
  };
  enum { NumStmtBits = 8 };

  class CompoundStmtBitfields {
    friend class CompoundStmt;

    unsigned : NumStmtBits;   // pad past the base Stmt bits

    unsigned NumStmts : 32 - NumStmtBits;
  };

  class ExprBitfields {
    friend class Expr;
    friend class DeclRefExpr; // computeDependence
    friend class InitListExpr; // ctor
    friend class DesignatedInitExpr; // ctor
    friend class BlockDeclRefExpr; // ctor
    friend class ASTStmtReader; // deserialization
    friend class CXXNewExpr; // ctor
    friend class DependentScopeDeclRefExpr; // ctor
    friend class CXXConstructExpr; // ctor
    friend class CallExpr; // ctor
    friend class OffsetOfExpr; // ctor
    friend class ObjCMessageExpr; // ctor
    friend class ObjCArrayLiteral; // ctor
    friend class ObjCDictionaryLiteral; // ctor
    friend class ShuffleVectorExpr; // ctor
    friend class ParenListExpr; // ctor
    friend class CXXUnresolvedConstructExpr; // ctor
    friend class CXXDependentScopeMemberExpr; // ctor
    friend class OverloadExpr; // ctor
    friend class PseudoObjectExpr; // ctor
    friend class AtomicExpr; // ctor

    unsigned : NumStmtBits;   // pad past the base Stmt bits

    unsigned ValueKind : 2;
    unsigned ObjectKind : 2;
    unsigned TypeDependent : 1;
    unsigned ValueDependent : 1;
    unsigned InstantiationDependent : 1;
    unsigned ContainsUnexpandedParameterPack : 1;
  };
  enum { NumExprBits = 16 };

  class CharacterLiteralBitfields {
    friend class CharacterLiteral;

    unsigned : NumExprBits;

    unsigned Kind : 2;
  };

  // Mirrors the set of llvm::APFloat semantics a FloatingLiteral can carry.
  enum APFloatSemantics {
    IEEEhalf,
    IEEEsingle,
    IEEEdouble,
    x87DoubleExtended,
    IEEEquad,
    PPCDoubleDouble
  };

  class FloatingLiteralBitfields {
    friend class FloatingLiteral;

    unsigned : NumExprBits;

    unsigned Semantics : 3; // Provides semantics for APFloat construction
    unsigned IsExact : 1;
  };

  class UnaryExprOrTypeTraitExprBitfields {
    friend class UnaryExprOrTypeTraitExpr;

    unsigned : NumExprBits;

    unsigned Kind : 3; // HLSL Change
    unsigned IsType : 1; // true if operand is a type, false if an expression.
  };

  class DeclRefExprBitfields {
    friend class DeclRefExpr;
    friend class ASTStmtReader; // deserialization
    unsigned : NumExprBits;

    unsigned HasQualifier : 1;
    unsigned HasTemplateKWAndArgsInfo : 1;
    unsigned HasFoundDecl : 1;
    unsigned HadMultipleCandidates : 1;
    unsigned RefersToEnclosingVariableOrCapture : 1;
  };

  class CastExprBitfields {
    friend class CastExpr;

    unsigned : NumExprBits;

    unsigned Kind : 7; // HLSL Change
    unsigned BasePathSize : 32 - 7 - NumExprBits; // HLSL Change
  };

  class CallExprBitfields {
    friend class CallExpr;

    unsigned : NumExprBits;

    unsigned NumPreArgs : 1;
  };

  class ExprWithCleanupsBitfields {
    friend class ExprWithCleanups;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    unsigned NumObjects : 32 - NumExprBits;
  };

  class PseudoObjectExprBitfields {
    friend class PseudoObjectExpr;
    friend class ASTStmtReader; // deserialization

    unsigned : NumExprBits;

    // These don't need to be particularly wide, because they're
    // strictly limited by the forms of expressions we permit.
    unsigned NumSubExprs : 8;
    unsigned ResultIndex : 32 - 8 - NumExprBits;
  };

  class ObjCIndirectCopyRestoreExprBitfields {
    friend class ObjCIndirectCopyRestoreExpr;

    unsigned : NumExprBits;

    unsigned ShouldCopy : 1;
  };

  class InitListExprBitfields {
    friend class InitListExpr;

    unsigned : NumExprBits;

    /// Whether this initializer list originally had a GNU array-range
    /// designator in it. This is a temporary marker used by CodeGen.
    unsigned HadArrayRangeDesignator : 1;
  };

  class TypeTraitExprBitfields {
    friend class TypeTraitExpr;
    friend class ASTStmtReader;
    friend class ASTStmtWriter;

    unsigned : NumExprBits;

    /// \brief The kind of type trait, which is a value of a TypeTrait
    /// enumerator.
    unsigned Kind : 8;

    /// \brief If this expression is not value-dependent, this indicates
    /// whether the trait evaluated true or false.
    unsigned Value : 1;

    /// \brief The number of arguments to this type trait.
    unsigned NumArgs : 32 - 8 - 1 - NumExprBits;
  };

  // All of the bit-field views above alias this single word of storage.
  union {
    StmtBitfields StmtBits;
    CompoundStmtBitfields CompoundStmtBits;
    ExprBitfields ExprBits;
    CharacterLiteralBitfields CharacterLiteralBits;
    FloatingLiteralBitfields FloatingLiteralBits;
    UnaryExprOrTypeTraitExprBitfields UnaryExprOrTypeTraitExprBits;
    DeclRefExprBitfields DeclRefExprBits;
    CastExprBitfields CastExprBits;
    CallExprBitfields CallExprBits;
    ExprWithCleanupsBitfields ExprWithCleanupsBits;
    PseudoObjectExprBitfields PseudoObjectExprBits;
    ObjCIndirectCopyRestoreExprBitfields ObjCIndirectCopyRestoreExprBits;
    InitListExprBitfields InitListExprBits;
    TypeTraitExprBitfields TypeTraitExprBits;
  };

  friend class ASTStmtReader;
  friend class ASTStmtWriter;

public:
  // Only allow allocation of Stmts using the allocator in ASTContext
  // or by doing a placement new.
  void* operator new(size_t bytes, const ASTContext& C,
                     unsigned alignment = 8);

  void* operator new(size_t bytes, const ASTContext* C,
                     unsigned alignment = 8) {
    return operator new(bytes, *C, alignment);
  }

  void* operator new(size_t bytes, void* mem) throw() {
    return mem;
  }

  void operator delete(void*, const ASTContext&, unsigned) throw() { }
  void operator delete(void*, const ASTContext*, unsigned) throw() { }
  void operator delete(void*, size_t) throw() { }
  void operator delete(void*, void*) throw() { }

public:
  /// \brief A placeholder type used to construct an empty shell of a
  /// type, that will be filled in later (e.g., by some
  /// de-serialization).
  struct EmptyShell { };

private:
  /// \brief Whether statistic collection is enabled.
  static bool StatisticsEnabled;

protected:
  /// \brief Construct an empty statement.
  explicit Stmt(StmtClass SC, EmptyShell) : Stmt(SC) {}

public:
  Stmt(StmtClass SC) {
    static_assert(sizeof(*this) % llvm::AlignOf<void *>::Alignment == 0,
                  "Insufficient alignment!");
    StmtBits.sClass = SC;
    if (StatisticsEnabled) Stmt::addStmtClass(SC);
  }

  StmtClass getStmtClass() const {
    return static_cast<StmtClass>(StmtBits.sClass);
  }
  const char *getStmtClassName() const;

  /// SourceLocation tokens are not useful in isolation - they are low level
  /// value objects created/interpreted by SourceManager. We assume AST
  /// clients will have a pointer to the respective SourceManager.
  SourceRange getSourceRange() const LLVM_READONLY;
  SourceLocation getLocStart() const LLVM_READONLY;
  SourceLocation getLocEnd() const LLVM_READONLY;

  // global temp stats (until we have a per-module visitor)
  static void addStmtClass(const StmtClass s);
  static void EnableStatistics();
  static void PrintStats();

  /// \brief Dumps the specified AST fragment and all subtrees to
  /// \c llvm::errs().
  void dump() const;
  void dump(SourceManager &SM) const;
  void dump(raw_ostream &OS, SourceManager &SM) const;
  void dump(raw_ostream &OS) const;

  /// dumpColor - same as dump(), but forces color highlighting.
  void dumpColor() const;

  /// dumpPretty/printPretty - These two methods do a "pretty print" of the AST
  /// back to its original source language syntax.
  void dumpPretty(const ASTContext &Context) const;
  void printPretty(raw_ostream &OS, PrinterHelper *Helper,
                   const PrintingPolicy &Policy,
                   unsigned Indentation = 0) const;

  /// viewAST - Visualize an AST rooted at this Stmt* using GraphViz.  Only
  /// works on systems with GraphViz (Mac OS X) or dot+gv installed.
  void viewAST() const;

  /// Skip past any implicit AST nodes which might surround this
  /// statement, such as ExprWithCleanups or ImplicitCastExpr nodes.
  Stmt *IgnoreImplicit();

  /// \brief Skip no-op (attributed, compound) container stmts and skip
  /// captured stmt at the top, if \a IgnoreCaptured is true.
  Stmt *IgnoreContainers(bool IgnoreCaptured = false);

  const Stmt *stripLabelLikeStatements() const;
  Stmt *stripLabelLikeStatements() {
    return const_cast<Stmt*>(
      const_cast<const Stmt*>(this)->stripLabelLikeStatements());
  }

  /// Child Iterators: All subclasses must implement 'children'
  /// to permit easy iteration over the substatements/subexpessions of an
  /// AST node.  This permits easy iteration over all nodes in the AST.
  typedef StmtIterator       child_iterator;
  typedef ConstStmtIterator  const_child_iterator;

  typedef StmtRange          child_range;
  typedef ConstStmtRange     const_child_range;

  child_range children();
  const_child_range children() const {
    return const_cast<Stmt*>(this)->children();
  }

  child_iterator child_begin() { return children().first; }
  child_iterator child_end() { return children().second; }

  const_child_iterator child_begin() const { return children().first; }
  const_child_iterator child_end() const { return children().second; }

  /// \brief Produce a unique representation of the given statement.
  ///
  /// \param ID once the profiling operation is complete, will contain
  /// the unique representation of the given statement.
  ///
  /// \param Context the AST context in which the statement resides
  ///
  /// \param Canonical whether the profile should be based on the canonical
  /// representation of this statement (e.g., where non-type template
  /// parameters are identified by index/level rather than their
  /// declaration pointers) or the exact representation of the statement as
  /// written in the source.
  void Profile(llvm::FoldingSetNodeID &ID, const ASTContext &Context,
               bool Canonical) const;
};

/// DeclStmt - Adaptor class for mixing declarations with statements and
/// expressions. For example, CompoundStmt mixes statements, expressions
/// and declarations (variables, types). Another example is ForStmt, where
/// the first statement can be an expression or a declaration.
///
class DeclStmt : public Stmt {
  DeclGroupRef DG;
  SourceLocation StartLoc, EndLoc;

public:
  DeclStmt(DeclGroupRef dg, SourceLocation startLoc,
           SourceLocation endLoc)
    : Stmt(DeclStmtClass), DG(dg), StartLoc(startLoc), EndLoc(endLoc) {}

  /// \brief Build an empty declaration statement.
  explicit DeclStmt(EmptyShell Empty) : Stmt(DeclStmtClass, Empty) { }

  /// isSingleDecl - This method returns true if this DeclStmt refers
  /// to a single Decl.
  bool isSingleDecl() const {
    return DG.isSingleDecl();
  }

  const Decl *getSingleDecl() const { return DG.getSingleDecl(); }
  Decl *getSingleDecl() { return DG.getSingleDecl(); }

  const DeclGroupRef getDeclGroup() const { return DG; }
  DeclGroupRef getDeclGroup() { return DG; }
  void setDeclGroup(DeclGroupRef DGR) { DG = DGR; }

  SourceLocation getStartLoc() const { return StartLoc; }
  void setStartLoc(SourceLocation L) { StartLoc = L; }
  SourceLocation getEndLoc() const { return EndLoc; }
  void setEndLoc(SourceLocation L) { EndLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return StartLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DeclStmtClass;
  }

  // Iterators over subexpressions.
  child_range children() {
    return child_range(child_iterator(DG.begin(), DG.end()),
                       child_iterator(DG.end(), DG.end()));
  }

  typedef DeclGroupRef::iterator decl_iterator;
  typedef DeclGroupRef::const_iterator const_decl_iterator;
  typedef llvm::iterator_range<decl_iterator> decl_range;
  typedef llvm::iterator_range<const_decl_iterator> decl_const_range;

  decl_range decls() { return decl_range(decl_begin(), decl_end()); }
  decl_const_range decls() const {
    return decl_const_range(decl_begin(), decl_end());
  }
  decl_iterator decl_begin() { return DG.begin(); }
  decl_iterator decl_end() { return DG.end(); }
  const_decl_iterator decl_begin() const { return DG.begin(); }
  const_decl_iterator decl_end() const { return DG.end(); }

  typedef std::reverse_iterator<decl_iterator> reverse_decl_iterator;
  reverse_decl_iterator decl_rbegin() {
    return reverse_decl_iterator(decl_end());
  }
  reverse_decl_iterator decl_rend() {
    return reverse_decl_iterator(decl_begin());
  }
};

/// NullStmt - This is the null statement ";": C99 6.8.3p3.
/// class NullStmt : public Stmt { SourceLocation SemiLoc; /// \brief True if the null statement was preceded by an empty macro, e.g: /// @code /// #define CALL(x) /// CALL(0); /// @endcode bool HasLeadingEmptyMacro; public: NullStmt(SourceLocation L, bool hasLeadingEmptyMacro = false) : Stmt(NullStmtClass), SemiLoc(L), HasLeadingEmptyMacro(hasLeadingEmptyMacro) {} /// \brief Build an empty null statement. explicit NullStmt(EmptyShell Empty) : Stmt(NullStmtClass, Empty), HasLeadingEmptyMacro(false) { } SourceLocation getSemiLoc() const { return SemiLoc; } void setSemiLoc(SourceLocation L) { SemiLoc = L; } bool hasLeadingEmptyMacro() const { return HasLeadingEmptyMacro; } SourceLocation getLocStart() const LLVM_READONLY { return SemiLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return SemiLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == NullStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; // HLSL Change: Adding discard statement support /// discard - This is the hlsl discard statement "discard;". /// class DiscardStmt : public Stmt { SourceLocation Loc; public: DiscardStmt(SourceLocation L) : Stmt(DiscardStmtClass) , Loc(L) {} /// \brief Build an empty Discard statement. explicit DiscardStmt(EmptyShell Empty) : Stmt(DiscardStmtClass, Empty) {} SourceLocation getLoc() const { return Loc; } void setLoc(SourceLocation L) { Loc = L; } SourceLocation getLocStart() const LLVM_READONLY { return Loc; } SourceLocation getLocEnd() const LLVM_READONLY { return Loc; } static bool classof(const Stmt *T) { return T->getStmtClass() == DiscardStmtClass; } child_range children() { return child_range(); } friend class ASTStmtReader; friend class ASTStmtWriter; }; // End of HLSL Change /// CompoundStmt - This represents a group of statements like { stmt stmt }. 
///
class CompoundStmt : public Stmt {
  Stmt** Body;   // array of NumStmts children, allocated from the ASTContext
  SourceLocation LBraceLoc, RBraceLoc;

  friend class ASTStmtReader;

public:
  CompoundStmt(const ASTContext &C, ArrayRef<Stmt*> Stmts,
               SourceLocation LB, SourceLocation RB);

  // \brief Build an empty compound statement with a location.
  explicit CompoundStmt(SourceLocation Loc)
    : Stmt(CompoundStmtClass), Body(nullptr), LBraceLoc(Loc), RBraceLoc(Loc) {
    CompoundStmtBits.NumStmts = 0;
  }

  // \brief Build an empty compound statement.
  explicit CompoundStmt(EmptyShell Empty)
    : Stmt(CompoundStmtClass, Empty), Body(nullptr) {
    CompoundStmtBits.NumStmts = 0;
  }

  void setStmts(const ASTContext &C, Stmt **Stmts, unsigned NumStmts);

  bool body_empty() const { return CompoundStmtBits.NumStmts == 0; }
  unsigned size() const { return CompoundStmtBits.NumStmts; }

  typedef Stmt** body_iterator;
  typedef llvm::iterator_range<body_iterator> body_range;

  body_range body() { return body_range(body_begin(), body_end()); }
  body_iterator body_begin() { return Body; }
  body_iterator body_end() { return Body + size(); }
  Stmt *body_front() { return !body_empty() ? Body[0] : nullptr; }
  Stmt *body_back() { return !body_empty() ? Body[size()-1] : nullptr; }

  void setLastStmt(Stmt *S) {
    assert(!body_empty() && "setLastStmt");
    Body[size()-1] = S;
  }

  typedef Stmt* const * const_body_iterator;
  typedef llvm::iterator_range<const_body_iterator> body_const_range;

  body_const_range body() const {
    return body_const_range(body_begin(), body_end());
  }
  const_body_iterator body_begin() const { return Body; }
  const_body_iterator body_end() const { return Body + size(); }
  const Stmt *body_front() const {
    return !body_empty() ? Body[0] : nullptr;
  }
  const Stmt *body_back() const {
    return !body_empty() ? Body[size() - 1] : nullptr;
  }

  typedef std::reverse_iterator<body_iterator> reverse_body_iterator;
  reverse_body_iterator body_rbegin() {
    return reverse_body_iterator(body_end());
  }
  reverse_body_iterator body_rend() {
    return reverse_body_iterator(body_begin());
  }

  typedef std::reverse_iterator<const_body_iterator>
          const_reverse_body_iterator;
  const_reverse_body_iterator body_rbegin() const {
    return const_reverse_body_iterator(body_end());
  }
  const_reverse_body_iterator body_rend() const {
    return const_reverse_body_iterator(body_begin());
  }

  SourceLocation getLocStart() const LLVM_READONLY { return LBraceLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RBraceLoc; }

  SourceLocation getLBracLoc() const { return LBraceLoc; }
  SourceLocation getRBracLoc() const { return RBraceLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CompoundStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }

  const_child_range children() const {
    return child_range(Body, Body + CompoundStmtBits.NumStmts);
  }
};

// SwitchCase is the base class for CaseStmt and DefaultStmt,
class SwitchCase : public Stmt {
protected:
  // A pointer to the following CaseStmt or DefaultStmt class,
  // used by SwitchStmt.
  SwitchCase *NextSwitchCase;
  SourceLocation KeywordLoc;
  SourceLocation ColonLoc;

  SwitchCase(StmtClass SC, SourceLocation KWLoc, SourceLocation ColonLoc)
    : Stmt(SC), NextSwitchCase(nullptr), KeywordLoc(KWLoc), ColonLoc(ColonLoc) {
  }

  SwitchCase(StmtClass SC, EmptyShell) : Stmt(SC), NextSwitchCase(nullptr) {}

public:
  const SwitchCase *getNextSwitchCase() const { return NextSwitchCase; }

  SwitchCase *getNextSwitchCase() { return NextSwitchCase; }

  void setNextSwitchCase(SwitchCase *SC) { NextSwitchCase = SC; }

  SourceLocation getKeywordLoc() const { return KeywordLoc; }
  void setKeywordLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Stmt *getSubStmt();
  const Stmt *getSubStmt() const {
    return const_cast<SwitchCase*>(this)->getSubStmt();
  }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY;

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass ||
           T->getStmtClass() == DefaultStmtClass;
  }
};

class CaseStmt : public SwitchCase {
  SourceLocation EllipsisLoc;
  enum { LHS, RHS, SUBSTMT, END_EXPR };
  Stmt* SubExprs[END_EXPR];  // The expression for the RHS is Non-null for
                             // GNU "case 1 ... 4" extension
public:
  CaseStmt(Expr *lhs, Expr *rhs, SourceLocation caseLoc,
           SourceLocation ellipsisLoc, SourceLocation colonLoc)
    : SwitchCase(CaseStmtClass, caseLoc, colonLoc) {
    SubExprs[SUBSTMT] = nullptr;
    SubExprs[LHS] = reinterpret_cast<Stmt*>(lhs);
    SubExprs[RHS] = reinterpret_cast<Stmt*>(rhs);
    EllipsisLoc = ellipsisLoc;
  }

  /// \brief Build an empty switch case statement.
  explicit CaseStmt(EmptyShell Empty) : SwitchCase(CaseStmtClass, Empty) { }

  SourceLocation getCaseLoc() const { return KeywordLoc; }
  void setCaseLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getEllipsisLoc() const { return EllipsisLoc; }
  void setEllipsisLoc(SourceLocation L) { EllipsisLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  Expr *getLHS() { return reinterpret_cast<Expr*>(SubExprs[LHS]); }
  Expr *getRHS() { return reinterpret_cast<Expr*>(SubExprs[RHS]); }
  Stmt *getSubStmt() { return SubExprs[SUBSTMT]; }

  const Expr *getLHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[LHS]);
  }
  const Expr *getRHS() const {
    return reinterpret_cast<const Expr*>(SubExprs[RHS]);
  }
  const Stmt *getSubStmt() const { return SubExprs[SUBSTMT]; }

  void setSubStmt(Stmt *S) { SubExprs[SUBSTMT] = S; }
  void setLHS(Expr *Val) { SubExprs[LHS] = reinterpret_cast<Stmt*>(Val); }
  void setRHS(Expr *Val) { SubExprs[RHS] = reinterpret_cast<Stmt*>(Val); }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    // Handle deeply nested case statements with iteration instead of recursion.
    const CaseStmt *CS = this;
    while (const CaseStmt *CS2 = dyn_cast<CaseStmt>(CS->getSubStmt()))
      CS = CS2;

    return CS->getSubStmt()->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == CaseStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[END_EXPR]);
  }
};

class DefaultStmt : public SwitchCase {
  Stmt* SubStmt;
public:
  DefaultStmt(SourceLocation DL, SourceLocation CL, Stmt *substmt) :
    SwitchCase(DefaultStmtClass, DL, CL), SubStmt(substmt) {}

  /// \brief Build an empty default statement.
  explicit DefaultStmt(EmptyShell Empty)
    : SwitchCase(DefaultStmtClass, Empty) { }

  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setSubStmt(Stmt *S) { SubStmt = S; }

  SourceLocation getDefaultLoc() const { return KeywordLoc; }
  void setDefaultLoc(SourceLocation L) { KeywordLoc = L; }
  SourceLocation getColonLoc() const { return ColonLoc; }
  void setColonLoc(SourceLocation L) { ColonLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return KeywordLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubStmt->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DefaultStmtClass;
  }

  // Iterators
  child_range children() { return child_range(&SubStmt, &SubStmt+1); }
};

// Defined out of line so both CaseStmt and DefaultStmt are complete types.
inline SourceLocation SwitchCase::getLocEnd() const {
  if (const CaseStmt *CS = dyn_cast<CaseStmt>(this))
    return CS->getLocEnd();
  return cast<DefaultStmt>(this)->getLocEnd();
}

/// LabelStmt - Represents a label, which has a substatement.  For example:
///    foo: return;
///
class LabelStmt : public Stmt {
  SourceLocation IdentLoc;
  LabelDecl *TheDecl;
  Stmt *SubStmt;

public:
  LabelStmt(SourceLocation IL, LabelDecl *D, Stmt *substmt)
      : Stmt(LabelStmtClass), IdentLoc(IL), TheDecl(D), SubStmt(substmt) {
    static_assert(sizeof(LabelStmt) ==
                      2 * sizeof(SourceLocation) + 2 * sizeof(void *),
                  "LabelStmt too big");
  }

  // \brief Build an empty label statement.
  explicit LabelStmt(EmptyShell Empty) : Stmt(LabelStmtClass, Empty) { }

  SourceLocation getIdentLoc() const { return IdentLoc; }
  LabelDecl *getDecl() const { return TheDecl; }
  void setDecl(LabelDecl *D) { TheDecl = D; }
  const char *getName() const;
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }
  void setIdentLoc(SourceLocation L) { IdentLoc = L; }
  void setSubStmt(Stmt *SS) { SubStmt = SS; }

  SourceLocation getLocStart() const LLVM_READONLY { return IdentLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubStmt->getLocEnd();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt+1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == LabelStmtClass;
  }
};

/// \brief Represents an attribute applied to a statement.
///
/// Represents an attribute applied to a statement. For example:
///   [[omp::for(...)]] for (...) { ... }
///
class AttributedStmt : public Stmt {
  Stmt *SubStmt;
  SourceLocation AttrLoc;
  unsigned NumAttrs;

  friend class ASTStmtReader;

  // The Attr* array is allocated immediately after this object
  // (trailing-objects idiom); see getAttrArrayPtr().
  AttributedStmt(SourceLocation Loc, ArrayRef<const Attr*> Attrs, Stmt *SubStmt)
    : Stmt(AttributedStmtClass), SubStmt(SubStmt), AttrLoc(Loc),
      NumAttrs(Attrs.size()) {
    memcpy(getAttrArrayPtr(), Attrs.data(), Attrs.size() * sizeof(Attr *));
  }

  explicit AttributedStmt(EmptyShell Empty, unsigned NumAttrs)
    : Stmt(AttributedStmtClass, Empty), NumAttrs(NumAttrs) {
    memset(getAttrArrayPtr(), 0, NumAttrs * sizeof(Attr *));
  }

  Attr *const *getAttrArrayPtr() const {
    return reinterpret_cast<Attr *const *>(this + 1);
  }
  Attr **getAttrArrayPtr() { return reinterpret_cast<Attr **>(this + 1); }

public:
  static AttributedStmt *Create(const ASTContext &C, SourceLocation Loc,
                                ArrayRef<const Attr*> Attrs, Stmt *SubStmt);

  // \brief Build an empty attributed statement.
  static AttributedStmt *CreateEmpty(const ASTContext &C, unsigned NumAttrs);

  SourceLocation getAttrLoc() const { return AttrLoc; }
  ArrayRef<const Attr*> getAttrs() const {
    return llvm::makeArrayRef(getAttrArrayPtr(), NumAttrs);
  }
  Stmt *getSubStmt() { return SubStmt; }
  const Stmt *getSubStmt() const { return SubStmt; }

  SourceLocation getLocStart() const LLVM_READONLY { return AttrLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubStmt->getLocEnd();
  }

  child_range children() { return child_range(&SubStmt, &SubStmt + 1); }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == AttributedStmtClass;
  }
};

/// IfStmt - This represents an if/then/else.
///
class IfStmt : public Stmt {
  enum { VAR, COND, THEN, ELSE, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  SourceLocation IfLoc;
  SourceLocation ElseLoc;

public:
  IfStmt(const ASTContext &C, SourceLocation IL, VarDecl *var, Expr *cond,
         Stmt *then, SourceLocation EL = SourceLocation(),
         Stmt *elsev = nullptr);

  /// \brief Build an empty if/then/else statement
  explicit IfStmt(EmptyShell Empty) : Stmt(IfStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "if" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// if (int x = foo()) {
  ///   printf("x is %d", x);
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this IfStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr*>(SubExprs[COND]);
  }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  const Stmt *getThen() const { return SubExprs[THEN]; }
  void setThen(Stmt *S) { SubExprs[THEN] = S; }
  const Stmt *getElse() const { return SubExprs[ELSE]; }
  void setElse(Stmt *S) { SubExprs[ELSE] = S; }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Stmt *getThen() { return SubExprs[THEN]; }
  Stmt *getElse() { return SubExprs[ELSE]; }

  SourceLocation getIfLoc() const { return IfLoc; }
  void setIfLoc(SourceLocation L) { IfLoc = L; }
  SourceLocation getElseLoc() const { return ElseLoc; }
  void setElseLoc(SourceLocation L) { ElseLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return IfLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    if (SubExprs[ELSE])
      return SubExprs[ELSE]->getLocEnd();
    else
      return SubExprs[THEN]->getLocEnd();
  }

  // Iterators over subexpressions.  The iterators will include iterating
  // over the initialization expression referenced by the condition variable.
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == IfStmtClass;
  }
};

/// SwitchStmt - This represents a 'switch' stmt.
///
class SwitchStmt : public Stmt {
  SourceLocation SwitchLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

  // This points to a linked list of case and default statements and, if the
  // SwitchStmt is a switch on an enum value, records whether all the enum
  // values were covered by CaseStmts.  The coverage information value is meant
  // to be a hint for possible clients.
  llvm::PointerIntPair<SwitchCase *, 1, bool> FirstCase;

public:
  SwitchStmt(const ASTContext &C, VarDecl *Var, Expr *cond);

  /// \brief Build an empty switch statement.
  explicit SwitchStmt(EmptyShell Empty) : Stmt(SwitchStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "switch" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// switch (int x = foo()) {
  ///   case 0: break;
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this SwitchStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  const Expr *getCond() const {
    return reinterpret_cast<Expr*>(SubExprs[COND]);
  }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  const SwitchCase *getSwitchCaseList() const {
    return FirstCase.getPointer();
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt *>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }
  SwitchCase *getSwitchCaseList() { return FirstCase.getPointer(); }

  /// \brief Set the case list for this switch statement.
  void setSwitchCaseList(SwitchCase *SC) { FirstCase.setPointer(SC); }

  SourceLocation getSwitchLoc() const { return SwitchLoc; }
  void setSwitchLoc(SourceLocation L) { SwitchLoc = L; }

  void setBody(Stmt *S, SourceLocation SL) {
    SubExprs[BODY] = S;
    SwitchLoc = SL;
  }

  // Prepends SC to the singly-linked case list.
  void addSwitchCase(SwitchCase *SC) {
    assert(!SC->getNextSwitchCase()
           && "case/default already added to a switch");
    SC->setNextSwitchCase(FirstCase.getPointer());
    FirstCase.setPointer(SC);
  }

  /// Set a flag in the SwitchStmt indicating that if the 'switch (X)' is a
  /// switch over an enum value then all cases have been explicitly covered.
  void setAllEnumCasesCovered() { FirstCase.setInt(true); }

  /// Returns true if the SwitchStmt is a switch of an enum value and all cases
  /// have been explicitly covered.
  bool isAllEnumCasesCovered() const { return FirstCase.getInt(); }

  SourceLocation getLocStart() const LLVM_READONLY { return SwitchLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY] ? SubExprs[BODY]->getLocEnd()
                          : SubExprs[COND]->getLocEnd();
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == SwitchStmtClass;
  }
};

/// WhileStmt - This represents a 'while' stmt.
///
class WhileStmt : public Stmt {
  SourceLocation WhileLoc;
  enum { VAR, COND, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR];

public:
  WhileStmt(const ASTContext &C, VarDecl *Var, Expr *cond, Stmt *body,
            SourceLocation WL);

  /// \brief Build an empty while statement.
  explicit WhileStmt(EmptyShell Empty) : Stmt(WhileStmtClass, Empty) { }

  /// \brief Retrieve the variable declared in this "while" statement, if any.
  ///
  /// In the following example, "x" is the condition variable.
  /// \code
  /// while (int x = random()) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this WhileStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[VAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr*>(SubExprs[COND]);
  }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return WhileLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == WhileStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// DoStmt - This represents a 'do/while' stmt.
///
class DoStmt : public Stmt {
  SourceLocation DoLoc;
  enum { BODY, COND, END_EXPR };
  Stmt* SubExprs[END_EXPR];
  SourceLocation WhileLoc;
  SourceLocation RParenLoc;  // Location of final ')' in do stmt condition.

public:
  DoStmt(Stmt *body, Expr *cond, SourceLocation DL, SourceLocation WL,
         SourceLocation RP)
    : Stmt(DoStmtClass), DoLoc(DL), WhileLoc(WL), RParenLoc(RP) {
    SubExprs[COND] = reinterpret_cast<Stmt*>(cond);
    SubExprs[BODY] = body;
  }

  /// \brief Build an empty do-while statement.
  explicit DoStmt(EmptyShell Empty) : Stmt(DoStmtClass, Empty) { }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  const Expr *getCond() const {
    return reinterpret_cast<Expr*>(SubExprs[COND]);
  }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  Stmt *getBody() { return SubExprs[BODY]; }
  const Stmt *getBody() const { return SubExprs[BODY]; }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getDoLoc() const { return DoLoc; }
  void setDoLoc(SourceLocation L) { DoLoc = L; }
  SourceLocation getWhileLoc() const { return WhileLoc; }
  void setWhileLoc(SourceLocation L) { WhileLoc = L; }

  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return DoLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == DoStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// ForStmt - This represents a 'for (init;cond;inc)' stmt.  Note that any of
/// the init/cond/inc parts of the ForStmt will be null if they were not
/// specified in the source.
///
class ForStmt : public Stmt {
  SourceLocation ForLoc;
  enum { INIT, CONDVAR, COND, INC, BODY, END_EXPR };
  Stmt* SubExprs[END_EXPR]; // SubExprs[INIT] is an expression or declstmt.
  SourceLocation LParenLoc, RParenLoc;

public:
  ForStmt(const ASTContext &C, Stmt *Init, Expr *Cond, VarDecl *condVar,
          Expr *Inc, Stmt *Body, SourceLocation FL, SourceLocation LP,
          SourceLocation RP);

  /// \brief Build an empty for statement.
  explicit ForStmt(EmptyShell Empty) : Stmt(ForStmtClass, Empty) { }

  Stmt *getInit() { return SubExprs[INIT]; }

  /// \brief Retrieve the variable declared in this "for" statement, if any.
  ///
  /// In the following example, "y" is the condition variable.
  /// \code
  /// for (int x = random(); int y = mangle(x); ++x) {
  ///   // ...
  /// }
  /// \endcode
  VarDecl *getConditionVariable() const;
  void setConditionVariable(const ASTContext &C, VarDecl *V);

  /// If this ForStmt has a condition variable, return the faux DeclStmt
  /// associated with the creation of that condition variable.
  const DeclStmt *getConditionVariableDeclStmt() const {
    return reinterpret_cast<DeclStmt*>(SubExprs[CONDVAR]);
  }

  Expr *getCond() { return reinterpret_cast<Expr*>(SubExprs[COND]); }
  Expr *getInc()  { return reinterpret_cast<Expr*>(SubExprs[INC]); }
  Stmt *getBody() { return SubExprs[BODY]; }

  const Stmt *getInit() const { return SubExprs[INIT]; }
  const Expr *getCond() const {
    return reinterpret_cast<Expr*>(SubExprs[COND]);
  }
  const Expr *getInc()  const {
    return reinterpret_cast<Expr*>(SubExprs[INC]);
  }
  const Stmt *getBody() const { return SubExprs[BODY]; }

  void setInit(Stmt *S) { SubExprs[INIT] = S; }
  void setCond(Expr *E) { SubExprs[COND] = reinterpret_cast<Stmt*>(E); }
  void setInc(Expr *E) { SubExprs[INC] = reinterpret_cast<Stmt*>(E); }
  void setBody(Stmt *S) { SubExprs[BODY] = S; }

  SourceLocation getForLoc() const { return ForLoc; }
  void setForLoc(SourceLocation L) { ForLoc = L; }
  SourceLocation getLParenLoc() const { return LParenLoc; }
  void setLParenLoc(SourceLocation L) { LParenLoc = L; }
  SourceLocation getRParenLoc() const { return RParenLoc; }
  void setRParenLoc(SourceLocation L) { RParenLoc = L; }

  SourceLocation getLocStart() const LLVM_READONLY { return ForLoc; }
  SourceLocation getLocEnd() const LLVM_READONLY {
    return SubExprs[BODY]->getLocEnd();
  }

  static bool classof(const Stmt *T) {
    return T->getStmtClass() == ForStmtClass;
  }

  // Iterators
  child_range children() {
    return child_range(&SubExprs[0], &SubExprs[0]+END_EXPR);
  }
};

/// GotoStmt - This represents a direct goto.
/// class GotoStmt : public Stmt { LabelDecl *Label; SourceLocation GotoLoc; SourceLocation LabelLoc; public: GotoStmt(LabelDecl *label, SourceLocation GL, SourceLocation LL) : Stmt(GotoStmtClass), Label(label), GotoLoc(GL), LabelLoc(LL) {} /// \brief Build an empty goto statement. explicit GotoStmt(EmptyShell Empty) : Stmt(GotoStmtClass, Empty) { } LabelDecl *getLabel() const { return Label; } void setLabel(LabelDecl *D) { Label = D; } SourceLocation getGotoLoc() const { return GotoLoc; } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getLabelLoc() const { return LabelLoc; } void setLabelLoc(SourceLocation L) { LabelLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LabelLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GotoStmtClass; } // Iterators child_range children() { return child_range(); } }; /// IndirectGotoStmt - This represents an indirect goto. /// class IndirectGotoStmt : public Stmt { SourceLocation GotoLoc; SourceLocation StarLoc; Stmt *Target; public: IndirectGotoStmt(SourceLocation gotoLoc, SourceLocation starLoc, Expr *target) : Stmt(IndirectGotoStmtClass), GotoLoc(gotoLoc), StarLoc(starLoc), Target((Stmt*)target) {} /// \brief Build an empty indirect goto statement. explicit IndirectGotoStmt(EmptyShell Empty) : Stmt(IndirectGotoStmtClass, Empty) { } void setGotoLoc(SourceLocation L) { GotoLoc = L; } SourceLocation getGotoLoc() const { return GotoLoc; } void setStarLoc(SourceLocation L) { StarLoc = L; } SourceLocation getStarLoc() const { return StarLoc; } Expr *getTarget() { return reinterpret_cast<Expr*>(Target); } const Expr *getTarget() const {return reinterpret_cast<const Expr*>(Target);} void setTarget(Expr *E) { Target = reinterpret_cast<Stmt*>(E); } /// getConstantTarget - Returns the fixed target of this indirect /// goto, if one exists. 
LabelDecl *getConstantTarget(); const LabelDecl *getConstantTarget() const { return const_cast<IndirectGotoStmt*>(this)->getConstantTarget(); } SourceLocation getLocStart() const LLVM_READONLY { return GotoLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return Target->getLocEnd(); } static bool classof(const Stmt *T) { return T->getStmtClass() == IndirectGotoStmtClass; } // Iterators child_range children() { return child_range(&Target, &Target+1); } }; /// ContinueStmt - This represents a continue. /// class ContinueStmt : public Stmt { SourceLocation ContinueLoc; public: ContinueStmt(SourceLocation CL) : Stmt(ContinueStmtClass), ContinueLoc(CL) {} /// \brief Build an empty continue statement. explicit ContinueStmt(EmptyShell Empty) : Stmt(ContinueStmtClass, Empty) { } SourceLocation getContinueLoc() const { return ContinueLoc; } void setContinueLoc(SourceLocation L) { ContinueLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return ContinueLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return ContinueLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ContinueStmtClass; } // Iterators child_range children() { return child_range(); } }; /// BreakStmt - This represents a break. /// class BreakStmt : public Stmt { SourceLocation BreakLoc; public: BreakStmt(SourceLocation BL) : Stmt(BreakStmtClass), BreakLoc(BL) { static_assert(sizeof(BreakStmt) == 2 * sizeof(SourceLocation), "BreakStmt too large"); } /// \brief Build an empty break statement. 
explicit BreakStmt(EmptyShell Empty) : Stmt(BreakStmtClass, Empty) { } SourceLocation getBreakLoc() const { return BreakLoc; } void setBreakLoc(SourceLocation L) { BreakLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return BreakLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return BreakLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == BreakStmtClass; } // Iterators child_range children() { return child_range(); } }; /// ReturnStmt - This represents a return, optionally of an expression: /// return; /// return 4; /// /// Note that GCC allows return with no argument in a function declared to /// return a value, and it allows returning a value in functions declared to /// return void. We explicitly model this in the AST, which means you can't /// depend on the return type of the function and the presence of an argument. /// class ReturnStmt : public Stmt { SourceLocation RetLoc; Stmt *RetExpr; const VarDecl *NRVOCandidate; public: explicit ReturnStmt(SourceLocation RL) : ReturnStmt(RL, nullptr, nullptr) {} ReturnStmt(SourceLocation RL, Expr *E, const VarDecl *NRVOCandidate) : Stmt(ReturnStmtClass), RetLoc(RL), RetExpr((Stmt *)E), NRVOCandidate(NRVOCandidate) {} /// \brief Build an empty return expression. explicit ReturnStmt(EmptyShell Empty) : Stmt(ReturnStmtClass, Empty) { } const Expr *getRetValue() const; Expr *getRetValue(); void setRetValue(Expr *E) { RetExpr = reinterpret_cast<Stmt*>(E); } SourceLocation getReturnLoc() const { return RetLoc; } void setReturnLoc(SourceLocation L) { RetLoc = L; } /// \brief Retrieve the variable that might be used for the named return /// value optimization. /// /// The optimization itself can only be performed if the variable is /// also marked as an NRVO object. 
const VarDecl *getNRVOCandidate() const { return NRVOCandidate; } void setNRVOCandidate(const VarDecl *Var) { NRVOCandidate = Var; } SourceLocation getLocStart() const LLVM_READONLY { return RetLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RetExpr ? RetExpr->getLocEnd() : RetLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == ReturnStmtClass; } // Iterators child_range children() { if (RetExpr) return child_range(&RetExpr, &RetExpr+1); return child_range(); } }; /// AsmStmt is the base class for GCCAsmStmt and MSAsmStmt. /// class AsmStmt : public Stmt { protected: SourceLocation AsmLoc; /// \brief True if the assembly statement does not have any input or output /// operands. bool IsSimple; /// \brief If true, treat this inline assembly as having side effects. /// This assembly statement should not be optimized, deleted or moved. bool IsVolatile; unsigned NumOutputs; unsigned NumInputs; unsigned NumClobbers; Stmt **Exprs; AsmStmt(StmtClass SC, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, unsigned numclobbers) : Stmt (SC), AsmLoc(asmloc), IsSimple(issimple), IsVolatile(isvolatile), NumOutputs(numoutputs), NumInputs(numinputs), NumClobbers(numclobbers) { } friend class ASTStmtReader; public: /// \brief Build an empty inline-assembly statement. explicit AsmStmt(StmtClass SC, EmptyShell Empty) : Stmt(SC, Empty), Exprs(nullptr) { } SourceLocation getAsmLoc() const { return AsmLoc; } void setAsmLoc(SourceLocation L) { AsmLoc = L; } bool isSimple() const { return IsSimple; } void setSimple(bool V) { IsSimple = V; } bool isVolatile() const { return IsVolatile; } void setVolatile(bool V) { IsVolatile = V; } SourceLocation getLocStart() const LLVM_READONLY { return SourceLocation(); } SourceLocation getLocEnd() const LLVM_READONLY { return SourceLocation(); } //===--- Asm String Analysis ---===// /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// unsigned getNumOutputs() const { return NumOutputs; } /// getOutputConstraint - Return the constraint string for the specified /// output operand. All output constraints are known to be non-empty (either /// '=' or '+'). StringRef getOutputConstraint(unsigned i) const; /// isOutputPlusConstraint - Return true if the specified output constraint /// is a "+" constraint (which is both an input and an output) or false if it /// is an "=" constraint (just an output). bool isOutputPlusConstraint(unsigned i) const { return getOutputConstraint(i)[0] == '+'; } const Expr *getOutputExpr(unsigned i) const; /// getNumPlusOperands - Return the number of output operands that have a "+" /// constraint. unsigned getNumPlusOperands() const; //===--- Input operands ---===// unsigned getNumInputs() const { return NumInputs; } /// getInputConstraint - Return the specified input constraint. Unlike output /// constraints, these can be empty. StringRef getInputConstraint(unsigned i) const; const Expr *getInputExpr(unsigned i) const; //===--- Other ---===// unsigned getNumClobbers() const { return NumClobbers; } StringRef getClobber(unsigned i) const; static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass || T->getStmtClass() == MSAsmStmtClass; } // Input expr iterators. 
typedef ExprIterator inputs_iterator; typedef ConstExprIterator const_inputs_iterator; typedef llvm::iterator_range<inputs_iterator> inputs_range; typedef llvm::iterator_range<const_inputs_iterator> inputs_const_range; inputs_iterator begin_inputs() { return &Exprs[0] + NumOutputs; } inputs_iterator end_inputs() { return &Exprs[0] + NumOutputs + NumInputs; } inputs_range inputs() { return inputs_range(begin_inputs(), end_inputs()); } const_inputs_iterator begin_inputs() const { return &Exprs[0] + NumOutputs; } const_inputs_iterator end_inputs() const { return &Exprs[0] + NumOutputs + NumInputs; } inputs_const_range inputs() const { return inputs_const_range(begin_inputs(), end_inputs()); } // Output expr iterators. typedef ExprIterator outputs_iterator; typedef ConstExprIterator const_outputs_iterator; typedef llvm::iterator_range<outputs_iterator> outputs_range; typedef llvm::iterator_range<const_outputs_iterator> outputs_const_range; outputs_iterator begin_outputs() { return &Exprs[0]; } outputs_iterator end_outputs() { return &Exprs[0] + NumOutputs; } outputs_range outputs() { return outputs_range(begin_outputs(), end_outputs()); } const_outputs_iterator begin_outputs() const { return &Exprs[0]; } const_outputs_iterator end_outputs() const { return &Exprs[0] + NumOutputs; } outputs_const_range outputs() const { return outputs_const_range(begin_outputs(), end_outputs()); } child_range children() { return child_range(&Exprs[0], &Exprs[0] + NumOutputs + NumInputs); } }; /// This represents a GCC inline-assembly statement extension. /// class GCCAsmStmt : public AsmStmt { SourceLocation RParenLoc; StringLiteral *AsmStr; // FIXME: If we wanted to, we could allocate all of these in one big array. 
StringLiteral **Constraints; StringLiteral **Clobbers; IdentifierInfo **Names; friend class ASTStmtReader; public: GCCAsmStmt(const ASTContext &C, SourceLocation asmloc, bool issimple, bool isvolatile, unsigned numoutputs, unsigned numinputs, IdentifierInfo **names, StringLiteral **constraints, Expr **exprs, StringLiteral *asmstr, unsigned numclobbers, StringLiteral **clobbers, SourceLocation rparenloc); /// \brief Build an empty inline-assembly statement. explicit GCCAsmStmt(EmptyShell Empty) : AsmStmt(GCCAsmStmtClass, Empty), Constraints(nullptr), Clobbers(nullptr), Names(nullptr) { } SourceLocation getRParenLoc() const { return RParenLoc; } void setRParenLoc(SourceLocation L) { RParenLoc = L; } //===--- Asm String Analysis ---===// const StringLiteral *getAsmString() const { return AsmStr; } StringLiteral *getAsmString() { return AsmStr; } void setAsmString(StringLiteral *E) { AsmStr = E; } /// AsmStringPiece - this is part of a decomposed asm string specification /// (for use with the AnalyzeAsmString function below). An asm string is /// considered to be a concatenation of these parts. class AsmStringPiece { public: enum Kind { String, // String in .ll asm string form, "$" -> "$$" and "%%" -> "%". Operand // Operand reference, with optional modifier %c4. }; private: Kind MyKind; std::string Str; unsigned OperandNo; // Source range for operand references. 
CharSourceRange Range; public: AsmStringPiece(const std::string &S) : MyKind(String), Str(S) {} AsmStringPiece(unsigned OpNo, const std::string &S, SourceLocation Begin, SourceLocation End) : MyKind(Operand), Str(S), OperandNo(OpNo), Range(CharSourceRange::getCharRange(Begin, End)) { } bool isString() const { return MyKind == String; } bool isOperand() const { return MyKind == Operand; } const std::string &getString() const { return Str; } unsigned getOperandNo() const { assert(isOperand()); return OperandNo; } CharSourceRange getRange() const { assert(isOperand() && "Range is currently used only for Operands."); return Range; } /// getModifier - Get the modifier for this operand, if present. This /// returns '\0' if there was no modifier. char getModifier() const; }; /// AnalyzeAsmString - Analyze the asm string of the current asm, decomposing /// it into pieces. If the asm string is erroneous, emit errors and return /// true, otherwise return false. This handles canonicalization and /// translation of strings from GCC syntax to LLVM IR syntax, and handles //// flattening of named references like %[foo] to Operand AsmStringPiece's. unsigned AnalyzeAsmString(SmallVectorImpl<AsmStringPiece> &Pieces, const ASTContext &C, unsigned &DiagOffs) const; /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// IdentifierInfo *getOutputIdentifier(unsigned i) const { return Names[i]; } StringRef getOutputName(unsigned i) const { if (IdentifierInfo *II = getOutputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getOutputConstraint(unsigned i) const; const StringLiteral *getOutputConstraintLiteral(unsigned i) const { return Constraints[i]; } StringLiteral *getOutputConstraintLiteral(unsigned i) { return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// IdentifierInfo *getInputIdentifier(unsigned i) const { return Names[i + NumOutputs]; } StringRef getInputName(unsigned i) const { if (IdentifierInfo *II = getInputIdentifier(i)) return II->getName(); return StringRef(); } StringRef getInputConstraint(unsigned i) const; const StringLiteral *getInputConstraintLiteral(unsigned i) const { return Constraints[i + NumOutputs]; } StringLiteral *getInputConstraintLiteral(unsigned i) { return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<GCCAsmStmt*>(this)->getInputExpr(i); } private: void setOutputsAndInputsAndClobbers(const ASTContext &C, IdentifierInfo **Names, StringLiteral **Constraints, Stmt **Exprs, unsigned NumOutputs, unsigned NumInputs, StringLiteral **Clobbers, unsigned NumClobbers); public: //===--- Other ---===// /// getNamedOperand - Given a symbolic operand reference like %[foo], /// translate this into a numeric value needed to reference the same operand. /// This returns -1 if the operand name is invalid. 
int getNamedOperand(StringRef SymbolicName) const; StringRef getClobber(unsigned i) const; StringLiteral *getClobberStringLiteral(unsigned i) { return Clobbers[i]; } const StringLiteral *getClobberStringLiteral(unsigned i) const { return Clobbers[i]; } SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return RParenLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == GCCAsmStmtClass; } }; /// This represents a Microsoft inline-assembly statement extension. /// class MSAsmStmt : public AsmStmt { SourceLocation LBraceLoc, EndLoc; StringRef AsmStr; unsigned NumAsmToks; Token *AsmToks; StringRef *Constraints; StringRef *Clobbers; friend class ASTStmtReader; public: MSAsmStmt(const ASTContext &C, SourceLocation asmloc, SourceLocation lbraceloc, bool issimple, bool isvolatile, ArrayRef<Token> asmtoks, unsigned numoutputs, unsigned numinputs, ArrayRef<StringRef> constraints, ArrayRef<Expr*> exprs, StringRef asmstr, ArrayRef<StringRef> clobbers, SourceLocation endloc); /// \brief Build an empty MS-style inline-assembly statement. explicit MSAsmStmt(EmptyShell Empty) : AsmStmt(MSAsmStmtClass, Empty), NumAsmToks(0), AsmToks(nullptr), Constraints(nullptr), Clobbers(nullptr) { } SourceLocation getLBraceLoc() const { return LBraceLoc; } void setLBraceLoc(SourceLocation L) { LBraceLoc = L; } SourceLocation getEndLoc() const { return EndLoc; } void setEndLoc(SourceLocation L) { EndLoc = L; } bool hasBraces() const { return LBraceLoc.isValid(); } unsigned getNumAsmToks() { return NumAsmToks; } Token *getAsmToks() { return AsmToks; } //===--- Asm String Analysis ---===// StringRef getAsmString() const { return AsmStr; } /// Assemble final IR asm string. 
std::string generateAsmString(const ASTContext &C) const; //===--- Output operands ---===// StringRef getOutputConstraint(unsigned i) const { assert(i < NumOutputs); return Constraints[i]; } Expr *getOutputExpr(unsigned i); const Expr *getOutputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getOutputExpr(i); } //===--- Input operands ---===// StringRef getInputConstraint(unsigned i) const { assert(i < NumInputs); return Constraints[i + NumOutputs]; } Expr *getInputExpr(unsigned i); void setInputExpr(unsigned i, Expr *E); const Expr *getInputExpr(unsigned i) const { return const_cast<MSAsmStmt*>(this)->getInputExpr(i); } //===--- Other ---===// ArrayRef<StringRef> getAllConstraints() const { return llvm::makeArrayRef(Constraints, NumInputs + NumOutputs); } ArrayRef<StringRef> getClobbers() const { return llvm::makeArrayRef(Clobbers, NumClobbers); } ArrayRef<Expr*> getAllExprs() const { return llvm::makeArrayRef(reinterpret_cast<Expr**>(Exprs), NumInputs + NumOutputs); } StringRef getClobber(unsigned i) const { return getClobbers()[i]; } private: void initialize(const ASTContext &C, StringRef AsmString, ArrayRef<Token> AsmToks, ArrayRef<StringRef> Constraints, ArrayRef<Expr*> Exprs, ArrayRef<StringRef> Clobbers); public: SourceLocation getLocStart() const LLVM_READONLY { return AsmLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return EndLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == MSAsmStmtClass; } child_range children() { return child_range(&Exprs[0], &Exprs[NumInputs + NumOutputs]); } }; class SEHExceptStmt : public Stmt { SourceLocation Loc; Stmt *Children[2]; enum { FILTER_EXPR, BLOCK }; SEHExceptStmt(SourceLocation Loc, Expr *FilterExpr, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHExceptStmt(EmptyShell E) : Stmt(SEHExceptStmtClass, E) { } public: static SEHExceptStmt* Create(const ASTContext &C, SourceLocation ExceptLoc, Expr *FilterExpr, Stmt *Block); SourceLocation 
getLocStart() const LLVM_READONLY { return getExceptLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getExceptLoc() const { return Loc; } SourceLocation getEndLoc() const { return getBlock()->getLocEnd(); } Expr *getFilterExpr() const { return reinterpret_cast<Expr*>(Children[FILTER_EXPR]); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Children[BLOCK]); } child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHExceptStmtClass; } }; class SEHFinallyStmt : public Stmt { SourceLocation Loc; Stmt *Block; SEHFinallyStmt(SourceLocation Loc, Stmt *Block); friend class ASTReader; friend class ASTStmtReader; explicit SEHFinallyStmt(EmptyShell E) : Stmt(SEHFinallyStmtClass, E) { } public: static SEHFinallyStmt* Create(const ASTContext &C, SourceLocation FinallyLoc, Stmt *Block); SourceLocation getLocStart() const LLVM_READONLY { return getFinallyLoc(); } SourceLocation getLocEnd() const LLVM_READONLY { return getEndLoc(); } SourceLocation getFinallyLoc() const { return Loc; } SourceLocation getEndLoc() const { return Block->getLocEnd(); } CompoundStmt *getBlock() const { return cast<CompoundStmt>(Block); } child_range children() { return child_range(&Block,&Block+1); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHFinallyStmtClass; } }; class SEHTryStmt : public Stmt { bool IsCXXTry; SourceLocation TryLoc; Stmt *Children[2]; enum { TRY = 0, HANDLER = 1 }; SEHTryStmt(bool isCXXTry, // true if 'try' otherwise '__try' SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); friend class ASTReader; friend class ASTStmtReader; explicit SEHTryStmt(EmptyShell E) : Stmt(SEHTryStmtClass, E) { } public: static SEHTryStmt* Create(const ASTContext &C, bool isCXXTry, SourceLocation TryLoc, Stmt *TryBlock, Stmt *Handler); SourceLocation getLocStart() const LLVM_READONLY { return getTryLoc(); } SourceLocation getLocEnd() const 
LLVM_READONLY { return getEndLoc(); } SourceLocation getTryLoc() const { return TryLoc; } SourceLocation getEndLoc() const { return Children[HANDLER]->getLocEnd(); } bool getIsCXXTry() const { return IsCXXTry; } CompoundStmt* getTryBlock() const { return cast<CompoundStmt>(Children[TRY]); } Stmt *getHandler() const { return Children[HANDLER]; } /// Returns 0 if not defined SEHExceptStmt *getExceptHandler() const; SEHFinallyStmt *getFinallyHandler() const; child_range children() { return child_range(Children,Children+2); } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHTryStmtClass; } }; /// Represents a __leave statement. /// class SEHLeaveStmt : public Stmt { SourceLocation LeaveLoc; public: explicit SEHLeaveStmt(SourceLocation LL) : Stmt(SEHLeaveStmtClass), LeaveLoc(LL) {} /// \brief Build an empty __leave statement. explicit SEHLeaveStmt(EmptyShell Empty) : Stmt(SEHLeaveStmtClass, Empty) { } SourceLocation getLeaveLoc() const { return LeaveLoc; } void setLeaveLoc(SourceLocation L) { LeaveLoc = L; } SourceLocation getLocStart() const LLVM_READONLY { return LeaveLoc; } SourceLocation getLocEnd() const LLVM_READONLY { return LeaveLoc; } static bool classof(const Stmt *T) { return T->getStmtClass() == SEHLeaveStmtClass; } // Iterators child_range children() { return child_range(); } }; /// \brief This captures a statement into a function. For example, the following /// pragma annotated compound statement can be represented as a CapturedStmt, /// and this compound statement is the body of an anonymous outlined function. /// @code /// #pragma omp parallel /// { /// compute(); /// } /// @endcode class CapturedStmt : public Stmt { public: /// \brief The different capture forms: by 'this', by reference, capture for /// variable-length array type etc. enum VariableCaptureKind { VCK_This, VCK_ByRef, VCK_VLAType, }; /// \brief Describes the capture of either a variable, or 'this', or /// variable-length array type. 
class Capture { llvm::PointerIntPair<VarDecl *, 2, VariableCaptureKind> VarAndKind; SourceLocation Loc; public: /// \brief Create a new capture. /// /// \param Loc The source location associated with this capture. /// /// \param Kind The kind of capture (this, ByRef, ...). /// /// \param Var The variable being captured, or null if capturing this. /// Capture(SourceLocation Loc, VariableCaptureKind Kind, VarDecl *Var = nullptr) : VarAndKind(Var, Kind), Loc(Loc) { switch (Kind) { case VCK_This: assert(!Var && "'this' capture cannot have a variable!"); break; case VCK_ByRef: assert(Var && "capturing by reference must have a variable!"); break; case VCK_VLAType: assert(!Var && "Variable-length array type capture cannot have a variable!"); break; } } /// \brief Determine the kind of capture. VariableCaptureKind getCaptureKind() const { return VarAndKind.getInt(); } /// \brief Retrieve the source location at which the variable or 'this' was /// first used. SourceLocation getLocation() const { return Loc; } /// \brief Determine whether this capture handles the C++ 'this' pointer. bool capturesThis() const { return getCaptureKind() == VCK_This; } /// \brief Determine whether this capture handles a variable. bool capturesVariable() const { return getCaptureKind() == VCK_ByRef; } /// \brief Determine whether this capture handles a variable-length array /// type. bool capturesVariableArrayType() const { return getCaptureKind() == VCK_VLAType; } /// \brief Retrieve the declaration of the variable being captured. /// /// This operation is only valid if this capture captures a variable. VarDecl *getCapturedVar() const { assert(capturesVariable() && "No variable available for 'this' or VAT capture"); return VarAndKind.getPointer(); } friend class ASTStmtReader; }; private: /// \brief The number of variable captured, including 'this'. 
unsigned NumCaptures; /// \brief The pointer part is the implicit the outlined function and the /// int part is the captured region kind, 'CR_Default' etc. llvm::PointerIntPair<CapturedDecl *, 1, CapturedRegionKind> CapDeclAndKind; /// \brief The record for captured variables, a RecordDecl or CXXRecordDecl. RecordDecl *TheRecordDecl; /// \brief Construct a captured statement. CapturedStmt(Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); /// \brief Construct an empty captured statement. CapturedStmt(EmptyShell Empty, unsigned NumCaptures); Stmt **getStoredStmts() const { return reinterpret_cast<Stmt **>(const_cast<CapturedStmt *>(this) + 1); } Capture *getStoredCaptures() const; void setCapturedStmt(Stmt *S) { getStoredStmts()[NumCaptures] = S; } public: static CapturedStmt *Create(const ASTContext &Context, Stmt *S, CapturedRegionKind Kind, ArrayRef<Capture> Captures, ArrayRef<Expr *> CaptureInits, CapturedDecl *CD, RecordDecl *RD); static CapturedStmt *CreateDeserialized(const ASTContext &Context, unsigned NumCaptures); /// \brief Retrieve the statement being captured. Stmt *getCapturedStmt() { return getStoredStmts()[NumCaptures]; } const Stmt *getCapturedStmt() const { return const_cast<CapturedStmt *>(this)->getCapturedStmt(); } /// \brief Retrieve the outlined function declaration. CapturedDecl *getCapturedDecl() { return CapDeclAndKind.getPointer(); } const CapturedDecl *getCapturedDecl() const { return const_cast<CapturedStmt *>(this)->getCapturedDecl(); } /// \brief Set the outlined function declaration. void setCapturedDecl(CapturedDecl *D) { assert(D && "null CapturedDecl"); CapDeclAndKind.setPointer(D); } /// \brief Retrieve the captured region kind. CapturedRegionKind getCapturedRegionKind() const { return CapDeclAndKind.getInt(); } /// \brief Set the captured region kind. 
void setCapturedRegionKind(CapturedRegionKind Kind) { CapDeclAndKind.setInt(Kind); } /// \brief Retrieve the record declaration for captured variables. const RecordDecl *getCapturedRecordDecl() const { return TheRecordDecl; } /// \brief Set the record declaration for captured variables. void setCapturedRecordDecl(RecordDecl *D) { assert(D && "null RecordDecl"); TheRecordDecl = D; } /// \brief True if this variable has been captured. bool capturesVariable(const VarDecl *Var) const; /// \brief An iterator that walks over the captures. typedef Capture *capture_iterator; typedef const Capture *const_capture_iterator; typedef llvm::iterator_range<capture_iterator> capture_range; typedef llvm::iterator_range<const_capture_iterator> capture_const_range; capture_range captures() { return capture_range(capture_begin(), capture_end()); } capture_const_range captures() const { return capture_const_range(capture_begin(), capture_end()); } /// \brief Retrieve an iterator pointing to the first capture. capture_iterator capture_begin() { return getStoredCaptures(); } const_capture_iterator capture_begin() const { return getStoredCaptures(); } /// \brief Retrieve an iterator pointing past the end of the sequence of /// captures. capture_iterator capture_end() const { return getStoredCaptures() + NumCaptures; } /// \brief Retrieve the number of captures, including 'this'. unsigned capture_size() const { return NumCaptures; } /// \brief Iterator that walks over the capture initialization arguments. typedef Expr **capture_init_iterator; typedef llvm::iterator_range<capture_init_iterator> capture_init_range; capture_init_range capture_inits() const { return capture_init_range(capture_init_begin(), capture_init_end()); } /// \brief Retrieve the first initialization argument. capture_init_iterator capture_init_begin() const { return reinterpret_cast<Expr **>(getStoredStmts()); } /// \brief Retrieve the iterator pointing one past the last initialization /// argument. 
capture_init_iterator capture_init_end() const { return capture_init_begin() + NumCaptures; } SourceLocation getLocStart() const LLVM_READONLY { return getCapturedStmt()->getLocStart(); } SourceLocation getLocEnd() const LLVM_READONLY { return getCapturedStmt()->getLocEnd(); } SourceRange getSourceRange() const LLVM_READONLY { return getCapturedStmt()->getSourceRange(); } static bool classof(const Stmt *T) { return T->getStmtClass() == CapturedStmtClass; } child_range children(); friend class ASTStmtReader; }; } // end namespace clang #endif
GB_unop__identity_uint8_fp64.c
//------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB (_unop_apply__identity_uint8_fp64)
// op(A') function: GB (_unop_tran__identity_uint8_fp64)

// C type: uint8_t
// A type: double
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop: cij = aij

// The GB_* macros below define the type plumbing consumed by the shared
// transpose template (GB_unop_transpose.c) #include'd at the bottom of this
// file; they are not used directly by the apply kernel, which is written out
// explicitly so the compiler can vectorize the inner loops.

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator (identity: z gets x unchanged; the typecast does the work)
#define GB_OP(z, x) \
    z = x ;

// casting: saturating/rounding double-to-uint8_t conversion done by
// GB_cast_to_uint8_t, not a raw C cast
#define GB_CAST(z, aij) \
    uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    double aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
// (GxB_NO_* are compile-time switches from GB_control.h)
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Apply the identity operator with typecast (double -> uint8_t) to a dense
// array of anz entries, in parallel.  Returns GrB_NO_VALUE when this kernel
// has been compiled out (GB_DISABLE), so the caller falls back to the
// generic-case kernel.

GrB_Info GB (_unop_apply__identity_uint8_fp64)
(
    uint8_t *Cx,                // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions not present in the bitmap
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The entire transpose kernel lives in the shared template
// GB_unop_transpose.c, which is specialized via the GB_* macros above.

GrB_Info GB (_unop_tran__identity_uint8_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
BIDMat_SPBLAS.c
#include <omp.h> #include <jni.h> #include <string.h> #ifndef __INTEL_COMPILER #include <cblas.h> #else #include <mkl.h> #include <mkl_spblas.h> JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_scsrmm (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint n, jint k, jfloat alpha, jstring j_matdescra, jfloatArray j_vals, jintArray j_ir, jintArray j_jc, jfloatArray j_b, jint ldb, jfloat beta, jfloatArray j_c, jint ldc){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jfloat * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jfloat * b = (*env)->GetPrimitiveArrayCritical(env, j_b, 0); jfloat * c = (*env)->GetPrimitiveArrayCritical(env, j_c, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && b != NULL && c != NULL) { mkl_scsrmm(transa, &m, &n, &k, &alpha, matdescra, vals, ir, jc, jc+1, b, &ldb, &beta, c, &ldc); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_c, c, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_b, b, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_scscmm (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint n, jint k, jfloat alpha, jstring j_matdescra, jfloatArray j_vals, jintArray j_ir, jintArray j_jc, jfloatArray j_b, jint ldb, jfloat beta, jfloatArray j_c, jint ldc){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, 
j_matdescra, 0); jfloat * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jfloat * b = (*env)->GetPrimitiveArrayCritical(env, j_b, 0); jfloat * c = (*env)->GetPrimitiveArrayCritical(env, j_c, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && b != NULL && c != NULL) { mkl_scscmm(transa, &m, &n, &k, &alpha, matdescra, vals, ir, jc, jc+1, b, &ldb, &beta, c, &ldc); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_c, c, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_b, b, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_scscmv (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint k, jfloat alpha, jstring j_matdescra, jfloatArray j_vals, jintArray j_ir, jintArray j_jc, jfloatArray j_x, jfloat beta, jfloatArray j_y){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jfloat * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jfloat * x = (*env)->GetPrimitiveArrayCritical(env, j_x, 0); jfloat * y = (*env)->GetPrimitiveArrayCritical(env, j_y, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && x != NULL && y != NULL) { MKL_SCSCMV(transa, &m, &k, &alpha, matdescra, vals, ir, jc, jc+1, x, &beta, y); } else { returnValue = 1; } 
(*env)->ReleasePrimitiveArrayCritical(env, j_y, y, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_x, x, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_scoomv (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint k, jfloat alpha, jstring j_matdescra, jfloatArray j_vals, jintArray j_irows, jintArray j_icols, jint nnz, jfloatArray j_x, jfloat beta, jfloatArray j_y){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jfloat * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * irows = (*env)->GetPrimitiveArrayCritical(env, j_irows, 0); jint * icols = (*env)->GetPrimitiveArrayCritical(env, j_icols, 0); jfloat * x = (*env)->GetPrimitiveArrayCritical(env, j_x, 0); jfloat * y = (*env)->GetPrimitiveArrayCritical(env, j_y, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && irows != NULL && icols != NULL && x != NULL && y != NULL) { MKL_SCOOMV(transa, &m, &k, &alpha, matdescra, vals, irows, icols, &nnz, x, &beta, y); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_y, y, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_x, x, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_icols, icols, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_irows, irows, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_scoomv1 (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint k, jfloat 
alpha, jstring j_matdescra, jfloatArray j_vals, jintArray j_inds, jint nnz, jfloatArray j_x, jfloat beta, jfloatArray j_y){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jfloat * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * inds = (*env)->GetPrimitiveArrayCritical(env, j_inds, 0); jfloat * x = (*env)->GetPrimitiveArrayCritical(env, j_x, 0); jfloat * y = (*env)->GetPrimitiveArrayCritical(env, j_y, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && inds != NULL && x != NULL && y != NULL) { MKL_SCOOMV(transa, &m, &k, &alpha, matdescra, vals, inds, inds+nnz, &nnz, x, &beta, y); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_y, y, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_x, x, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_inds, inds, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_scsrmv (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint k, jfloat alpha, jstring j_matdescra, jfloatArray j_vals, jintArray j_ir, jintArray j_jc, jfloatArray j_x, jfloat beta, jfloatArray j_y){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jfloat * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jfloat * x = (*env)->GetPrimitiveArrayCritical(env, j_x, 0); jfloat * y = (*env)->GetPrimitiveArrayCritical(env, j_y, 0); jint returnValue; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && x != NULL && y != NULL) { MKL_SCSRMV(transa, 
&m, &k, &alpha, matdescra, vals, ir, jc, jc+1, x, &beta, y); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_y, y, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_x, x, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_dcsrmm (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint n, jint k, jdouble alpha, jstring j_matdescra, jdoubleArray j_vals, jintArray j_ir, jintArray j_jc, jdoubleArray j_b, jint ldb, jdouble beta, jdoubleArray j_c, jint ldc){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jdouble * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jdouble * b = (*env)->GetPrimitiveArrayCritical(env, j_b, 0); jdouble * c = (*env)->GetPrimitiveArrayCritical(env, j_c, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && b != NULL && c != NULL) { mkl_dcsrmm(transa, &m, &n, &k, &alpha, matdescra, vals, ir, jc, jc+1, b, &ldb, &beta, c, &ldc); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_c, c, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_b, b, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL 
Java_edu_berkeley_bid_SPBLAS_dcscmm (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint n, jint k, jdouble alpha, jstring j_matdescra, jdoubleArray j_vals, jintArray j_ir, jintArray j_jc, jdoubleArray j_b, jint ldb, jdouble beta, jdoubleArray j_c, jint ldc){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jdouble * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jdouble * b = (*env)->GetPrimitiveArrayCritical(env, j_b, 0); jdouble * c = (*env)->GetPrimitiveArrayCritical(env, j_c, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && b != NULL && c != NULL) { mkl_dcscmm(transa, &m, &n, &k, &alpha, matdescra, vals, ir, jc, jc+1, b, &ldb, &beta, c, &ldc); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_c, c, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_b, b, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_dcscmv (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint k, jdouble alpha, jstring j_matdescra, jdoubleArray j_vals, jintArray j_ir, jintArray j_jc, jdoubleArray j_x, jdouble beta, jdoubleArray j_y){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jdouble * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = 
(*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jdouble * x = (*env)->GetPrimitiveArrayCritical(env, j_x, 0); jdouble * y = (*env)->GetPrimitiveArrayCritical(env, j_y, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && x != NULL && y != NULL) { MKL_DCSCMV(transa, &m, &k, &alpha, matdescra, vals, ir, jc, jc+1, x, &beta, y); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_y, y, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_x, x, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; }; JNIEXPORT jint JNICALL Java_edu_berkeley_bid_SPBLAS_dcsrmv (JNIEnv * env, jobject calling_obj, jstring j_transa, jint m, jint k, jdouble alpha, jstring j_matdescra, jdoubleArray j_vals, jintArray j_ir, jintArray j_jc, jdoubleArray j_x, jdouble beta, jdoubleArray j_y){ char * transa = (char *)(*env)->GetStringUTFChars(env, j_transa, 0); char * matdescra = (char *)(*env)->GetStringUTFChars(env, j_matdescra, 0); jdouble * vals = (*env)->GetPrimitiveArrayCritical(env, j_vals, 0); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, 0); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, 0); jdouble * x = (*env)->GetPrimitiveArrayCritical(env, j_x, 0); jdouble * y = (*env)->GetPrimitiveArrayCritical(env, j_y, 0); jint returnValue = 0; if (transa != NULL && matdescra != NULL && vals != NULL && ir != NULL && jc != NULL && x != NULL && y != NULL) { MKL_DCSRMV(transa, &m, &k, &alpha, matdescra, vals, ir, jc, jc+1, x, &beta, y); } else { returnValue = 1; } (*env)->ReleasePrimitiveArrayCritical(env, j_y, y, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_x, x, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); 
(*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_vals, vals, 0); (*env)->ReleaseStringUTFChars(env, j_matdescra, matdescra); (*env)->ReleaseStringUTFChars(env, j_transa, transa); return returnValue; } #endif JNIEXPORT void JNICALL Java_edu_berkeley_bid_SPBLAS_dmcscm (JNIEnv * env, jobject calling_obj, jint M, jint N, jdoubleArray j_A, jint lda, jdoubleArray j_B, jintArray j_ir, jintArray j_jc, jdoubleArray j_C, jint ldc){ jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE); jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE); jdouble * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE); int ioff = jc[0]; int i; #pragma omp parallel for for (i = 0; i < N; i++) { int j, k, ir0; double *Ap, *Cp, bv; for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) { ir0 = ir[j]-ioff; Ap = A+(ir0*lda); Cp = C+(i*ldc); bv = B[j]; for (k = 0; k < M; k++) { Cp[k] += bv * Ap[k]; } } } (*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_SPBLAS_dmcsrm (JNIEnv * env, jobject calling_obj, jint M, jint N, jdoubleArray j_A, jint lda, jdoubleArray j_B, jintArray j_ir, jintArray j_jc, jdoubleArray j_C, jint ldc){ jdouble * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE); jdouble * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE); jdouble * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE); int ioff = jc[0]; int i; for (i = 0; i < N; i++) { 
int j, k, ir0; double *Ap, *Cp, bv; for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) { ir0 = ir[j]-ioff; Ap = A+(i*lda); Cp = C+(ir0*ldc); bv = B[j]; #pragma omp parallel for for (k = 0; k < M; k++) { Cp[k] += bv * Ap[k]; } } } (*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_SPBLAS_smcscm (JNIEnv * env, jobject calling_obj, jint M, jint N, jfloatArray j_A, jint lda, jfloatArray j_B, jintArray j_ir, jintArray j_jc, jfloatArray j_C, jint ldc){ jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE); jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE); jint * ir = (*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE); jfloat * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE); int ioff = jc[0]; int i; #pragma omp parallel for for (i = 0; i < N; i++) { int j, ir0, k; float *Ap, *Cp, bv; for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) { ir0 = ir[j]-ioff; Ap = A+(ir0*lda); Cp = C+(i*ldc); bv = B[j]; for (k = 0; k < M; k++) { Cp[k] += bv * Ap[k]; } } } (*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0); } JNIEXPORT void JNICALL Java_edu_berkeley_bid_SPBLAS_smcsrm (JNIEnv * env, jobject calling_obj, jint M, jint N, jfloatArray j_A, jint lda, jfloatArray j_B, jintArray j_ir, jintArray j_jc, jfloatArray j_C, jint ldc){ jfloat * A = (*env)->GetPrimitiveArrayCritical(env, j_A, JNI_FALSE); jfloat * B = (*env)->GetPrimitiveArrayCritical(env, j_B, JNI_FALSE); jint * ir = 
(*env)->GetPrimitiveArrayCritical(env, j_ir, JNI_FALSE); jint * jc = (*env)->GetPrimitiveArrayCritical(env, j_jc, JNI_FALSE); jfloat * C = (*env)->GetPrimitiveArrayCritical(env, j_C, JNI_FALSE); int ioff = jc[0]; int i; for (i = 0; i < N; i++) { int j, ir0, k; float *Ap, *Cp, Bj; for (j = jc[i]-ioff; j < jc[i+1]-ioff; j++) { ir0 = ir[j]-ioff; Ap = A + (i*lda); Cp = C + (ir0*ldc); Bj = B[j]; for (k = 0; k < M; k++) { Cp[k] += Bj * Ap[k]; } } } (*env)->ReleasePrimitiveArrayCritical(env, j_C, C, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_jc, jc, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_ir, ir, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_B, B, 0); (*env)->ReleasePrimitiveArrayCritical(env, j_A, A, 0); }
VecMath.h
/* * Copyright (C) 2013 Sergey Kosarevsky (sk@linderdaum.com) * Copyright (C) 2013 Viktor Latypov (vl@linderdaum.com) * Based on Linderdaum Engine http://www.linderdaum.com * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * 2. Redistributions in binary form must display the names 'Sergey Kosarevsky' and * 'Viktor Latypov'in the credits of the application, if such credits exist. * The authors of this work must be notified via email (sk@linderdaum.com) in * this case of redistribution. * * 3. Neither the name of copyright holders nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ``AS * IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL COPYRIGHT HOLDERS OR CONTRIBUTORS * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #pragma once #include <math.h> #include <cmath> /// Linderdaum Math Library namespace Math { #undef INFINITY const float INFINITY = 1e30f; const float EPSILON = 1e-8f; const float PI = 3.14159265358979323846f; const float PI2 = 2.0f * PI; const float TWOPI = PI2; const float HALFPI = PI / 2.0f; const float SQRT2 = 1.41421356f; const float DTOR = PI / 180.0f; const float RTOD = 180.0f / PI; const float LOGHALF = -0.693147f; // log(0.5) const float LOGHALFI = -1.442695f; // Inverse of log(0.5) inline float LAbs( float A ) { return ( A > 0.0f ) ? A : -A; } inline float LMin( float A, float B ) { return ( A < B ) ? A : B; } inline float LMax( float A, float B ) { return ( A > B ) ? A : B; } inline float DegToRad( float F ) { return F * DTOR; } inline float RadToDeg( float F ) { return F * RTOD; } template <typename T> void SwapValues( T& V1, T& V2 ) { T Tmp = V1; V1 = V2; V2 = Tmp; } template <typename T> T Clamp( const T Value, const T Min, const T Max ) { if ( Value > Max ) { return Max; } if ( Value < Min ) { return Min; } return Value; } template <typename T> T ClampPeriodic( const T Value, const T Min, const T Max ) { } #pragma region Misc functions usefull in texture generation template <class T> T Step( T a, T x ) { return ( T )( x >= a ); } template <class T> T Boxstep( T a, T b, T x ) { return Clamp( ( x - a ) / ( b - a ), 0, 1 ); } template <class T> T Pulse( T a, T b, T x ) { return ( T )( ( x >= a ) - ( x >= b ) ); } template <class T> T Cubic( T a ) { return a * a * ( 3 - 2 * a ); } template <class T, class FactorT> T Lerp( T a, T b, FactorT x ) { return ( a + x * ( b - a ) ); } inline int Floor( float a ) { return ( ( int )a - ( a < 0 && a != ( int )a ) ); } inline int Ceil( float a ) { return ( ( int )a + ( a > 0 && a != ( int )a ) ); } inline float Gamma( float a, float g ) { return powf( a, 1 / g ); } inline float Bias( float a, float b ) { return powf( a, logf( b ) * LOGHALFI ); } inline float Exposure( float l, float k ) { return ( 1 - expf( -l * 
k ) ); } inline float Gain( float a, float b ) { if ( a <= EPSILON ) { return 0; } if ( a >= 1.0f - EPSILON ) { return 1; } float p = ( logf( 1.0f - b ) * LOGHALFI ); return ( ( a < 0.5 ) ? powf( 2.0f * a, p ) : 1.0f - powf( 2.0f * ( 1.0f - a ), p ) ) * 0.5f; } /// Cubically-interpolated "smooth" transition from a to b inline float SmoothStep( float a, float b, float x ) { if ( x <= a ) { return 0; } if ( x >= b ) { return 1; } return Cubic( ( x - a ) / ( b - a ) ); } #pragma endregion /// Floating-point division remainder inline float Mod( float a, float b ) { a -= b * floor( a / b ); if ( a < 0 ) { a += b; } return a; } /// Division remainder (wrap around for negative values) inline int ModInt( int a, int b ) { int r = a % b; return ( r < 0 ) ? r + b : r; } inline bool IsPowerOf2( const int Num ) { return ( Num & ( Num - 1 ) ) == 0; } inline unsigned int GetNextPowerOf2( unsigned int Num ) { Num |= ( Num >> 1 ); Num |= ( Num >> 2 ); Num |= ( Num >> 4 ); Num |= ( Num >> 8 ); Num |= ( Num >> 16 ); return Num + 1; } inline bool IsMaskSet( int Value, int Mask ) { return ( Value & Mask ) == Mask; } inline bool IsBitSet( int Value, int BitNumber ) { int Mask = 1 << BitNumber; return ( Value & Mask ) == Mask; } inline int SetBit( int Value, int BitNumber ) { int Mask = 1 << BitNumber; return Value | Mask; } inline int IntPow( int A, int B ) { return static_cast<int>( pow( static_cast<float>( A ), B ) ); } template<class T> inline T Sign( T Val ) { return ( Val > 0 ) ? Val : -Val; } inline float fsign( float Val ) { return ( Val > 0.0f ) ? 
1.0f : -1.0f; } inline float ClipAngleTo0_360( float Angle ) { /* double IntPart = 0; double ClampedPart = modf( static_cast<double>(Angle), &IntPart ); return static_cast<float>( ClampedPart * 360.0 ); */ return std::fmod( Angle, 360.0f ); } inline float ClipAngleTo180( float angle ) { return ClipAngleTo0_360( angle ) - 180.0f; } inline double Round( double x, int Digits ) { double Power = pow( 10.0, Digits ); return static_cast<double>( static_cast<int>( x * Power ) ) / Power; } // find angle1-angle2 clipping it to [0..360] inline float FindAngleDelta( float angle1, float angle2 ) { /* float delta = angle1 - angle2; delta = ClipAngleTo0_360( delta ); if ( delta > 180.0 ) { delta = delta - 360.0f; // invert delta } return delta; */ float From = ClipAngleTo0_360( angle2 ); float To = ClipAngleTo0_360( angle1 ); float Dist = To - From; if ( Dist < -180.0f ) { Dist += 360.0f; } else if ( Dist > 180.0f ) { Dist -= 360.0f; } return Dist; } } class LVector2; class LVector3; class LVector4; class LVector2i; inline LVector2 operator*( const float A, const LVector2& B ); inline LVector2i operator*( const int A, const LVector2i& B ); inline LVector3 operator*( const float A, const LVector3& B ); inline LVector3 operator/( const float A, const LVector3& B ); inline LVector4 operator*( const float A, const LVector4& B ); /// 2D vector class LVector2 { public: float x; float y; public: LVector2(): x( 0 ), y( 0 ) {}; LVector2( float lx, float ly ): x( lx ), y( ly ) {}; LVector2( int lx, int ly ): x( static_cast<float>( lx ) ), y( static_cast<float>( ly ) ) {}; explicit LVector2( const float lx ): x( lx ), y( lx ) {}; explicit LVector2( const LVector2i& Vec ); // // LVector2 // inline float operator[]( const int Index ) const { return ( &x )[Index]; }; inline float& operator[]( const int Index ) { return ( &x )[Index]; }; inline LVector2 operator-() const { return LVector2( -x, -y ); } inline LVector2 operator-( const LVector2& Vec ) const { return LVector2( x - Vec.x, y - Vec.y 
); } inline LVector2 operator+( const LVector2& Vec ) const { return LVector2( x + Vec.x, y + Vec.y ); } inline LVector2 operator*( const float A ) const { return LVector2( x * A, y * A ); } inline LVector2 operator/( const float A ) const { return LVector2( x / A, y / A ); } inline LVector2& operator+=( const LVector2& Vec ) { x += Vec.x; y += Vec.y; return *this; } inline LVector2& operator-=( const LVector2& Vec ) { x -= Vec.x; y -= Vec.y; return *this; } inline LVector2& operator*=( const float A ) { x *= A; y *= A; return *this; } inline LVector2& operator*=( const LVector2& Vec ) { x *= Vec.x; y *= Vec.y; return *this; } inline bool operator == ( const LVector2& Vec ) const { return ( Vec.x == x ) && ( Vec.y == y ); } inline bool operator != ( const LVector2& Vec ) const { return ( Vec.x != x ) || ( Vec.y != y ); } inline const float* ToFloatPtr() const { return &x; }; inline float* ToFloatPtr() { return &x; }; /// Dot product inline float Dot( const LVector2& Vec ) const { return ( x * Vec.x + y * Vec.y ); } /// Interpolate vetween two vectors void Lerp( const LVector2& Vec1, const LVector2& Vec2, float Factor ) { if ( Factor <= 0.0f ) { ( *this ) = Vec1; } else if ( Factor >= 1.0f ) { ( *this ) = Vec2; } else { ( *this ) = Vec1 + Factor * ( Vec2 - Vec1 ); } } /// Safe 2d vector normalization void Normalize(); inline LVector2 GetNormalized() const { LVector2 Vec( *this ); Vec.Normalize(); return Vec; }; /// Calculate reflection vector (valid only for unit vectors) void Reflect( const LVector2& Normal ) { ( *this ) -= ( 2.0f * Dot( Normal ) ) * Normal; } /// Get reflection vector (valid only for unit vectors) inline LVector2 GetReflected( const LVector2& Normal ) const { LVector2 Vec( *this ); Vec.Reflect( Normal ); return Vec; } /// Get orthogonal vector inline LVector2 GetOrthogonal() const { return LVector2( -y, x ); } /// Euclidean length inline float Length() const { return sqrt( x * x + y * y ); }; /// Squared length (for faster distance comparison) 
inline float SqrLength() const { return x * x + y * y; }; }; /// 2D integer vector class LVector2i { public: int x; int y; public: LVector2i(): x( 0 ), y( 0 ) {}; LVector2i( int lx, int ly ): x( lx ), y( ly ) {}; explicit LVector2i( const int lx ): x( lx ), y( lx ) {}; // // LVector2i // inline int operator[]( const int Index ) const { return ( &x )[Index]; }; inline int& operator[]( const int Index ) { return ( &x )[Index]; }; inline LVector2i operator-( const LVector2i& Vec ) const { return LVector2i( x - Vec.x, y - Vec.y ); } inline LVector2i operator+( const LVector2i& Vec ) const { return LVector2i( x + Vec.x, y + Vec.y ); } inline bool operator==( const LVector2i& Vec ) const { return ( Vec.x == x ) && ( Vec.y == y ); } inline bool operator!=( const LVector2i& Vec ) const { return ( Vec.x != x ) || ( Vec.y != y ); } inline LVector2i operator*( const int A ) const { return LVector2i( x * A, y * A ); } inline LVector2i operator/( const int A ) const { return LVector2i( x / A, y / A ); } inline const int* ToIntPtr() const { return &x; }; inline int* ToIntPtr() { return &x; }; inline float Length() const { return sqrt( static_cast<float>( x * x + y * y ) ); }; inline int SqrLength() const { return x * x + y * y; }; }; /// 3D vector class LVector3 { public: float x; float y; float z; public: LVector3(): x( 0 ), y( 0 ), z( 0 ) {}; LVector3( float lx, float ly, float lz ): x( lx ), y( ly ), z( lz ) {}; LVector3( int lx, int ly, int lz ): x( static_cast<float>( lx ) ), y( static_cast<float>( ly ) ), z( static_cast<float>( lz ) ) {}; LVector3( const LVector2& Vec, const float lz ): x( Vec.x ), y( Vec.y ), z( lz ) {}; explicit LVector3( const LVector2& Vec ): x( Vec.x ), y( Vec.y ), z( 0 ) {}; explicit LVector3( const float lx ): x( lx ), y( lx ), z( lx ) {}; // // LVector3 // inline float operator[]( const int Index ) const { return ( &x )[Index]; } inline float& operator[]( const int Index ) { return ( &x )[Index]; } inline LVector3 operator-() const { return 
LVector3( -x, -y, -z ); } inline LVector3 operator+() const { return LVector3( +x, +y, +z ); } inline LVector3 operator*( const float A ) const { return LVector3( x * A, y * A, z * A ); } inline LVector3 operator/( const float A ) const { return LVector3( x / A, y / A, z / A ); } inline LVector3 operator/( const LVector3& Vec ) const { return LVector3( x / Vec.x, y / Vec.y, z / Vec.z ); } inline LVector3 operator-( const LVector3& Vec ) const { return LVector3( x - Vec.x, y - Vec.y, z - Vec.z ); } inline LVector3 operator+( const LVector3& Vec ) const { return LVector3( x + Vec.x, y + Vec.y, z + Vec.z ); } LVector3& operator*=( const float A ) { x *= A; y *= A; z *= A; return *this; } LVector3& operator/=( const float A ) { // NO CHECKS HERE: maximum speed. x /= A; y /= A; z /= A; return *this; } LVector3& operator+=( const LVector3& Vec ) { x += Vec.x; y += Vec.y; z += Vec.z; return *this; } LVector3& operator-=( const LVector3& Vec ) { x -= Vec.x; y -= Vec.y; z -= Vec.z; return *this; } inline bool operator==( const LVector3& Vec ) const { return ( Vec.x == x ) && ( Vec.y == y ) && ( Vec.z == z ); } inline bool operator!=( const LVector3& Vec ) const { return ( Vec.x != x ) || ( Vec.y != y ) || ( Vec.z != z ); } /// Per-component multiplication inline LVector3 operator*( const LVector3& Vec ) const { return LVector3( x * Vec.x, y * Vec.y, z * Vec.z ); } inline float Dot( const LVector3& Vec ) const { return ( x * Vec.x + y * Vec.y + z * Vec.z ); } inline LVector3 Cross( const LVector3& Vec ) const { return LVector3( y * Vec.z - z * Vec.y, z * Vec.x - x * Vec.z, x * Vec.y - y * Vec.x ); } LVector3 OrthogonalVector() const { LVector3 Result = *this; Result.Normalize(); return Result.Cross( Result + LVector3( 1.0f, 2.0f, 3.0f ) ); } inline const float* ToFloatPtr() const { return &x; } inline float* ToFloatPtr() { return &x; } inline LVector2 ToVector2() const { return LVector2( x, y ); } void Lerp( const LVector3& Vec1, const LVector3& Vec2, float Factor ) { if ( 
Factor <= 0.0f ) { ( *this ) = Vec1; } else if ( Factor >= 1.0f ) { ( *this ) = Vec2; } else { ( *this ) = Vec1 + Factor * ( Vec2 - Vec1 ); } } void Normalize(); inline bool IsZeroVector( float Eps ) const { return ( fabs( x ) < Eps && fabs( y ) < Eps && fabs( z ) < Eps ); } inline LVector3 GetNormalized() const { LVector3 Vec( *this ); Vec.Normalize(); return Vec; }; inline float Length() const { return sqrt( x * x + y * y + z * z ); } inline float SqrLength() const { return x * x + y * y + z * z; } /// Get the zero-based index of this vector's maximum component inline int GetMaximumComponentIndex() const { return ( x > y ) ? ( ( x > z ) ? 0 : 2 ) : ( ( y > z ) ? 1 : 2 ); } /// Get the zero-based index of this vector's minimum component inline int GetMinimumComponentIndex() const { return ( x < y ) ? ( ( x < z ) ? 0 : 2 ) : ( ( y < z ) ? 1 : 2 ); } /// Calculate reflection vector (valid only for unit vectors) void Reflect( const LVector3& Normal ) { ( *this ) -= ( 2.0f * Dot( Normal ) ) * Normal; } /// Get reflection vector (valid only for unit vectors) inline LVector3 GetReflected( const LVector3& Normal ) const { LVector3 Vec( *this ); Vec.Reflect( Normal ); return Vec; } }; /// 3D integer vector class LVector3i { public: int x; int y; int z; public: LVector3i(): x( 0 ), y( 0 ), z( 0 ) {}; LVector3i( int lx, int ly, int lz ): x( lx ), y( ly ), z( lz ) {}; explicit LVector3i( const int lx ): x( lx ), y( lx ), z( lx ) {}; // // LVector3i // inline int operator[]( const int Index ) const { return ( &x )[Index]; } inline int& operator[]( const int Index ) { return ( &x )[Index]; } inline LVector3i operator-( const LVector3i& Vec ) const { return LVector3i( x - Vec.x, y - Vec.y, z - Vec.z ); } inline LVector3i operator+( const LVector3i& Vec ) const { return LVector3i( x + Vec.x, y + Vec.y, z + Vec.z ); } inline bool operator==( const LVector3i& Vec ) const { return ( Vec.x == x ) && ( Vec.y == y ) && ( Vec.z == z ); } inline bool operator!=( const LVector3i& Vec ) 
const { return ( Vec.x != x ) || ( Vec.y != y ) || ( Vec.z != z ); } inline LVector3i operator*( const int A ) const { return LVector3i( x * A, y * A, z * A ); } inline LVector3i operator/( const int A ) const { return LVector3i( x / A, y / A, z / A ); } inline const int* ToIntPtr() const { return &x; } inline int* ToIntPtr() { return &x; } inline float Length() const { return sqrt( static_cast<float>( x * x + y * y + z * z ) ); } inline int SqrLength() const { return x * x + y * y + z * z; } }; /// 4D int vector class LVector4i { public: int x; int y; int z; int w; public: LVector4i(): x( 0 ), y( 0 ), z( 0 ), w( 0 ) {}; LVector4i( int lx, int ly, int lz, int lw ): x( lx ), y( ly ), z( lz ), w( lw ) {}; explicit LVector4i( const int lx ): x( lx ), y( lx ), z( lx ), w( lx ) {}; // // LVector4i // inline int operator[]( const int Index ) const { return ( &x )[Index]; } inline int& operator[]( const int Index ) { return ( &x )[Index]; } inline bool operator==( const LVector4i& Vec ) const { return ( Vec.x == x ) && ( Vec.y == y ) && ( Vec.z == z ) && ( Vec.w == w ); } inline bool operator!=( const LVector4i& Vec ) const { return ( Vec.x != x ) || ( Vec.y != y ) || ( Vec.z != z ) || ( Vec.w != w ); } inline LVector2i XY() const { return LVector2i( x, y ); } inline LVector2i YX() const { return LVector2i( y, x ); } inline LVector3i XYZ() const { return LVector3i( x, y, z ); } inline LVector3i ZYX() const { return LVector3i( z, y, x ); } }; /// 4D vector class LVector4 { public: float x; float y; float z; float w; public: LVector4(): x( 0 ), y( 0 ), z( 0 ), w( 0 ) {}; LVector4( float lx, float ly ): x( lx ), y( ly ), z( 0 ), w( 0 ) {}; LVector4( float lx, float ly, float lz, float lw ): x( lx ), y( ly ), z( lz ), w( lw ) {}; LVector4( int lx, int ly, int lz, int lw ): x( static_cast<float>( lx ) ), y( static_cast<float>( ly ) ), z( static_cast<float>( lz ) ), w( static_cast<float>( lw ) ) {}; LVector4( const LVector3& Vec, const float lw ): x( Vec.x ), y( Vec.y ), z( 
Vec.z ), w( lw ) {}; explicit LVector4( const float lx ): x( lx ), y( lx ), z( lx ), w( lx ) {}; explicit LVector4( const LVector2& Vec ): x( Vec.x ), y( Vec.y ), z( 0 ), w( 0 ) {}; explicit LVector4( const LVector3& Vec ): x( Vec.x ), y( Vec.y ), z( Vec.z ), w( 0 ) {}; explicit LVector4( const LVector4i& Vec ): x( static_cast<float>( Vec.x ) ), y( static_cast<float>( Vec.y ) ), z( static_cast<float>( Vec.z ) ), w( static_cast<float>( Vec.w ) ) {}; // // LVector4 // inline float operator[]( const int Index ) const { return ( &x )[Index]; } inline float& operator[]( const int Index ) { return ( &x )[Index]; } inline LVector4 operator-( const LVector4& Vec ) const { return LVector4( x - Vec.x, y - Vec.y, z - Vec.z, w - Vec.w ); } inline LVector4 operator+( const LVector4& Vec ) const { return LVector4( x + Vec.x, y + Vec.y, z + Vec.z, w + Vec.w ); } inline LVector4 operator*( const float A ) const { return LVector4( x * A, y * A, z * A, w * A ); } inline LVector4 operator/( const float A ) const { return LVector4( x / A, y / A, z / A, w / A ); } inline LVector4& operator*=( const float A ) { x *= A; y *= A; z *= A; w *= A; return *this; } inline LVector4& operator/=( const float A ) { // We DO check for zero explicitly. 
This operator is used only three times in the engine x /= A; y /= A; z /= A; w /= A; return *this; } inline LVector4& operator -= ( const LVector4& Vec ) { x -= Vec.x; y -= Vec.y; z -= Vec.z; w -= Vec.w; return *this; } inline LVector4& operator += ( const LVector4& Vec ) { x += Vec.x; y += Vec.y; z += Vec.z; w += Vec.w; return *this; } inline bool operator==( const LVector4& Vec ) const { return ( Vec.x == x ) && ( Vec.y == y ) && ( Vec.z == z ) && ( Vec.w == w ); } inline bool operator!=( const LVector4& Vec ) const { return ( Vec.x != x ) || ( Vec.y != y ) || ( Vec.z != z ) || ( Vec.w != w ); } /// Per-component multiplication inline LVector4 operator*( const LVector4& Vec ) const { return LVector4( x * Vec.x, y * Vec.y, z * Vec.z, w * Vec.w ); } /// Dot product inline float Dot( const LVector4& Vec ) const { return ( x * Vec.x + y * Vec.y + z * Vec.z + w * Vec.w ); } inline const float* ToFloatPtr() const { return &x; } inline float* ToFloatPtr() { return &x; } inline LVector2 ToVector2() const { return LVector2( x, y ); } inline LVector3 ToVector3() const { return LVector3( x, y, z ); } void Lerp( const LVector4& Vec1, const LVector4& Vec2, float Factor ) { if ( Factor <= 0.0f ) { ( *this ) = Vec1; } else if ( Factor >= 1.0f ) { ( *this ) = Vec2; } else { ( *this ) = Vec1 + Factor * ( Vec2 - Vec1 ); } } void Normalize(); const LVector4& Saturate(); inline float Length() const { return sqrt( x * x + y * y + z * z + w * w ); } inline float SqrLength() const { return x * x + y * y + z * z + w * w; } inline bool IsZeroVector( float Eps ) const { return ( fabs( x ) < Eps && fabs( y ) < Eps && fabs( z ) < Eps && fabs( w ) < Eps ); } /// swizzlers inline LVector4 BGRA() const { return LVector4( z, y, x, w ); }; inline LVector2 XY() const { return LVector2( x, y ); } inline LVector2 YX() const { return LVector2( y, x ); } inline LVector3 XYZ() const { return LVector3( x, y, z ); } inline LVector3 ZYX() const { return LVector3( z, y, x ); } }; inline LVector2 
operator*( const float A, const LVector2& B ) { return LVector2 ( B.x * A, B.y * A ); } inline LVector2i operator*( const int A, const LVector2i& B ) { return LVector2i( B.x * A, B.y * A ); } inline LVector3 operator*( const float A, const LVector3& B ) { return LVector3( B.x * A, B.y * A, B.z * A ); } inline LVector3 operator/( const float A, const LVector3& B ) { return LVector3( B.x / A, B.y / A, B.z / A ); } inline LVector4 operator*( const float A, const LVector4& B ) { return LVector4( B.x * A, B.y * A, B.z * A, B.w * A ); } /// Utility methods inline float LVector3_MixedProduct( const LVector3& A, const LVector3& B, const LVector3& C ) { return A.Dot( B.Cross( C ) ); } inline bool LVector3_AreCollinear( const LVector3& A, const LVector3& B, const LVector3& C, float Eps ) { return ( B - A ).Cross( C - A ).SqrLength() < Eps; } inline bool LVector3_AreComplanar( const LVector3& A, const LVector3& B, const LVector3& C, const LVector3& D, float Eps ) { return fabs( LVector3_MixedProduct( B - A, C - A, D - A ) ) < Eps; } // shortcuts using vec2 = LVector2; using vec3 = LVector3; using vec4 = LVector4; /// Linderdaum Math Library namespace Math { inline LVector3 SphericalToOrtho( float Radius, float Phi, float Theta ) { return LVector3( Radius * cosf( Phi * DTOR ), Radius * sinf( Phi * DTOR ), Radius * sinf( Theta * DTOR ) ); } inline float Vec2Angle( float x, float y ) { return 180.0f + RadToDeg ( atan2( y, x ) ); /* if ( x >= 0 ) { if ( y >= 0 ) { // x > 0, y > 0 // first Q return RadToDeg( acos( x ) ); } else { // x > 0, y < 0 // fourth Q return 360.0f - RadToDeg( acos( x ) ); } } else { if ( y >= 0 ) { // x < 0, y > 0 // second Q return 180.0f - RadToDeg( asin( y ) ); } else { // x < 0, y < 0 // third Q return 180.0f + RadToDeg( asin( -y ) ); } } */ } inline LVector2 ToPolar( float RR, float Angle ) { return RR * vec2( cosf( DegToRad( Angle ) ), sinf( DegToRad( Angle ) ) ); } /** Spherical and Cartesian coordinates r = |(x,y,z)| phi = arccos(y) theta = 
atan2(x,z) x = r * cos(theta) * sin(phi) y = r * cos(theta) * cos(phi) z = r * sin(theta) */ /// Convert (x,y,z) to (r, phi, theta) inline LVector3 CartesianToSpherical( const LVector3& Pos ) { LVector3 Result; LVector3 NPos = Pos.GetNormalized(); Result.x = Pos.Length(); Result.y = Math::Vec2Angle( NPos.x, NPos.y ); Result.z = RadToDeg( acos( NPos.z ) ); return Result; } /// Convert (r, phi, theta) to (x,y,z) inline LVector3 SphericalToCartesian( const LVector3& Sph ) { float sinPhi = sin( Sph.y ); float cosPhi = cos( Sph.y ); float sinTheta = sin( Sph.z ); float cosTheta = cos( Sph.z ); return Sph.x * LVector3( cosTheta * sinPhi, cosTheta * cosPhi, sinTheta ); } inline LVector3 Barycentric2D( float x, float y, float x1, float y1, float x2, float y2, float x3, float y3 ) { float detT = ( y2 - y3 ) * ( x1 - x3 ) + ( x3 - x2 ) * ( y1 - y3 ); float l1 = ( ( y2 - y3 ) * ( x - x3 ) + ( x3 - x2 ) * ( y - y3 ) ) / detT; float l2 = ( ( y3 - y1 ) * ( x - x3 ) + ( x1 - x3 ) * ( y - y3 ) ) / detT; return LVector3( l1, l2, 1.0f - l1 - l2 ); } inline float FactorAdjust( float Color, float Factor, float IntensityMax, float Gamma ) { return ( Color > 0.001f ) ? 
IntensityMax * pow( Color * Factor, Gamma ) : 0.0f; } /// http://miguelmoreno.net/sandbox/wavelengthtoRGB/ /// and http://www.midnightkite.com/color.html /// Wavelength from 350 to 780 inline LVector3 ColorFromWaveLength( float W ) { float Gamma = 1.0f; float Blue = 0.0f; float Green = 0.0f; float Red = 0.0f; float Factor = 0.0f; if ( ( W >= 350.0f ) && ( W < 440.0f ) ) { Red = -( W - 440.0f ) / ( 440.0f - 350.0f ); Blue = 1.0f; } else if ( ( W >= 440.0f ) && ( W < 490.0f ) ) { Green = ( W - 440.0f ) / ( 490.0f - 440.0f ); Blue = 1.0f; } else if ( ( W >= 490.0f ) && ( W < 510.0f ) ) { Green = 1.0f; Blue = -( W - 510.0f ) / ( 510.0f - 490.0f ); } else if ( ( W >= 510.0f ) && ( W < 580.0f ) ) { Red = ( W - 510.0f ) / ( 580.0f - 510.0f ); Green = 1.0f; } else if ( ( W >= 580.0f ) && ( W < 645.0f ) ) { Red = 1.0f; Green = -( W - 645.0f ) / ( 645.0f - 580.0f ); } else if ( ( W >= 645.0f ) && ( W <= 780.0f ) ) { Red = 1.0f; } if ( ( W >= 350.0f ) && ( W < 420.0f ) ) { Factor = 0.3f + 0.7f * ( W - 350.0f ) / ( 420.0f - 350.0f ); } else if ( ( W >= 420.0f ) && ( W < 700.0f ) ) { Factor = 1.0f; } else if ( ( W >= 700.0f ) && ( W <= 780.0f ) ) { Factor = 0.3f + 0.7f * ( 780.0f - W ) / ( 780.0f - 700.0f ); } Red = FactorAdjust( Red, Factor, 255.0f, Gamma ); Green = FactorAdjust( Green, Factor, 255.0f, Gamma ); Blue = FactorAdjust( Blue, Factor, 255.0f, Gamma ); return vec3( Red, Green, Blue ) / 255.0f; } /// [0..360] angle to the red..blue "rainbow" inline LVector3 ColorFromAngle( float phi ) { float startA = 0.0f; float dA = 10.0f; if ( phi < startA + dA || phi > 360.0f + startA - dA ) { float t = 0.0f; if ( phi > startA + dA ) { t = ( phi - startA + dA - 360.0f ) / ( 2.0f * dA ); } else { t = ( phi - startA + dA ) / ( 2.0f * dA ); } return t * ColorFromWaveLength( 350.0f ) + ( 1.0f - t ) * ColorFromWaveLength( 780.0f ); } // map [startA + dA, 360 + startA - dA] to [0..360] float phiN = ( phi - dA - startA ) / ( 360.0f - 2 * dA ); return ColorFromWaveLength( 780.0f + phiN * 
( 350.0f - 780.0f ) ); } } // // LMatrix3 // /// 3x3 matrix class LMatrix3 { private: LVector3 FMatrix[3]; public: inline LMatrix3() {}; inline explicit LMatrix3( const float A ) { FMatrix[0] = LVector3( A ); FMatrix[1] = LVector3( A ); FMatrix[2] = LVector3( A ); }; inline explicit LMatrix3( const LVector3& X, const LVector3& Y, const LVector3& Z ) { FMatrix[0] = X; FMatrix[1] = Y; FMatrix[2] = Z; }; // // LMatrix3 // inline LVector3& operator[]( const int Index ) { return FMatrix[Index]; }; inline const LVector3& operator[]( const int Index ) const { return FMatrix[Index]; }; inline LMatrix3 operator*( const LMatrix3& Matrix ) const; inline LMatrix3 operator+( const LMatrix3& Matrix ) const; inline LVector3 operator*( const LVector3& Vector ) const; inline void ZeroMatrix() { for ( int i = 0; i <= 2; ++i ) { FMatrix[i] = LVector3( 0 ); } }; inline void IdentityMatrix() { ZeroMatrix(); for ( int i = 0; i <= 2; ++i ) { FMatrix[i][i] = 1.0f; } }; inline void RotateMatrixAxis( const float Angle, const LVector3& Axis ); inline void RotateMatrix( const LVector3& V1, const LVector3& V2 ); void Inverse(); LMatrix3 GetInversed() const; LMatrix3 GetTransposed() const; inline const float* ToFloatPtr() const { return FMatrix[0].ToFloatPtr(); }; inline float* ToFloatPtr() { return FMatrix[0].ToFloatPtr(); }; static const LMatrix3& Identity(); /// Make an orthogonal matrix out of the given one void Orthonormalize(); void CalculateEigenVectors( LVector3* V, float* D ) const; }; LMatrix3 LMatrix3::operator+( const LMatrix3& Matrix ) const { LMatrix3 Result; for ( int i = 0; i != 3; ++i ) { for ( int j = 0; j != 3; ++j ) { Result[i][j] = FMatrix[i][j] + Matrix[i][j]; } } return Result; } LMatrix3 LMatrix3::operator*( const LMatrix3& Matrix ) const { LMatrix3 Result; const float* M1Ptr = ToFloatPtr(); const float* M2Ptr = Matrix.ToFloatPtr(); float* RPtr = Result.ToFloatPtr(); for ( int i = 0; i != 3; ++i ) { for ( int j = 0; j != 3; ++j ) { *RPtr = M1Ptr[0] * M2Ptr[ 0 * 3 + j ] + 
M1Ptr[1] * M2Ptr[ 1 * 3 + j ] + M1Ptr[2] * M2Ptr[ 2 * 3 + j ]; RPtr++; } M1Ptr += 3; } return Result; } LVector3 LMatrix3::operator*( const LVector3& Vector ) const { return LVector3( FMatrix[ 0 ].x * Vector.x + FMatrix[ 1 ].x * Vector.y + FMatrix[ 2 ].x * Vector.z, FMatrix[ 0 ].y * Vector.x + FMatrix[ 1 ].y * Vector.y + FMatrix[ 2 ].y * Vector.z, FMatrix[ 0 ].z * Vector.x + FMatrix[ 1 ].z * Vector.y + FMatrix[ 2 ].z * Vector.z ); } void LMatrix3::RotateMatrixAxis( const float Angle, const LVector3& Axis ) { float CosA = cos( Angle ); float SinA = sin( Angle ); LVector3 NAxis = Axis.GetNormalized(); float Ax = NAxis.x; float Ay = NAxis.y; float Az = NAxis.z; float AxAx = Ax * Ax; float AxAy = Ax * Ay; float AxAz = Ax * Az; float AyAx = AxAy; float AyAy = Ay * Ay; float AyAz = Ay * Az; float AzAx = AxAz; float AzAy = AyAz; float AzAz = Az * Az; FMatrix[0][0] = AxAx + ( 1.0f - AxAx ) * CosA; FMatrix[0][1] = AxAy * ( 1.0f - CosA ) + Az * SinA; FMatrix[0][2] = AxAz * ( 1.0f - CosA ) - Ay * SinA; FMatrix[1][0] = AyAx * ( 1.0f - CosA ) - Az * SinA; FMatrix[1][1] = AyAy + ( 1.0f - AyAy ) * CosA; FMatrix[1][2] = AyAz * ( 1.0f - CosA ) + Ax * SinA; FMatrix[2][0] = AzAx * ( 1.0f - CosA ) + Ay * SinA; FMatrix[2][1] = AzAy * ( 1.0f - CosA ) - Ax * SinA; FMatrix[2][2] = AzAz + ( 1.0f - AzAz ) * CosA; } void LMatrix3::RotateMatrix( const LVector3& V1, const LVector3& V2 ) { LVector3 Vec1 = V1.GetNormalized(); LVector3 Vec2 = V2.GetNormalized(); LVector3 Axis = Vec1.Cross( Vec2 ); float CosAngle, Angle; if ( Axis.Length() == 0 ) { Axis = LVector3( 0.0f, 0.0f, 1.0f ); } CosAngle = Vec1.Dot( Vec2 ); Angle = acos( CosAngle ); RotateMatrixAxis( Angle, Axis ); } // // LMatrix4 // /// 4x4 matrix class LMatrix4 { private: LVector4 FMatrix[4]; public: inline LMatrix4() {}; inline explicit LMatrix4( const float A ) { FMatrix[0] = LVector4( A ); FMatrix[1] = LVector4( A ); FMatrix[2] = LVector4( A ); FMatrix[3] = LVector4( A ); }; inline explicit LMatrix4( float a00, float a01, float a02, 
float a03, float a10, float a11, float a12, float a13, float a20, float a21, float a22, float a23, float a30, float a31, float a32, float a33 ) { FMatrix[0] = LVector4( a00, a01, a02, a03 ); FMatrix[1] = LVector4( a10, a11, a12, a13 ); FMatrix[2] = LVector4( a20, a21, a22, a23 ); FMatrix[3] = LVector4( a30, a31, a32, a33 ); }; inline explicit LMatrix4( const LVector4& X, const LVector4& Y, const LVector4& Z, const LVector4& W ) { FMatrix[0] = X; FMatrix[1] = Y; FMatrix[2] = Z; FMatrix[3] = W; }; inline explicit LMatrix4( const float* Floats16 ) { for ( int i = 0; i != 4; i++ ) for ( int j = 0; j != 4; j++ ) { FMatrix[i][j] = Floats16[i * 4 + j]; } } inline explicit LMatrix4( const LMatrix3& mtx3 ) { for ( int i = 0 ; i < 3 ; i++ ) for ( int j = 0 ; j < 3 ; j++ ) { FMatrix[i][j] = mtx3[i][j]; } FMatrix[0][3] = FMatrix[1][3] = FMatrix[2][3] = 0; FMatrix[3][0] = FMatrix[3][1] = FMatrix[3][2] = 0; FMatrix[3][3] = 1; } // // LMatrix4 // inline LVector4& operator[]( const int Index ) { return FMatrix[Index]; }; inline const LVector4& operator[]( const int Index ) const { return FMatrix[Index]; }; inline LMatrix4 operator*( const LMatrix4& Matrix ) const; inline LVector4 operator*( const LVector4& Vector ) const; inline LVector3 operator*( const LVector3& Vector ) const; inline void ZeroMatrix() { for ( int i = 0; i <= 3; ++i ) { FMatrix[i] = LVector4( 0 ); } }; inline void IdentityMatrix() { ZeroMatrix(); for ( int i = 0; i <= 3; ++i ) { FMatrix[i][i] = 1.0f; } }; inline void TranslateMatrix( const LVector3& Vector ); inline void ScaleMatrix( const LVector3& Vector ); inline void RotateMatrixAxis( const float Angle, const LVector3& Axis ); inline void RotateMatrix( const LVector3& V1, const LVector3& V2 ); bool IsIdentityMatrix() const; void Inverse(); LMatrix4 GetInversed() const; inline void Transpose(); inline LMatrix4 GetTransposed() const; LMatrix3 ExtractMatrix3() const; void SetSubMatrix( const LMatrix3& Mtx ); float Det() const; inline const float* ToFloatPtr() 
const { return FMatrix[0].ToFloatPtr(); }; inline float* ToFloatPtr() { return FMatrix[0].ToFloatPtr(); }; /// Extract 3x3 part LMatrix3 ToMatrix3() const; /// Obsolete. Use LTransform::euler_angles instead LMatrix4 FromPitchPanRoll( float Pitch, float Pan, float Roll ); #pragma region Static constructors for typical matrices /// Identity matrix (don't use for global variables initialization) static const LMatrix4& Identity(); /// Identity matrix static LMatrix4 IdentityStatic(); /// Translation matrix static LMatrix4 GetTranslateMatrix( const LVector3& Vector ); /// Scaling matrix static LMatrix4 GetScaleMatrix( const LVector3& Vector ); /// Rotation matrix for (axis,angle) pair static LMatrix4 GetRotateMatrixAxis( const float Angle, const LVector3& Axis ); /// Diag * Identity() matrix static LMatrix4 GetDiagonalMatrix( float Diag ); /// Form diagonal matrix from vector components static LMatrix4 GetDiagonalMatrixV( const LVector4& Diag ); static LMatrix4 GetFromPitchPanRoll( float Pitch, float Pan, float Roll ); #pragma endregion }; inline bool operator== ( const LMatrix4& M1, const LMatrix4& M2 ) { const float* M1Ptr = M1.ToFloatPtr(); const float* M2Ptr = M2.ToFloatPtr(); for ( int i = 0; i != 16; ++i ) { if ( M1Ptr[i] != M2Ptr[i] ) { return false; } } return true; } inline bool operator!= ( const LMatrix4& M1, const LMatrix4& M2 ) { const float* M1Ptr = M1.ToFloatPtr(); const float* M2Ptr = M2.ToFloatPtr(); for ( int i = 0; i != 16; ++i ) { if ( M1Ptr[i] != M2Ptr[i] ) { return true; } } return false; } LMatrix4 LMatrix4::operator*( const LMatrix4& Matrix ) const { LMatrix4 Result; const float* M1Ptr = ToFloatPtr(); const float* M2Ptr = Matrix.ToFloatPtr(); float* RPtr = Result.ToFloatPtr(); //#pragma omp parallel for shared(RPtr) for ( int i = 0; i < 4; ++i ) { for ( int j = 0; j < 4; ++j ) { *RPtr = M1Ptr[0] * M2Ptr[ 0 * 4 + j ] + M1Ptr[1] * M2Ptr[ 1 * 4 + j ] + M1Ptr[2] * M2Ptr[ 2 * 4 + j ] + M1Ptr[3] * M2Ptr[ 3 * 4 + j ]; RPtr++; } M1Ptr += 4; } return 
Result; } LVector4 LMatrix4::operator*( const LVector4& Vector ) const { return LVector4( FMatrix[ 0 ].x * Vector.x + FMatrix[ 1 ].x * Vector.y + FMatrix[ 2 ].x * Vector.z + FMatrix[ 3 ].x * Vector.w, FMatrix[ 0 ].y * Vector.x + FMatrix[ 1 ].y * Vector.y + FMatrix[ 2 ].y * Vector.z + FMatrix[ 3 ].y * Vector.w, FMatrix[ 0 ].z * Vector.x + FMatrix[ 1 ].z * Vector.y + FMatrix[ 2 ].z * Vector.z + FMatrix[ 3 ].z * Vector.w, FMatrix[ 0 ].w * Vector.x + FMatrix[ 1 ].w * Vector.y + FMatrix[ 2 ].w * Vector.z + FMatrix[ 3 ].w * Vector.w ); } LVector3 LMatrix4::operator*( const LVector3& Vector ) const { return LVector3( FMatrix[ 0 ].x * Vector.x + FMatrix[ 1 ].x * Vector.y + FMatrix[ 2 ].x * Vector.z + FMatrix[ 3 ].x, FMatrix[ 0 ].y * Vector.x + FMatrix[ 1 ].y * Vector.y + FMatrix[ 2 ].y * Vector.z + FMatrix[ 3 ].y, FMatrix[ 0 ].z * Vector.x + FMatrix[ 1 ].z * Vector.y + FMatrix[ 2 ].z * Vector.z + FMatrix[ 3 ].z ); } void LMatrix4::TranslateMatrix( const LVector3& Vector ) { IdentityMatrix(); FMatrix[3] = LVector4( Vector ); FMatrix[3][3] = 1.0f; } void LMatrix4::ScaleMatrix( const LVector3& Vector ) { ZeroMatrix(); FMatrix[0][0] = Vector.x; FMatrix[1][1] = Vector.y; FMatrix[2][2] = Vector.z; FMatrix[3][3] = 1.0f; } void LMatrix4::RotateMatrixAxis( const float Angle, const LVector3& Axis ) { float CosA = cos( Angle ); float SinA = sin( Angle ); LVector3 NAxis = Axis.GetNormalized(); float Ax = NAxis.x; float Ay = NAxis.y; float Az = NAxis.z; float AxAx = Ax * Ax; float AxAy = Ax * Ay; float AxAz = Ax * Az; float AyAx = AxAy; float AyAy = Ay * Ay; float AyAz = Ay * Az; float AzAx = AxAz; float AzAy = AyAz; float AzAz = Az * Az; FMatrix[0][0] = AxAx + ( 1.0f - AxAx ) * CosA; FMatrix[0][1] = AxAy * ( 1.0f - CosA ) + Az * SinA; FMatrix[0][2] = AxAz * ( 1.0f - CosA ) - Ay * SinA; FMatrix[0][3] = 0; FMatrix[1][0] = AyAx * ( 1.0f - CosA ) - Az * SinA; FMatrix[1][1] = AyAy + ( 1.0f - AyAy ) * CosA; FMatrix[1][2] = AyAz * ( 1.0f - CosA ) + Ax * SinA; FMatrix[1][3] = 0; FMatrix[2][0] 
= AzAx * ( 1.0f - CosA ) + Ay * SinA; FMatrix[2][1] = AzAy * ( 1.0f - CosA ) - Ax * SinA; FMatrix[2][2] = AzAz + ( 1.0f - AzAz ) * CosA; FMatrix[2][3] = 0; FMatrix[3][0] = 0; FMatrix[3][1] = 0; FMatrix[3][2] = 0; FMatrix[3][3] = 1.0f; } void LMatrix4::RotateMatrix( const LVector3& V1, const LVector3& V2 ) { LVector3 Vec1 = V1.GetNormalized(); LVector3 Vec2 = V2.GetNormalized(); LVector3 Axis = Vec1.Cross( Vec2 ); float CosAngle, Angle; if ( Axis.Length() == 0 ) { Axis = LVector3( 0.0f, 0.0f, 1.0f ); } CosAngle = Vec1.Dot( Vec2 ); Angle = acos( CosAngle ); RotateMatrixAxis( Angle, Axis ); } /// Associate a skew-symmetric matrix to the vector V inline LMatrix3 VectorStar( const LVector3& V ) { LMatrix3 M; M[0][0] = 0.0f; M[1][0] = V.x; M[2][0] = V.z; M[0][1] = -V.x; M[1][1] = 0.0f; M[2][1] = -V.y; M[0][2] = -V.z; M[1][2] = V.y; M[2][2] = 0.0f; return M; } /// M[i][j] = V1[i] * V2[j] inline LMatrix3 TensorProduct_VecByVec( const LVector3& V1, const LVector3& V2 ) { LMatrix3 M; for ( int i = 0 ; i < 3 ; i++ ) { for ( int j = 0 ; j < 3 ; j++ ) { M[i][j] = V1[i] * V2[j]; } } return M; } inline void LMatrix4::Transpose() { float t; int i, j; for ( i = 0 ; i < 4 ; i++ ) for ( j = 0 ; j < 4 ; j++ ) { t = FMatrix[i][j]; FMatrix[i][j] = FMatrix[j][i]; FMatrix[j][i] = t; } } inline LMatrix4 LMatrix4::GetTransposed() const { LMatrix4 Res; int i, j; for ( i = 0 ; i < 4 ; i++ ) for ( j = 0 ; j < 4 ; j++ ) { Res[i][j] = FMatrix[j][i]; } return Res; } using mat3 = LMatrix3; using mat4 = LMatrix4; namespace Math { inline LMatrix4 IdentityMatrix4() { static LMatrix4 IdMatrix; IdMatrix.IdentityMatrix(); return IdMatrix; } } // namespace Math class LQuaternion; inline LQuaternion operator*( const LQuaternion& Q1, const LQuaternion& Q2 ); inline LQuaternion operator+( const LQuaternion& Q1, const LQuaternion& Q2 ); inline LQuaternion operator*( const float A, const LQuaternion& B ); /// Quaternion class LQuaternion { public: LVector3 FVec; float FW; public: LQuaternion(): FVec( 0.0f ), 
FW( 1.0f ) {}; LQuaternion( const LQuaternion& q ): FVec( q.FVec.x, q.FVec.y, q.FVec.z ), FW( q.FW ) {}; LQuaternion( float x, float y, float z, float w ): FVec( x, y, z ), FW( w ) {}; LQuaternion( const LVector3& Vec, float w ): FVec( Vec ), FW( w ) {}; explicit LQuaternion( const LVector4& Vec ): FVec( Vec.ToVector3() ), FW( Vec.w ) {}; explicit LQuaternion( const LMatrix3& Mtx ) { FromMatrix3( Mtx ); } explicit LQuaternion( const LMatrix4& Mtx ) { FromMatrix3( Mtx.ExtractMatrix3() ); } // // LQuaternion // inline LQuaternion& Conjugate() { FVec = -FVec; return *this; } inline LQuaternion& operator =( const LQuaternion& Q ) { FVec = Q.FVec; FW = Q.FW; return *this; } inline LQuaternion& operator+=( const LQuaternion& Q ) { FVec += Q.FVec; FW += Q.FW; return *this; } inline LQuaternion& operator-=( const LQuaternion& Q ) { FVec -= Q.FVec; FW -= Q.FW; return *this; } inline LQuaternion& operator*=( const LQuaternion& Q ) { LQuaternion Q1 = *this; LQuaternion Q2 = Q; *this = LQuaternion( Q1.FW * Q2.FVec.x + Q1.FVec.x * Q2.FW + Q1.FVec.y * Q2.FVec.z - Q1.FVec.z * Q2.FVec.y, Q1.FW * Q2.FVec.y + Q1.FVec.y * Q2.FW + Q1.FVec.z * Q2.FVec.x - Q1.FVec.x * Q2.FVec.z, Q1.FW * Q2.FVec.z + Q1.FVec.z * Q2.FW + Q1.FVec.x * Q2.FVec.y - Q1.FVec.y * Q2.FVec.x, Q1.FW * Q2.FW - Q1.FVec.x * Q2.FVec.x - Q1.FVec.y * Q2.FVec.y - Q1.FVec.z * Q2.FVec.z ); return *this; /* LVector3 Vec( Q.FVec * FW + Q.FW * FVec + Q.FVec.Cross( FVec ) ); FW = Q.FW * FW - Q.FVec*FVec; FVec = Vec; return *this; */ } inline LQuaternion& operator*=( const float F ) { FVec *= F; FW *= F; return *this; } inline LQuaternion& operator/=( const float F ) { const float InvF = 1.0f / F; FVec *= InvF; FW *= InvF; return *this; } void Normalize() { LVector4 Vec( FVec, FW ); Vec.Normalize(); FVec = Vec.ToVector3(); FW = Vec.w; } void ReNormalizeW() { float Wr = 1.0f - ( FVec.x * FVec.x ) - ( FVec.y * FVec.y ) - ( FVec.z * FVec.z ); FW = ( Wr < 0.0f ) ? 
0.0f : -sqrt( Wr ); } // NOTE(review): tail of a method that begins before this chunk

	/// Rotate Vector by this quaternion: q * (v,0) * conj(q).
	/// NOTE(review): the conjugate is used as the inverse, so *this is assumed unit-length — confirm at call sites.
	LVector3 RotateVector( const LVector3& Vector ) const
	{
		LQuaternion p ( Vector, 0.0f );   // pure quaternion carrying the vector
		LQuaternion qConj ( -FVec, FW );  // conjugate of *this

		p = *this * p * qConj;

		return p.FVec;
	}

	/// Reset to the identity rotation (0,0,0,1).
	void IdentityQuaternion()
	{
		FVec = LVector3( 0.0f );
		FW = 1.0f;
	}

	/// Build the quaternion from a 3x3 rotation matrix. Branches on the largest of
	/// the trace and the diagonal elements (Shepperd-style) so the sqrt argument stays positive.
	void FromMatrix3( const LMatrix3& mtx )
	{
		float s0, s1, s2;
		int k0, k1, k2, k3;

		float m00 = mtx[0][0];
		float m11 = mtx[1][1];
		float m22 = mtx[2][2];

		// Select the permutation (k0..k3) and signs (s0..s2) by the dominant diagonal term.
		if ( m00 + m11 + m22 > 0.0f )
		{
			k0 = 3; k1 = 2; k2 = 1; k3 = 0;
			s0 = s1 = s2 = 1.0f;
		}
		else if ( m00 > m11 && m00 > m22 )
		{
			k0 = 0; k1 = 1; k2 = 2; k3 = 3;
			s0 = 1.0f; s1 = -1.0f; s2 = -1.0f;
		}
		else if ( m11 > m22 )
		{
			k0 = 1; k1 = 0; k2 = 3; k3 = 2;
			s0 = -1.0f; s1 = 1.0f; s2 = -1.0f;
		}
		else
		{
			k0 = 2; k1 = 3; k2 = 0; k3 = 1;
			s0 = -1.0f; s1 = -1.0f; s2 = 1.0f;
		}

		float t = s0 * m00 + s1 * m11 + s2 * m22 + 1.0f;
		float s = /*ReciprocalSqrt( t )*/ 0.5f / sqrt( t );

		LVector4 vv;

		vv[k0] = s * t;
		vv[k1] = ( mtx[0][1] - s2 * mtx[1][0] ) * s;
		vv[k2] = ( mtx[2][0] - s1 * mtx[0][2] ) * s;
		vv[k3] = ( mtx[1][2] - s0 * mtx[2][1] ) * s;

		FVec.x = vv[0];
		FVec.y = vv[1];
		FVec.z = vv[2];
		// NOTE(review): negating only w yields the inverse rotation; the original author
		// already flagged this as "seems to be erroneous". Kept as-is — verify with a
		// FromMatrix3/ToMatrix3 round-trip before changing.
		FW = -vv[3];
	}

	/// Convert to a 3x3 rotation matrix (standard quaternion-to-matrix expansion).
	LMatrix3 ToMatrix3() const
	{
		LMatrix3 M;

		float wx, wy, wz;
		float xx, yy, yz;
		float xy, xz, zz;
		float x2, y2, z2;

		// Precompute doubled components and their products.
		x2 = FVec.x + FVec.x;
		y2 = FVec.y + FVec.y;
		z2 = FVec.z + FVec.z;

		xx = FVec.x * x2;
		xy = FVec.x * y2;
		xz = FVec.x * z2;
		yy = FVec.y * y2;
		yz = FVec.y * z2;
		zz = FVec.z * z2;
		wx = FW * x2;
		wy = FW * y2;
		wz = FW * z2;

		M[ 0 ][ 0 ] = 1.0f - ( yy + zz );
		M[ 0 ][ 1 ] = xy - wz;
		M[ 0 ][ 2 ] = xz + wy;

		M[ 1 ][ 0 ] = xy + wz;
		M[ 1 ][ 1 ] = 1.0f - ( xx + zz );
		M[ 1 ][ 2 ] = yz - wx;

		M[ 2 ][ 0 ] = xz - wy;
		M[ 2 ][ 1 ] = yz + wx;
		M[ 2 ][ 2 ] = 1.0f - ( xx + yy );

		return M;
	}

	/// Pack (x, y, z, w) into an LVector4.
	LVector4 ToVector4() const
	{
		return LVector4( FVec, FW );
	}

	/// Build from a rotation Axis and an Angle in radians.
	/// NOTE(review): Axis is not normalized here — assumed unit-length; confirm at call sites.
	void FromAxisAngle( const LVector3& Axis, const float Angle )
	{
		const float HalfAngle = Angle / 2.0f;

		FVec = Axis * sinf( HalfAngle );
		FW = cosf( HalfAngle );
	}

	/// Decompose into a unit Axis and an Angle in radians. Falls back to the X axis
	/// when the vector part is near zero (axis undefined for the identity rotation).
	void ToAxisAngle( LVector3& Axis, float& Angle ) const
	{
		Angle = 2.0f * acosf( FW );

		Axis = ( FVec.SqrLength() > ::Math::EPSILON ) ? FVec.GetNormalized() : LVector3( 1.0f, 0.0f, 0.0f );
	}

	/** \brief Spherical linear interpolation between qa and q2 for t in [0,1]; result stored in *this

	    Code from http://www.euclideanspace.com/maths/algebra/realNormedAlgebra/quaternions/slerp/index.htm
	*/
	void SLERP( const LQuaternion& qa, const LQuaternion& q2, float t )
	{
		// local copy of q2 so it can be negated for shortest-arc interpolation
		LQuaternion qb = q2;

		// quaternion to return
		LQuaternion qm; // NOTE(review): unused local, kept for byte-compatibility

		// Calculate angle between them.
		double cosHalfTheta = qa.FVec.Dot( qb.FVec ) + qa.FW * qb.FW;

		// if qa=qb or qa=-qb then theta = 0 and we can return qa
		if ( fabs( cosHalfTheta ) >= 1.0 )
		{
			*this = qa;
			return;
		}

		// Take the shorter arc: flip qb when the dot product is negative.
		if ( cosHalfTheta < 0 )
		{
			qb.FVec = LVector3( -qb.FVec.x, -qb.FVec.y, -qb.FVec.z );
			qb.FW = -qb.FW;
			cosHalfTheta = -cosHalfTheta;
		}

		// Calculate temporary values.
		double halfTheta = acos( cosHalfTheta );
		double sinHalfTheta = sqrt( 1.0 - cosHalfTheta * cosHalfTheta );

		// if theta = 180 degrees then result is not fully defined
		// we could rotate around any axis normal to qa or qb
		if ( fabs( sinHalfTheta ) < 0.001 )
		{
			*this = 0.5f * ( qa + qb );
			return;
		}

		float ratioA = static_cast<float>( sin( ( 1 - t ) * halfTheta ) / sinHalfTheta );
		float ratioB = static_cast<float>( sin( t * halfTheta ) / sinHalfTheta );

		//calculate Quaternion.
		*this = ( ratioA * qa + ratioB * qb );
	}
};

/// Component-wise quaternion sum.
inline LQuaternion operator+( const LQuaternion& Q1, const LQuaternion& Q2 )
{
	return LQuaternion( Q1.FVec + Q2.FVec, Q1.FW + Q2.FW );
}

/// Component-wise quaternion difference.
inline LQuaternion operator-( const LQuaternion& Q1, const LQuaternion& Q2 )
{
	return LQuaternion( Q1.FVec - Q2.FVec, Q1.FW - Q2.FW );
}

/// Component-wise negation (represents the same rotation as Q).
inline LQuaternion operator-( const LQuaternion& Q )
{
	return LQuaternion( -Q.FVec, -Q.FW );
}

/// Scalar scaling of a quaternion.
inline LQuaternion operator*( const float A, const LQuaternion& B )
{
	return LQuaternion( A * B.FVec, A * B.FW );
}

/// Hamilton product Q1 * Q2 (composition of rotations: Q2 applied first).
inline LQuaternion operator*( const LQuaternion& Q1, const LQuaternion& Q2 )
{
	/*
	   return LQuaternion( Q1.FVec * Q2.FW + Q1.FW * Q2.FVec + Q1.FVec.Cross( Q2.FVec ),
	                       Q1.FW * Q2.FW - Q1.FVec*Q2.FVec );
	*/
	return LQuaternion( Q1.FW * Q2.FVec.x + Q1.FVec.x * Q2.FW + Q1.FVec.y * Q2.FVec.z - Q1.FVec.z * Q2.FVec.y,
	                    Q1.FW * Q2.FVec.y + Q1.FVec.y * Q2.FW + Q1.FVec.z * Q2.FVec.x - Q1.FVec.x * Q2.FVec.z,
	                    Q1.FW * Q2.FVec.z + Q1.FVec.z * Q2.FW + Q1.FVec.x * Q2.FVec.y - Q1.FVec.y * Q2.FVec.x,
	                    Q1.FW * Q2.FW - Q1.FVec.x * Q2.FVec.x - Q1.FVec.y * Q2.FVec.y - Q1.FVec.z * Q2.FVec.z );
}

using quat = LQuaternion;

/** INAR-encoding for Euler angles system (index - negation - alternate - reversal)

 i n a r   Sys.    i n a r   Sys.    i n a r   Sys.    i n a r   Sys.
 (1,0,0,0) xzxs   (1,0,1,0) xzys   (1,1,0,0) xyxs   (1,1,1,0) xyzs
 (2,0,0,0) yxys   (2,0,1,0) yxzs   (2,1,0,0) yzys   (2,1,1,0) yzxs
 (3,0,0,0) zyzs   (3,0,1,0) zyxs   (3,1,0,0) zxzs   (3,1,1,0) zxys
 (1,0,0,1) xzxr   (1,0,1,1) yzxr   (1,1,0,1) xyxr   (1,1,1,1) zyxr
 (2,0,0,1) yxyr   (2,0,1,1) zxyr   (2,1,0,1) yzyr   (2,1,1,1) xzyr
 (3,0,0,1) zyzr   (3,0,1,1) xyzr   (3,1,0,1) zxzr   (3,1,1,1) yxzr
*/
/// Every rotation-axis ordering for Euler angles; suffix 's' = static frame, 'r' = rotating frame.
enum LEulerAngleSystem
{
	Euler_xzxs = 0,
	Euler_xzys,
	Euler_xyxs,
	Euler_xyzs,
	Euler_yxys,
	Euler_yxzs,
	Euler_yzys,
	Euler_yzxs,
	Euler_zyzs,
	Euler_zyxs,
	Euler_zxzs,
	Euler_zxys,
	Euler_xzxr,
	Euler_yzxr,
	Euler_xyxr,
	Euler_zyxr,
	Euler_yxyr,
	Euler_zxyr,
	Euler_yzyr,
	Euler_xzyr,
	Euler_zyzr,
	Euler_xyzr,
	Euler_zxzr,
	Euler_yxzr
};

/// Kind of projection matrix, as classified by Math::DetermineProjectionType().
enum LProjectionType
{
	PROJECTION_ORTHOGRAPHIC = 0,
	PROJECTION_PERSPECTIVE  = 1,
	PROJECTION_ERROR        = 2
};

/// Projection/view matrix construction and decomposition helpers.
namespace Math
{
	LMatrix4 Ortho2D( float L, float R, float B, float T );
	LMatrix4 Ortho( float L, float R, float B, float T, float N, float F );
	LMatrix4 Perspective( float FOV, float Aspect, float NearCP, float FarCP );
	/// true - the left eye, false - the right eye
	LMatrix4 PerspectiveStereo( float FOV, float Aspect, float NearCP, float FarCP, float IOD, float FocalLength, bool WhichEye );
	LMatrix4 Frustum( float L, float R, float B, float T, float N, float F );
	LMatrix4 LookAt( LVector3 Eye, LVector3 Center, LVector3 Up );
	/// true - the left eye, false - the right eye
	LMatrix4 LookAtStereo( LVector3 Eye, LVector3 Center, LVector3 Up, float IOD, bool WhichEye );
	LMatrix4 LookAtStereoMatrix( const LMatrix4& View, float IOD, bool WhichEye );
	LVector3 GetViewingDirection( const LMatrix4& View );
	/// from left to right
	LVector3 GetSideDirection( const LMatrix4& View );
	LMatrix4 ProjectReflectionTexture( const LMatrix4& Projection, const LMatrix4& ModelView );
	LVector3 ProjectPoint( LVector3 Point, const LMatrix4& Projection, const LMatrix4& ModelView );
	LVector3 ProjectPointNDC( const LVector3& Point, const LMatrix4& Projection, const LMatrix4& ModelView );
	LVector3 UnProjectPoint( LVector3 Point, const LMatrix4& Projection, const LMatrix4& ModelView );
	LVector3 UnProjectPointNDC( const LVector3& Point, const LMatrix4& Projection, const LMatrix4& ModelView );
	LMatrix4 ObliqueReflectionProjection( const LMatrix4& Projection, const LVector4& ClipPlane );
	/// look at the center of a AABB from such a distance (closest) that the box is entirely visible
	LMatrix4 GetClosestViewForAABB( const LVector3& MinV, const LVector3& MaxV, const LMatrix4& Proj, const LVector3& Eye, const LVector3& Up );
	void FrustumToParams( const LMatrix4& M, float& L, float& R, float& B, float& T, float& N, float& F );
	void PerspectiveToParams( const LMatrix4& M, float& FOV, float& Aspect, float& NearCP, float& FarCP );
	void OrthoToParams( const LMatrix4& M, float& L, float& R, float& B, float& T, float& N, float& F );
	bool IsOrthographicProjection( const LMatrix4& M );
	bool IsPerspectiveProjection( const LMatrix4& M );
	void TransformRayToCoordinates( const LVector3& P, const LVector3& A, const LMatrix4& Transform, LVector3& TransP, LVector3& TransA );
	LProjectionType DetermineProjectionType( const LMatrix4& Projection );
};

/// Utility class representing the 3D transformation
class LTransform
{
public:
	LTransform();
	LTransform( const LVector3& pos, const LQuaternion& quat );
	LTransform( const LMatrix4& mtx4 );

	void SetPositionAndAngles( const LVector3& Pos, float AngleX, float AngleY, float AngleZ );
	void SetPositionAndAngles( const LVector3& Pos, const LVector3& Angles );
	void SetPositionAndAxisAngle( const LVector3& Pos, const LVector3& Axis, float Angle );
	void SetPositionAndOrientation( const LVector3& pos, const LQuaternion& quat );
	void SetPosMatrixAndAxisAngle( const LMatrix4& Pos, const LVector3& Axis, float Angle );
	void SetMatrix4( const LMatrix4& Mtx4 ) { FMatrix = Mtx4; };
	const LMatrix4& GetMatrix4() const { return FMatrix; };
	void GetPositionAndOrientation( LVector3& Pos, LQuaternion& Q ) const;
	void LookAt( const LVector3& From, const LVector3& To, const LVector3& Up );
	/// Lerp + SLerp between O1 and O2 for t in [0,1]
	void Interpolate( const LTransform& O1, const LTransform& O2, float t );
	void SetAngleSystem( LEulerAngleSystem AS ) { FAngleSystem = AS; }
	LEulerAngleSystem GetAngleSystem() const { return FAngleSystem; }
	void SetAngles( const LVector3& Angles );
	LVector3 GetAngles() const;
	void SetAngleTriple( float T1, float T2, float T3 );
	void GetAngleTriple( float& T1, float& T2, float& T3 ) const;
	virtual void SetPosition( const LVector3& P );
	virtual LVector3 GetPosition() const;
public:
	// Full 4x4 transform (rotation + translation), the single source of truth for this class.
	LMatrix4 FMatrix;
	/// Currently used 3-angle system for orientation
	LEulerAngleSystem FAngleSystem;
};

void DecomposeTransformation( const LMatrix4& T, LVector3& Pos, LMatrix4& Rot );
void DecomposeCameraTransformation( const LMatrix4& T, LVector3& Pos, LMatrix4& Rot );
LMatrix4 ComposeTransformation( const LVector3& Pos, const LMatrix4& Rot );
LMatrix4 ComposeCameraTransformation( const LVector3& Pos, const LMatrix4& Rot );

bool IntersectRayToPlane( const LVector3& P, const LVector3& A, const LVector3& N, float D, LVector3& isect );

/** Calculate three Euler angles from orientation in a given axis system (ZXZ etc.) */
void MatrixToAngles( LEulerAngleSystem Sys, const LMatrix3& M, float& T1, float& T2, float& T3 );

/** Calculate orientation from three Euler angles in a given axis system (ZXZ etc.) */
void AnglesToMatrix( LEulerAngleSystem Sys, LMatrix4& M, float T1, float T2, float T3 );

/// Pseudo-random number generation helpers.
namespace Math
{
	void Randomize( int Seed );
	int Random( int L );
	float Random( float L );
	float Random();
	float RandomInRange( float RMin, float RMax );
	LVector3 RandomVector3InRange( const LVector3& RMin, const LVector3& RMax );
	LVector4 RandomVector4InRange( const LVector4& RMin, const LVector4& RMax );
	int RandomInRange( int RMin, int RMax );
};

/// Max noise dimension
const int MAX_DIMENSIONS = 3;

/// Maximum number of octaves in an fBm object
const int MAX_OCTAVES = 128;

/// Transformation of colors between different representation formats
namespace LColors
{
	/// Unpack a 32-bit BGRA8 pixel into a normalized [0..1] RGBA vector.
	inline LVector4 BGRA8toRGBAvec4( const unsigned int* Color )
	{
		unsigned char* BC = ( unsigned char* )Color;

		return LVector4( static_cast<float>( BC[2] ) / 255.0f,
		                 static_cast<float>( BC[1] ) / 255.0f,
		                 static_cast<float>( BC[0] ) / 255.0f,
		                 static_cast<float>( BC[3] ) / 255.0f );
	};
	/// Unpack a 32-bit RGBA8 pixel into a normalized [0..1] RGBA vector.
	inline LVector4 RGBA8toRGBAvec4( const unsigned int* Color )
	{
		unsigned char* BC = ( unsigned char* )Color;

		return LVector4( static_cast<float>( BC[0] ) / 255.0f,
		                 static_cast<float>( BC[1] ) / 255.0f,
		                 static_cast<float>( BC[2] ) / 255.0f,
		                 static_cast<float>( BC[3] ) / 255.0f );
	};
}; // LColors

/// 2D rectangle (usually represent a 2D screen area)
class LRect
{
public:
	LRect() : FExtents() {};
	LRect( const LRect& Rect ) : FExtents( Rect.FExtents ) {};
	explicit LRect( float Value ) : FExtents( Value ) {};
	explicit LRect( const LVector4& Extents ) : FExtents( Extents ) {};
	LRect( float X1, float Y1 ) : FExtents( X1, Y1, 1.0f, 1.0f ) {};
	LRect( float X1, float Y1, float X2, float Y2 ) : FExtents( X1, Y1, X2, Y2 ) {};
	LRect( const LVector2& TL, const LVector2& BR ) : FExtents( TL.x, TL.y, BR.x, BR.y ) {};
	LRect( int X1, int Y1, int X2, int Y2 ) : FExtents( static_cast<float>( X1 ), static_cast<float>( Y1 ), static_cast<float>( X2 ), static_cast<float>( Y2 ) ) {};

	// Corner accessors: FExtents packs (X1, Y1, X2, Y2) into (x, y, z, w).
	inline float X1() const { return FExtents.x; };
	inline float Y1() const { return FExtents.y; };
	inline float X2() const { return FExtents.z; };
	inline float Y2() const { return FExtents.w; };
	inline float& X1() { return FExtents.x; };
	inline float& Y1() { return FExtents.y; };
	inline float& X2() { return FExtents.z; };
	inline float& Y2() { return FExtents.w; };

	inline float GetWidth() const { return fabs( FExtents.z - FExtents.x ); };
	inline float GetHeight() const { return fabs( FExtents.w - FExtents.y ); };
	inline void SetWidth( float Width ) { FExtents.z = FExtents.x + Width; };
	inline void SetHeight( float Height ) { FExtents.w = FExtents.y + Height; };
	inline float GetCenterX() const { return ( FExtents.x + FExtents.z ) * 0.5f; };
	inline float GetCenterY() const { return ( FExtents.y + FExtents.w ) * 0.5f; };
	inline LVector2 GetCenter() const { return LVector2( GetCenterX(), GetCenterY() ); };
	inline LVector2 GetTopLeft() const { return LVector2( FExtents.x, FExtents.y ); };
	inline LVector2 GetTopRight() const { return LVector2( FExtents.z, FExtents.y ); };
	inline LVector2 GetBottomLeft() const { return LVector2( FExtents.x, FExtents.w ); };
	inline LVector2 GetBottomRight() const { return LVector2( FExtents.z, FExtents.w ); };

	/// fit Other rect inside this rect respecting aspect ratio of Other
	LRect Fit( const LRect& Other )
	{
		float OldWidth = Other.GetWidth();
		float OldHeight = Other.GetHeight();

		// Scale Other down by the dominant overflow factor so both dimensions fit.
		float Aspect1 = OldWidth / GetWidth();
		float Aspect2 = OldHeight / GetHeight();
		float Aspect = ::Math::LMax( Aspect1, Aspect2 );

		LRect Result( Other );
		Result.SetWidth( OldWidth / Aspect );
		Result.SetHeight( OldHeight / Aspect );

		return Result;
	}

	/// center Other rect inside this rect
	LRect Center( const LRect& Other )
	{
		float W = Other.GetWidth();
		float H = Other.GetHeight();

		LVector2 C = GetCenter();

		float X = C.x - W / 2;
		float Y = C.y - H / 2;

		return LRect( X, Y, X + W, Y + H );
	}

	/// construct a LRect enclosing this and Other assuming FixOrder() has been already called on both
	void Combine( const LRect& Other )
	{
		FExtents.x = ::Math::LMin( FExtents.x, Other.FExtents.x );
		FExtents.z = ::Math::LMax( FExtents.z, Other.FExtents.z );
		FExtents.y = ::Math::LMin( FExtents.y, Other.FExtents.y );
		FExtents.w = ::Math::LMax( FExtents.w, Other.FExtents.w );
	}

	/// Map pixel coordinates into normalized, half-texel-offset coordinates.
	/// NOTE(review): Y1 uses '- dH' while all other components use '+' — looks asymmetric; confirm intended.
	void Remap( int Width, int Height )
	{
		float W = 1.0f / Width;
		float H = 1.0f / Height;
		float dW = 0.5f * W;
		float dH = 0.5f * H;

		FExtents = LVector4( X1() * W + dW, Y1() * H - dH, X2() * W + dW, Y2() * H + dH );
	}

	/// Move so the top-left corner lands on LeftTop, preserving size.
	inline void MoveTo( const LVector2& LeftTop )
	{
		FExtents = LVector4( LeftTop.x, LeftTop.y, LeftTop.x + GetWidth(), LeftTop.y + GetHeight() );
	}

	/// Translate by Delta.
	inline void MoveRel( const LVector2& Delta )
	{
		FExtents += LVector4( Delta.x, Delta.y, Delta.x, Delta.y );
	}

	/// Check if the Point is inside this rectangle
	inline bool ContainsPoint( const LVector2& Point ) const
	{
		return Point.x >= FExtents.x && Point.y >= FExtents.y && Point.x <= FExtents.z && Point.y <= FExtents.w;
	}

	/// Check if R overlaps this rectangle
	inline bool Overlap( const LRect& R ) const
	{
		return !( X1() > R.X2() || R.X1() > X2() || Y1() > R.Y2() || R.Y1() > Y2() );
	}

	/// ensure X2>=X1 and Y2>=Y1
	void FixOrder()
	{
		if ( FExtents.x > FExtents.z ) { ::Math::SwapValues( FExtents.x, FExtents.z ); }

		if ( FExtents.y > FExtents.w ) { ::Math::SwapValues( FExtents.y, FExtents.w ); }
	}

	inline const LVector4& ToVector4() const { return FExtents; };
	inline LVector4& ToVector4() { return FExtents; };
	inline const float* ToFloatPtr() const { return FExtents.ToFloatPtr(); };
	inline float* ToFloatPtr() { return FExtents.ToFloatPtr(); };

	/// the resulting rect lays within this rect and has the aspect ratio Aspect
	/// NOTE(review): in the W/H > Aspect branch, 'Wt = W * Aspect' looks like it should be 'H * Aspect'
	/// to produce the requested ratio — confirm the Aspect convention before changing.
	LRect GetAdjustedAspectRect( float Aspect ) const
	{
		float W = GetWidth();
		float H = GetHeight();

		if ( W / H > Aspect )
		{
			float Wt = W * Aspect;

			return LRect( X1() + 0.5f * ( W - Wt ), Y1(), X1() + 0.5f * ( W + Wt ), Y1() + H );
		}

		float Ht = H / Aspect;

		return LRect( X1(), Y1() + 0.5f * ( H - Ht ), X1() + W, Y1() + 0.5f * ( H + Ht ) );
	}
public:
	/// X:X1, Y:Y1, Z:X2, W:Y2
	LVector4 FExtents;
};

using ivec4 = LVector4i;
using vec4 = LVector4;
// ---- convolution_winograd_transform_pack4.h (second concatenated file begins here) ----
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2022 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Winograd F(6x6,3x3) input transform, pack-4 NEON layout:
// each 8x8 input tile is multiplied by B^T on rows then columns via the two m-loops,
// with 'tmp' holding the intermediate (row-transformed) tile.
static void conv3x3s1_winograd63_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 6;
    const int h_tiles = (h - 2) / 6;
    const int tiles = w_tiles * h_tiles;

    // const float itm[8][8] = {
    //     {1.0f,  0.0f, -5.25f,  0.00f,  5.25f,  0.00f, -1.0f, 0.0f},
    //
    //     {0.0f,  1.0f,  1.00f, -4.25f, -4.25f,  1.00f,  1.0f, 0.0f},
    //     {0.0f, -1.0f,  1.00f,  4.25f, -4.25f, -1.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  0.5f,  0.25f, -2.50f, -1.25f,  2.00f,  1.0f, 0.0f},
    //     {0.0f, -0.5f,  0.25f,  2.50f, -1.25f, -2.00f,  1.0f, 0.0f},
    //
    //     {0.0f,  2.0f,  4.00f, -2.50f, -5.00f,  0.50f,  1.0f, 0.0f},
    //     {0.0f, -2.0f,  4.00f,  2.50f, -5.00f, -0.50f,  1.0f, 0.0f},
    //
    //     {0.0f, -1.0f,  0.00f,  5.25f,  0.00f, -5.25f,  0.0f, 1.0f}
    // };

    // 0 = r00 - r06 + (r04 - r02) * 5.25
    // 7 = r07 - r01 + (r03 - r05) * 5.25

    // 1 = (r02 + r06 - r04 * 4.25) + (r01 - r03 * 4.25 + r05)
    // 2 = (r02 + r06 - r04 * 4.25) - (r01 - r03 * 4.25 + r05)

    // 3 = (r06 + r02 * 0.25 - r04 * 1.25) + (r01 * 0.5 - r03 * 2.5 + r05 * 2)
    // 4 = (r06 + r02 * 0.25 - r04 * 1.25) - (r01 * 0.5 - r03 * 2.5 + r05 * 2)

    // reuse r04 * 1.25
    // reuse r03 * 2.5
    // 5 = (r06 + (r02 - r04 * 1.25) * 4) + (r01 * 2 - r03 * 2.5 + r05 * 0.5)
    // 6 = (r06 + (r02 - r04 * 1.25) * 4) - (r01 * 2 - r03 * 2.5 + r05 * 0.5)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // intermediate tile: [output row][input row][pack4 lane]
        float tmp[8][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 6) + (j * 6) * 4;

                // Pass 1: transform each of the 8 tile rows.
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);
                    float32x4_t _r06 = vld1q_f32(r0 + 24);
                    float32x4_t _r07 = vld1q_f32(r0 + 28);

                    float32x4_t _tmp0m = vmlaq_n_f32(vsubq_f32(_r00, _r06), vsubq_f32(_r04, _r02), 5.25f);
                    float32x4_t _tmp7m = vmlaq_n_f32(vsubq_f32(_r07, _r01), vsubq_f32(_r03, _r05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_r02, _r06), _r04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_r01, _r05), _r03, 4.25f);

                    float32x4_t _tmp1m = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _tmp2m = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_r06, _r02, 0.25f), _r04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 0.5f), _r03, 2.5f), _r05, 2.f);

                    float32x4_t _tmp3m = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _tmp4m = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_r06, vmlsq_n_f32(_r02, _r04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_r01, 2.f), _r03, 2.5f), _r05, 0.5f);

                    float32x4_t _tmp5m = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _tmp6m = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);
                    vst1q_f32(tmp[6][m], _tmp6m);
                    vst1q_f32(tmp[7][m], _tmp7m);

                    r0 += w * 4;
                }

                // Pass 2: transform columns and scatter into the 64 per-coefficient planes.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;
                float* r0_tm_6 = r0_tm_0 + tiles * 24;
                float* r0_tm_7 = r0_tm_0 + tiles * 28;

                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _r0tm0 = vmlaq_n_f32(vsubq_f32(_tmp00, _tmp06), vsubq_f32(_tmp04, _tmp02), 5.25f);
                    float32x4_t _r0tm7 = vmlaq_n_f32(vsubq_f32(_tmp07, _tmp01), vsubq_f32(_tmp03, _tmp05), 5.25f);

                    float32x4_t _tmp12a = vmlsq_n_f32(vaddq_f32(_tmp02, _tmp06), _tmp04, 4.25f);
                    float32x4_t _tmp12b = vmlsq_n_f32(vaddq_f32(_tmp01, _tmp05), _tmp03, 4.25f);

                    float32x4_t _r0tm1 = vaddq_f32(_tmp12a, _tmp12b);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp12a, _tmp12b);

                    float32x4_t _tmp34a = vmlsq_n_f32(vmlaq_n_f32(_tmp06, _tmp02, 0.25f), _tmp04, 1.25f);
                    float32x4_t _tmp34b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 0.5f), _tmp03, 2.5f), _tmp05, 2.f);

                    float32x4_t _r0tm3 = vaddq_f32(_tmp34a, _tmp34b);
                    float32x4_t _r0tm4 = vsubq_f32(_tmp34a, _tmp34b);

                    float32x4_t _tmp56a = vmlaq_n_f32(_tmp06, vmlsq_n_f32(_tmp02, _tmp04, 1.25f), 4.f);
                    float32x4_t _tmp56b = vmlaq_n_f32(vmlsq_n_f32(vmulq_n_f32(_tmp01, 2.f), _tmp03, 2.5f), _tmp05, 0.5f);

                    float32x4_t _r0tm5 = vaddq_f32(_tmp56a, _tmp56b);
                    float32x4_t _r0tm6 = vsubq_f32(_tmp56a, _tmp56b);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);
                    vst1q_f32(r0_tm_6, _r0tm6);
                    vst1q_f32(r0_tm_7, _r0tm7);

                    // 8 coefficient rows per m-step, each plane is tiles*4 floats apart
                    r0_tm_0 += tiles * 32;
                    r0_tm_1 += tiles * 32;
                    r0_tm_2 += tiles * 32;
                    r0_tm_3 += tiles * 32;
                    r0_tm_4 += tiles * 32;
                    r0_tm_5 += tiles * 32;
                    r0_tm_6 += tiles * 32;
                    r0_tm_7 += tiles * 32;
                }
            }
        }
    }
}

// Winograd F(6x6,3x3) output transform + bias, pack-4 NEON layout:
// multiplies each 8x8 transformed tile by A^T on rows then columns, producing a 6x6 output tile.
static void conv3x3s1_winograd63_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 6;
    const int h_tiles = outh / 6;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[6][8] = {
    //     {1.0f, 1.0f,  1.0f,  1.0f,  1.0f, 32.0f, 32.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  2.0f, -2.0f, 16.0f,-16.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f,  4.0f,  4.0f,  8.0f,  8.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f,  8.0f, -8.0f,  4.0f, -4.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 16.0f, 16.0f,  2.0f,  2.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 32.0f,-32.0f,  1.0f, -1.0f, 1.0f}
    // };

    // 0 = r0 + (r1 + r2) + (r3 + r4)     + (r5 + r6) * 32
    // 1 =      (r1 - r2) + (r3 - r4) * 2 + (r5 - r6) * 16
    // 2 =      (r1 + r2) + (r3 + r4) * 4 + (r5 + r6) * 8
    // 3 =      (r1 - r2) + (r3 - r4) * 8 + (r5 - r6) * 4
    // 4 =      (r1 + r2) + (r3 + r4) * 16+ (r5 + r6) * 2
    // 5 = r7 + (r1 - r2) + (r3 - r4) * 32+ (r5 - r6)

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // per-channel bias vector (zero when no bias is given)
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // intermediate tile: [output row][coefficient row][pack4 lane]
        float tmp[6][8][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;
                const float* output0_tm_6 = output0_tm_0 + tiles * 24;
                const float* output0_tm_7 = output0_tm_0 + tiles * 28;

                float* output0 = out0.row(i * 6) + (j * 6) * 4;

                // Pass 1: gather the 8 coefficient planes and transform rows.
                for (int m = 0; m < 8; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);
                    float32x4_t _out0tm6 = vld1q_f32(output0_tm_6);
                    float32x4_t _out0tm7 = vld1q_f32(output0_tm_7);

                    float32x4_t _tmp024a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp135a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp024b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp135b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp024c = vaddq_f32(_out0tm5, _out0tm6);
                    float32x4_t _tmp135c = vsubq_f32(_out0tm5, _out0tm6);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f));
                    float32x4_t _tmp2m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f);

                    float32x4_t _tmp1m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f);
                    float32x4_t _tmp5m = vaddq_f32(vaddq_f32(_out0tm7, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f));

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    output0_tm_0 += tiles * 32;
                    output0_tm_1 += tiles * 32;
                    output0_tm_2 += tiles * 32;
                    output0_tm_3 += tiles * 32;
                    output0_tm_4 += tiles * 32;
                    output0_tm_5 += tiles * 32;
                    output0_tm_6 += tiles * 32;
                    output0_tm_7 += tiles * 32;
                }

                // Pass 2: transform columns, add bias, write the 6x6 output tile.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);
                    float32x4_t _tmp06 = vld1q_f32(tmp[m][6]);
                    float32x4_t _tmp07 = vld1q_f32(tmp[m][7]);

                    float32x4_t _tmp024a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp135a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp024b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp135b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _tmp024c = vaddq_f32(_tmp05, _tmp06);
                    float32x4_t _tmp135c = vsubq_f32(_tmp05, _tmp06);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp024a), vmlaq_n_f32(_tmp024b, _tmp024c, 32.f)));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 4.f), _tmp024c, 8.f));
                    float32x4_t _out04 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp024a, _tmp024b, 16.f), _tmp024c, 2.f));

                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 2.f), _tmp135c, 16.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vmlaq_n_f32(_tmp135a, _tmp135b, 8.f), _tmp135c, 4.f));
                    float32x4_t _out05 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp07, _tmp135a), vmlaq_n_f32(_tmp135c, _tmp135b, 32.f)));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);
                    vst1q_f32(output0 + 16, _out04);
                    vst1q_f32(output0 + 20, _out05);

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) input transform, pack-4 NEON layout (6x6 tiles, stride 4).
static void conv3x3s1_winograd43_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 4;
    const int h_tiles = (h - 2) / 4;
    const int tiles = w_tiles * h_tiles;

    // const float itm[6][6] = {
    //     {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
    //     {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
    //     {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
    //     {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
    //     {0.0f, 4.0f,  0.0f,-5.0f, 0.0f, 1.0f}
    // };

    // 0 =  4 * r00 - 5 * r02 + r04
    // 1 = -4 * (r01 + r02) + r04 + r03
    // 2 =  4 * (r01 - r02) + r04 - r03
    // 3 = -2 * (r01 - r03) + r04 - r02
    // 4 =  2 * (r01 - r03) + r04 - r02
    // 5 =  4 * r01 - 5 * r03 + r05

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // intermediate tile: [output row][input row][pack4 lane]
        float tmp[6][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 4) + (j * 4) * 4;

                // Pass 1: transform each of the 6 tile rows.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);
                    float32x4_t _r04 = vld1q_f32(r0 + 16);
                    float32x4_t _r05 = vld1q_f32(r0 + 20);

                    float32x4_t _tmp0m = vmlsq_n_f32(vmlaq_n_f32(_r04, _r00, 4.f), _r02, 5.f);
                    float32x4_t _tmp1m = vmlsq_n_f32(vaddq_f32(_r04, _r03), vaddq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(vsubq_f32(_r04, _r03), vsubq_f32(_r01, _r02), 4.f);
                    float32x4_t _tmp3m = vmlsq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp4m = vmlaq_n_f32(vsubq_f32(_r04, _r02), vsubq_f32(_r01, _r03), 2.f);
                    float32x4_t _tmp5m = vmlsq_n_f32(vmlaq_n_f32(_r05, _r01, 4.f), _r03, 5.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);
                    vst1q_f32(tmp[4][m], _tmp4m);
                    vst1q_f32(tmp[5][m], _tmp5m);

                    r0 += w * 4;
                }

                // Pass 2: transform columns and scatter into the 36 per-coefficient planes.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;
                float* r0_tm_4 = r0_tm_0 + tiles * 16;
                float* r0_tm_5 = r0_tm_0 + tiles * 20;

                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _r0tm0 = vmlsq_n_f32(vmlaq_n_f32(_tmp04, _tmp00, 4.f), _tmp02, 5.f);
                    float32x4_t _r0tm1 = vmlsq_n_f32(vaddq_f32(_tmp04, _tmp03), vaddq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm2 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp03), vsubq_f32(_tmp01, _tmp02), 4.f);
                    float32x4_t _r0tm3 = vmlsq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm4 = vmlaq_n_f32(vsubq_f32(_tmp04, _tmp02), vsubq_f32(_tmp01, _tmp03), 2.f);
                    float32x4_t _r0tm5 = vmlsq_n_f32(vmlaq_n_f32(_tmp05, _tmp01, 4.f), _tmp03, 5.f);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);
                    vst1q_f32(r0_tm_4, _r0tm4);
                    vst1q_f32(r0_tm_5, _r0tm5);

                    r0_tm_0 += tiles * 24;
                    r0_tm_1 += tiles * 24;
                    r0_tm_2 += tiles * 24;
                    r0_tm_3 += tiles * 24;
                    r0_tm_4 += tiles * 24;
                    r0_tm_5 += tiles * 24;
                }
            }
        }
    }
}

// Winograd F(4x4,3x3) output transform + bias, pack-4 NEON layout (6x6 -> 4x4 tiles).
static void conv3x3s1_winograd43_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 4;
    const int h_tiles = outh / 4;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[4][6] = {
    //     {1.0f, 1.0f,  1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
    //     {0.0f, 1.0f,  1.0f, 4.0f,  4.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
    // };

    // 0 = r00 + (r01 + r02) + (r03 + r04)
    // 1 =       (r01 - r02) + (r03 - r04) * 2
    // 2 =       (r01 + r02) + (r03 + r04) * 4
    // 3 = r05 + (r01 - r02) + (r03 - r04) * 8

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // per-channel bias vector (zero when no bias is given)
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // intermediate tile: [output row][coefficient row][pack4 lane]
        float tmp[4][6][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;
                const float* output0_tm_4 = output0_tm_0 + tiles * 16;
                const float* output0_tm_5 = output0_tm_0 + tiles * 20;

                float* output0 = out0.row(i * 4) + (j * 4) * 4;

                // Pass 1: gather the 6 coefficient planes and transform rows.
                for (int m = 0; m < 6; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);
                    float32x4_t _out0tm4 = vld1q_f32(output0_tm_4);
                    float32x4_t _out0tm5 = vld1q_f32(output0_tm_5);

                    float32x4_t _tmp02a = vaddq_f32(_out0tm1, _out0tm2);
                    float32x4_t _tmp13a = vsubq_f32(_out0tm1, _out0tm2);

                    float32x4_t _tmp02b = vaddq_f32(_out0tm3, _out0tm4);
                    float32x4_t _tmp13b = vsubq_f32(_out0tm3, _out0tm4);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _tmp02a), _tmp02b);
                    float32x4_t _tmp1m = vmlaq_n_f32(_tmp13a, _tmp13b, 2.f);
                    float32x4_t _tmp2m = vmlaq_n_f32(_tmp02a, _tmp02b, 4.f);
                    float32x4_t _tmp3m = vmlaq_n_f32(vaddq_f32(_out0tm5, _tmp13a), _tmp13b, 8.f);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    output0_tm_0 += tiles * 24;
                    output0_tm_1 += tiles * 24;
                    output0_tm_2 += tiles * 24;
                    output0_tm_3 += tiles * 24;
                    output0_tm_4 += tiles * 24;
                    output0_tm_5 += tiles * 24;
                }

                // Pass 2: transform columns, add bias, write the 4x4 output tile.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);
                    float32x4_t _tmp04 = vld1q_f32(tmp[m][4]);
                    float32x4_t _tmp05 = vld1q_f32(tmp[m][5]);

                    float32x4_t _tmp02a = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _tmp13a = vsubq_f32(_tmp01, _tmp02);

                    float32x4_t _tmp02b = vaddq_f32(_tmp03, _tmp04);
                    float32x4_t _tmp13b = vsubq_f32(_tmp03, _tmp04);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp02a), _tmp02b));
                    float32x4_t _out01 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp13a, _tmp13b, 2.f));
                    float32x4_t _out02 = vaddq_f32(_bias0, vmlaq_n_f32(_tmp02a, _tmp02b, 4.f));
                    float32x4_t _out03 = vaddq_f32(_bias0, vmlaq_n_f32(vaddq_f32(_tmp05, _tmp13a), _tmp13b, 8.f));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);
                    vst1q_f32(output0 + 8, _out02);
                    vst1q_f32(output0 + 12, _out03);

                    output0 += outw * 4;
                }
            }
        }
    }
}

// Winograd F(2x2,3x3) input transform, pack-4 NEON layout (4x4 tiles, stride 2).
static void conv3x3s1_winograd23_transform_input_pack4_neon(const Mat& bottom_blob, Mat& bottom_blob_tm, const Option& opt)
{
    const int w = bottom_blob.w;
    const int h = bottom_blob.h;
    const int inch = bottom_blob.c;

    const int w_tiles = (w - 2) / 2;
    const int h_tiles = (h - 2) / 2;
    const int tiles = w_tiles * h_tiles;

    // const float itm[4][4] = {
    //     {1.0f,  0.0f, -1.0f, 0.0f},
    //     {0.0f,  1.0f,  1.00f, 0.0f},
    //     {0.0f, -1.0f,  1.00f, 0.0f},
    //     {0.0f, -1.0f,  0.00f, 1.0f}
    // };

    // 0 = r00 - r02
    // 1 = r01 + r02
    // 2 = r02 - r01
    // 3 = r03 - r01

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int q = 0; q < inch; q++)
    {
        const Mat img0 = bottom_blob.channel(q);
        Mat img0_tm = bottom_blob_tm.channel(q);

        // intermediate tile: [output row][input row][pack4 lane]
        float tmp[4][4][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* r0 = img0.row(i * 2) + (j * 2) * 4;

                // Pass 1: transform each of the 4 tile rows.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _r00 = vld1q_f32(r0);
                    float32x4_t _r01 = vld1q_f32(r0 + 4);
                    float32x4_t _r02 = vld1q_f32(r0 + 8);
                    float32x4_t _r03 = vld1q_f32(r0 + 12);

                    float32x4_t _tmp0m = vsubq_f32(_r00, _r02);
                    float32x4_t _tmp1m = vaddq_f32(_r01, _r02);
                    float32x4_t _tmp2m = vsubq_f32(_r02, _r01);
                    float32x4_t _tmp3m = vsubq_f32(_r03, _r01);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);
                    vst1q_f32(tmp[2][m], _tmp2m);
                    vst1q_f32(tmp[3][m], _tmp3m);

                    r0 += w * 4;
                }

                // Pass 2: transform columns and scatter into the 16 per-coefficient planes.
                float* r0_tm_0 = (float*)img0_tm + (i * w_tiles + j) * 4;
                float* r0_tm_1 = r0_tm_0 + tiles * 4;
                float* r0_tm_2 = r0_tm_0 + tiles * 8;
                float* r0_tm_3 = r0_tm_0 + tiles * 12;

                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);

                    float32x4_t _r0tm0 = vsubq_f32(_tmp00, _tmp02);
                    float32x4_t _r0tm1 = vaddq_f32(_tmp01, _tmp02);
                    float32x4_t _r0tm2 = vsubq_f32(_tmp02, _tmp01);
                    float32x4_t _r0tm3 = vsubq_f32(_tmp03, _tmp01);

                    vst1q_f32(r0_tm_0, _r0tm0);
                    vst1q_f32(r0_tm_1, _r0tm1);
                    vst1q_f32(r0_tm_2, _r0tm2);
                    vst1q_f32(r0_tm_3, _r0tm3);

                    r0_tm_0 += tiles * 16;
                    r0_tm_1 += tiles * 16;
                    r0_tm_2 += tiles * 16;
                    r0_tm_3 += tiles * 16;
                }
            }
        }
    }
}

// Winograd F(2x2,3x3) output transform + bias, pack-4 NEON layout (4x4 -> 2x2 tiles).
static void conv3x3s1_winograd23_transform_output_pack4_neon(const Mat& top_blob_tm, Mat& top_blob, const Mat& bias, const Option& opt)
{
    const int outw = top_blob.w;
    const int outh = top_blob.h;
    const int outch = top_blob.c;

    const int w_tiles = outw / 2;
    const int h_tiles = outh / 2;
    const int tiles = w_tiles * h_tiles;

    const float* biasptr = bias;

    // const float otm[2][4] = {
    //     {1.0f, 1.0f,  1.0f, 0.0f},
    //     {0.0f, 1.0f, -1.0f, 1.0f}
    // };

    // 0 = r00 + r01 + r02
    // 1 = r01 - r02 + r03

    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        const Mat out0_tm = top_blob_tm.channel(p);
        Mat out0 = top_blob.channel(p);

        // per-channel bias vector (zero when no bias is given)
        float32x4_t _bias0 = biasptr ? vld1q_f32(biasptr + p * 4) : vdupq_n_f32(0.f);

        // intermediate tile: [output row][coefficient row][pack4 lane]
        float tmp[2][4][4];

        // tile
        for (int i = 0; i < h_tiles; i++)
        {
            for (int j = 0; j < w_tiles; j++)
            {
                const float* output0_tm_0 = (const float*)out0_tm + (i * w_tiles + j) * 4;
                const float* output0_tm_1 = output0_tm_0 + tiles * 4;
                const float* output0_tm_2 = output0_tm_0 + tiles * 8;
                const float* output0_tm_3 = output0_tm_0 + tiles * 12;

                float* output0 = out0.row(i * 2) + (j * 2) * 4;

                // Pass 1: gather the 4 coefficient planes and transform rows.
                for (int m = 0; m < 4; m++)
                {
                    float32x4_t _out0tm0 = vld1q_f32(output0_tm_0);
                    float32x4_t _out0tm1 = vld1q_f32(output0_tm_1);
                    float32x4_t _out0tm2 = vld1q_f32(output0_tm_2);
                    float32x4_t _out0tm3 = vld1q_f32(output0_tm_3);

                    float32x4_t _tmp0m = vaddq_f32(vaddq_f32(_out0tm0, _out0tm1), _out0tm2);
                    float32x4_t _tmp1m = vaddq_f32(vsubq_f32(_out0tm1, _out0tm2), _out0tm3);

                    vst1q_f32(tmp[0][m], _tmp0m);
                    vst1q_f32(tmp[1][m], _tmp1m);

                    output0_tm_0 += tiles * 16;
                    output0_tm_1 += tiles * 16;
                    output0_tm_2 += tiles * 16;
                    output0_tm_3 += tiles * 16;
                }

                // Pass 2: transform columns, add bias, write the 2x2 output tile.
                for (int m = 0; m < 2; m++)
                {
                    float32x4_t _tmp00 = vld1q_f32(tmp[m][0]);
                    float32x4_t _tmp01 = vld1q_f32(tmp[m][1]);
                    float32x4_t _tmp02 = vld1q_f32(tmp[m][2]);
                    float32x4_t _tmp03 = vld1q_f32(tmp[m][3]);

                    float32x4_t _out00 = vaddq_f32(_bias0, vaddq_f32(vaddq_f32(_tmp00, _tmp01), _tmp02));
                    float32x4_t _out01 = vaddq_f32(_bias0, vaddq_f32(vsubq_f32(_tmp01, _tmp02), _tmp03));

                    vst1q_f32(output0, _out00);
                    vst1q_f32(output0 + 4, _out01);

                    output0 += outw * 4;
                }
            }
        }
    }
}
normalize_ref.c
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * License); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ /* * Copyright (c) 2020, OPEN AI LAB * Author: jxyang@openailab.com */ #include <math.h> #include <unistd.h> #include "sys_port.h" #include "module.h" #include "tengine_errno.h" #include "tengine_log.h" #include "tengine_ir.h" #include "../../cpu_node_ops.h" #include "tengine_op.h" #include "normalize_param.h" static void norm_channel(float* input, float* output, float* buffer, float* scale, int hw, int channel, int num_thread) { memset(buffer, 0, hw * sizeof(float)); //#pragma omp parallel for num_threads(num_thread) for (int i = 0; i < channel; i++) { for (int j = 0; j < hw; j++) { float data = *(input + i * hw + j); buffer[j] += (data * data); } } //#pragma omp parallel for num_threads(num_thread) for (int j = 0; j < hw; j++) { buffer[j] = 1.f / sqrt(buffer[j]); } //#pragma omp parallel for num_threads(num_thread) for (int i = 0; i < channel; i++) { for (int j = 0; j < hw; j++) { float data = *(input + i * hw + j); *(output + i * hw + j) = data * buffer[j] * scale[i]; } } } static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { return 0; } static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* 
exec_graph) { return 0; } static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph) { struct ir_node* ir_node = exec_node->ir_node; struct ir_graph* ir_graph = ir_node->graph; struct ir_tensor* input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]); struct ir_tensor* output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]); struct ir_tensor* scale_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[1]); normalize_param_t* param = ( normalize_param_t* )(ir_node->op.param_mem); float* input_org = ( float* )input_tensor->data; float* output_org = ( float* )output_tensor->data; float* sclae_org = ( float* )scale_tensor->data; int batch_number = input_tensor->dims[0]; int channel_num = input_tensor->dims[1]; int channel_size = (input_tensor->dims[2]) * (input_tensor->dims[3]); int img_size = channel_num * channel_size; float* buffer = ( float* )sys_malloc(channel_size * sizeof(float)); if (param->channel_shared == 0 && param->across_spatial == 0) { for (int i = 0; i < batch_number; i++) { norm_channel(input_org, output_org, buffer, sclae_org, channel_size, channel_num, exec_graph->num_thread); input_org += img_size; output_org += img_size; } } sys_free(buffer); return 0; } static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node) { return OPS_SCORE_BEST; } static struct node_ops normalize_node_ops = {.prerun = NULL, .run = run, .reshape = NULL, .postrun = NULL, .init_node = init_node, .release_node = release_node, .score = score}; static int ret_normalize_node_ops(void* arg) { return register_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops); } static int unret_normalize_node_ops(void* arg) { return unregister_builtin_node_ops(OP_NORMALIZE, &normalize_node_ops); } AUTO_REGISTER_OPS(ret_normalize_node_ops); AUTO_UNREGISTER_OPS(unret_normalize_node_ops);
taskwait_omp.c
/* -*- Mode: C; c-basic-offset:4 ; indent-tabs-mode:nil ; -*- */
/*
 * See LICENSE.txt in top-level directory.
 */

/*
 * A bunch of n tasks (1st arg) are created by a single thread.
 * Each task creates two tasks more and executes a taskwait directive
 */

#include <assert.h>
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define NUM_TASKS 50000
#define NUM_REPS 1

/* Shared counters reported at the end.  'o' counts completed na() tasks.
 * NOTE(review): 'pp' is never incremented anywhere, so the final check
 * always prints 0 -- confirm whether that is intentional. */
int o = 0;
int pp = 0;

/* Dummy child-task body: counts invocations.  Tasks run concurrently, so
 * the increment must be atomic (it was an unprotected data race before,
 * which could make the final 'o' count come out wrong). */
void na(float value) {
#pragma omp atomic
    o++;
}

/* Scale *a by value. */
void sscal(float value, float *a) { *a = *a * value; }

/* Spawn two child tasks (scale + counter) and wait for both. */
void presscal(float value, float *a) {
#pragma omp task
    { sscal(value, a); }
#pragma omp task
    { na(value); }
#pragma omp taskwait
}

int main(int argc, char *argv[]) {
    int i, r, nthreads;
    double *time, avg_time = 0.0;
    char *str, *endptr;
    float *a;
    double time2 = 0.0;

    /* Discover the team size once. */
#pragma omp parallel
    {
#pragma omp master
        { nthreads = omp_get_num_threads(); }
    }

    if (argc > 1) {
        str = argv[1];
    }
    int ntasks = argc > 1 ? (int)strtoll(str, &endptr, 10) : NUM_TASKS;
    if (ntasks < nthreads)
        ntasks = nthreads;
    int rep = (argc > 2) ? atoi(argv[2]) : NUM_REPS;
    if (rep < 1)
        rep = 1; /* avoid malloc(0) and division by zero below */

    time = malloc(sizeof(double) * rep);
    a = malloc(sizeof(float) * ntasks);
    if (time == NULL || a == NULL) {
        /* allocations were previously unchecked */
        fprintf(stderr, "malloc failed\n");
        return EXIT_FAILURE;
    }
    for (i = 0; i < ntasks; i++) {
        a[i] = i + 100.0f;
    }

    for (r = 0; r < rep; r++) {
        time[r] = omp_get_wtime();
#pragma omp parallel
        {
#pragma omp single
            {
                /* time2 measures only task creation by the single thread */
                time2 = omp_get_wtime();
                for (i = 0; i < ntasks; i++) {
#pragma omp task firstprivate(i)
                    { presscal(0.9f, &a[i]); }
                }
                time2 = omp_get_wtime() - time2;
            }
        }
        time[r] = omp_get_wtime() - time[r];
        avg_time += time[r];
    }

    /* Verify every element was scaled exactly once. */
    for (i = 0; i < ntasks; i++) {
        if (a[i] != (i + 100.0f) * 0.9f) {
            printf("error: a[%d]=%2.f expected %2.f\n", i, a[i],
                   (i + 100.0f) * 0.9f);
        }
    }

    avg_time /= rep;
    printf("nthreads: %d\nntasks: %d\nTime(s):%f\nCreation Time: %f\n",
           nthreads, ntasks, avg_time, time2);
    printf("o=%d and it should be %d\n", o, ntasks);
    printf("pp=%d and it should be %d\n", pp, ntasks);

    free(time);
    free(a);
    return EXIT_SUCCESS;
}
taskdep_tied_threadid.c
// RUN: %libomp-compile-and-run
// REQUIRES: abt

// Verifies that tied OpenMP tasks stay on the same OpenMP thread and the
// same underlying Argobots ULT across task-scheduling points, using a
// diagonal wavefront of dependent tasks as the workload.

#include "omp_testsuite.h"
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

// Sequential reference: fill an n*n grid where each cell is the sum of its
// top and left neighbors (first row/column copied), and return the value of
// the bottom-right cell.  Used to validate the task-parallel computation.
int calc_seq(int n) {
  int i, j, *buffer = (int *)malloc(sizeof(int) * n * n);
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      if (i == 0 && j == 0) {
        buffer[i * n + j] = 1;
      } else if (i == 0) {
        buffer[i * n + j] = buffer[i * n + (j - 1)];
      } else if (j == 0) {
        buffer[i * n + j] = buffer[(i - 1) * n + j];
      } else {
        buffer[i * n + j] = buffer[(i - 1) * n + j] + buffer[i * n + (j - 1)];
      }
    }
  }
  int ret = buffer[(n - 1) * n + (n - 1)];
  free(buffer);
  return ret;
}

// Runs inside each task body and records "tiedness" evidence into
// vals[_val_index] as a bitmask (expected final value: 7):
//   +1  OpenMP thread id unchanged across an "omp taskyield"
//   +2  Argobots ULT handle unchanged across the same taskyield
//   +4  OpenMP thread id unchanged across an explicit ABT_thread_yield()
// Each task uses a unique index (i * n + j), so concurrent tasks never
// write the same vals[] slot.  'vals' must be in scope at the expansion
// site.  Comments are kept outside the macro body because '//' inside a
// backslash-continued macro would swallow the continuation.
#define TASK_TIED_CHECK(_val_index)                                          \
  do {                                                                       \
    int val_index = (_val_index);                                            \
    int omp_thread_id = omp_get_thread_num();                                \
    ABT_thread abt_thread;                                                   \
    ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread));                          \
                                                                             \
    _Pragma("omp taskyield")                                                 \
                                                                             \
    int omp_thread_id2 = omp_get_thread_num();                               \
    if (omp_thread_id == omp_thread_id2) {                                   \
      vals[val_index] += 1;                                                  \
    }                                                                        \
    ABT_thread abt_thread2;                                                  \
    ABT_EXIT_IF_FAIL(ABT_thread_self(&abt_thread2));                         \
    ABT_bool abt_thread_equal;                                               \
    ABT_EXIT_IF_FAIL(ABT_thread_equal(abt_thread, abt_thread2,               \
                                      &abt_thread_equal));                   \
    if (abt_thread_equal == ABT_TRUE) {                                      \
      vals[val_index] += 2;                                                  \
    }                                                                        \
                                                                             \
    ABT_EXIT_IF_FAIL(ABT_thread_yield());                                    \
                                                                             \
    int omp_thread_id3 = omp_get_thread_num();                               \
    if (omp_thread_id2 == omp_thread_id3) {                                  \
      vals[val_index] += 4;                                                  \
    }                                                                        \
  } while (0)

// Build the same n*n wavefront as calc_seq(), one task per cell, wired
// together with depend(in/out) clauses so each cell waits on its top and
// left neighbors.  Returns 1 on success, 0 on any mismatch.
int test_taskdep_tied_threadid(int num_threads) {
  int n = 10;
  int seq_val, task_val;
  // One slot per task; filled by TASK_TIED_CHECK, checked at the end.
  int vals[n * n];
  memset(vals, 0, sizeof(int) * n * n);
#pragma omp parallel shared(task_val) firstprivate(n) num_threads(num_threads)
#pragma omp master
  {
    int i, j;
    int *A_buf = (int *)malloc(sizeof(int) * n * n);
    int **A = (int **)malloc(sizeof(int *) * n);
    for (i = 0; i < n; i++) {
      A[i] = A_buf + (i * n);
      for (j = 0; j < n; j++) {
        // Pre-fill with arbitrary values; every cell is overwritten by
        // its task below.
        A[i][j] = i * n + j;
      }
    }
    // A[0][0] is the root task; everything else depends (transitively)
    // on it through the in/out clauses on the addressed cells.
    for (i = 0; i < n; i++) {
      for (j = 0; j < n; j++) {
        if (i == 0 && j == 0) {
#pragma omp task depend(out:A[i][j]) firstprivate(A, i, j)
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = 1;
          }
        } else if (i == 0) {
          // First row: depends only on the left neighbor.
#pragma omp task depend(in:A[i][j - 1]) depend(out:A[i][j]) \
    firstprivate(A, i, j)
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = A[i][j - 1];
          }
        } else if (j == 0) {
          // First column: depends only on the neighbor above.
#pragma omp task depend(in:A[i - 1][j]) depend(out:A[i][j]) \
    firstprivate(A, i, j)
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = A[i - 1][j];
          }
        } else {
          // Interior cell: waits on both neighbors.
#pragma omp task depend(in:A[i - 1][j], A[i][j - 1]) \
    depend(out:A[i][j])
          {
            TASK_TIED_CHECK(i * n + j);
            A[i][j] = A[i - 1][j] + A[i][j - 1];
          }
        }
      }
    }
#pragma omp taskwait
    task_val = A[n - 1][n - 1];
    free(A);
    free(A_buf);
  }
  seq_val = calc_seq(n);
  if (seq_val != task_val) {
    printf("[%d] Failed: route(%d) = %d (ANS = %d)\n", num_threads, n,
           task_val, seq_val);
    return 0;
  }
  // Every task must have observed all three tiedness conditions (bits 1|2|4).
  int index;
  for (index = 0; index < n * n; index++) {
    if (vals[index] != 7) {
      printf("vals[%d] == %d\n", index, vals[index]);
      return 0;
    }
  }
  return 1;
}

// Repeat the test with team sizes 1..REPETITIONS; exit code is the number
// of failing runs.
int main() {
  int i;
  int num_failed = 0;
  for (i = 0; i < REPETITIONS; i++) {
    if (!test_taskdep_tied_threadid(i + 1)) {
      num_failed++;
    }
  }
  return num_failed;
}
ddot_kahan_omp_avx2_asm.c
#ifdef __AVX__

#include <stdio.h>
#include <stdlib.h> /* malloc/free/exit/EXIT_FAILURE: previously used without a prototype */
#include <math.h>
#include <omp.h>
#include "immintrin.h"

/* Single-threaded AVX2 assembly kernel: Kahan-compensated dot product of
 * the first n elements of a and b.  Writes the sum into *r and returns the
 * compensation term. */
double ddot_kahan_avx2_asm(int, const double*, const double*, double*);

/*
 * Multi-threaded Kahan-compensated dot product: *r = a . b over N elements.
 * Each thread runs the assembly kernel on an aligned, contiguous slice, then
 * the per-thread partials are combined with a scalar Kahan reduction.
 *
 * Fixes over the previous version:
 *  - chunk sizing: the old formula floor(N/alignment) rounded *down*, so
 *    nthreads*gchunk could be < N and the tail elements were silently
 *    dropped from the sum (e.g. N=100, 4 threads, alignment 8 summed only
 *    96 elements).  We now take ceil(N/nthreads) rounded *up* to the
 *    alignment, which always covers all N elements.
 *  - the partial-sum arrays were leaked on every path, including the
 *    N < nthreads early return.
 */
void ddot_kahan_omp_avx2_asm(int N, const double *a, const double *b, double *r)
{
    double *sum;  /* per-thread partial sums */
    double *c;    /* per-thread compensation terms */
    int nthreads;

#pragma omp parallel
    {
#pragma omp single
        {
            nthreads = omp_get_num_threads();
            if ((sum = (double *)malloc(nthreads * sizeof(double))) == NULL) {
                perror("malloc");
                exit(EXIT_FAILURE);
            }
            if ((c = (double *)malloc(nthreads * sizeof(double))) == NULL) {
                perror("malloc");
                exit(EXIT_FAILURE);
            }
        }
    }

    if (N < nthreads) {
        /* Too little work to split: fall back to the serial kernel. */
        free(sum);
        free(c);
        ddot_kahan_avx2_asm(N, a, b, r);
        return;
    }

#pragma omp parallel
    {
        int id = omp_get_thread_num();

        /* Per-thread slice: ceil(N/nthreads) rounded up to the 64-byte
         * alignment (in doubles) expected by the assembly kernel.  The
         * last thread's chunk is clipped to the remaining elements. */
        int alignment = 64 / sizeof(double);
        int gchunk = (N + nthreads - 1) / nthreads;
        gchunk = ((gchunk + alignment - 1) / alignment) * alignment;
        int chunk = gchunk;
        if ((id + 1) * chunk > N)
            chunk = N - (id * chunk);
        if (chunk < 0)
            chunk = 0;

        /* each thread sums its contiguous slice */
        c[id] = ddot_kahan_avx2_asm(chunk, a + id * gchunk, b + id * gchunk, &sum[id]);
    }

    /* Scalar Kahan reduction of the per-thread partials, seeding the
     * compensation with each thread's carried-over correction term. */
    double scalar_c = 0.0;
    double scalar_sum = 0.0;
#pragma novector
    for (int i = 0; i < nthreads; ++i) {
        scalar_c = scalar_c + c[i];
        double y = sum[i] - scalar_c;
        double t = scalar_sum + y;
        scalar_c = (t - scalar_sum) - y;
        scalar_sum = t;
    }
    *r = scalar_sum;

    free(sum);
    free(c);
}
#endif
vector.c
/******************************************************************************
 * Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
 * HYPRE Project Developers. See the top-level COPYRIGHT file for details.
 *
 * SPDX-License-Identifier: (Apache-2.0 OR MIT)
 ******************************************************************************/

/******************************************************************************
 *
 * Member functions for hypre_Vector class.
 *
 *****************************************************************************/

#include "_hypre_onedpl.hpp"
#include "seq_mv.h"
#include "_hypre_utilities.hpp" //RL: TODO vector_device.c, include cuda there

/*--------------------------------------------------------------------------
 * hypre_SeqVectorCreate
 *
 * Allocates the vector struct on the host and fills in single-vector
 * defaults.  The data array itself is NOT allocated here -- that happens
 * in hypre_SeqVectorInitialize(_v2).  The memory location is inherited
 * from the global hypre handle.
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqVectorCreate( HYPRE_Int size )
{
   hypre_Vector  *vector;

   vector = hypre_CTAlloc(hypre_Vector, 1, HYPRE_MEMORY_HOST);

   hypre_VectorData(vector) = NULL;
   hypre_VectorSize(vector) = size;

   hypre_VectorNumVectors(vector) = 1;
   hypre_VectorMultiVecStorageMethod(vector) = 0;

   /* set defaults */
   hypre_VectorOwnsData(vector) = 1;

   hypre_VectorMemoryLocation(vector) = hypre_HandleMemoryLocation(hypre_handle());

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqMultiVectorCreate
 *
 * Same as hypre_SeqVectorCreate but with num_vectors logical vectors
 * sharing one data array (storage layout chosen at initialize time).
 *--------------------------------------------------------------------------*/

hypre_Vector *
hypre_SeqMultiVectorCreate( HYPRE_Int size, HYPRE_Int num_vectors )
{
   hypre_Vector *vector = hypre_SeqVectorCreate(size);
   hypre_VectorNumVectors(vector) = num_vectors;

   return vector;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorDestroy
 *
 * Frees the data array only if this vector owns it (see
 * hypre_SeqVectorSetDataOwner / shallow clones), then frees the struct.
 * Safe to call with NULL.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorDestroy( hypre_Vector *vector )
{
   HYPRE_Int ierr = 0;

   if (vector)
   {
      /* data may live on device; free it where it was allocated */
      HYPRE_MemoryLocation memory_location = hypre_VectorMemoryLocation(vector);

      if ( hypre_VectorOwnsData(vector) )
      {
         hypre_TFree(hypre_VectorData(vector), memory_location);
      }

      hypre_TFree(vector, HYPRE_MEMORY_HOST);
   }

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInitialize
 *
 * _v2 variant: allocates the (possibly multivector) data array at the given
 * memory location, unless data was already attached, and sets the strides
 * for the chosen multivector storage method:
 *   method 0: vectors stored contiguously (vecstride = size, idxstride = 1)
 *   method 1: vectors interleaved        (vecstride = 1, idxstride = num_vectors)
 * Returns nonzero for an unknown storage method.
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorInitialize_v2( hypre_Vector *vector, HYPRE_MemoryLocation memory_location )
{
   HYPRE_Int  size = hypre_VectorSize(vector);
   HYPRE_Int  ierr = 0;
   HYPRE_Int  num_vectors = hypre_VectorNumVectors(vector);
   HYPRE_Int  multivec_storage_method = hypre_VectorMultiVecStorageMethod(vector);

   hypre_VectorMemoryLocation(vector) = memory_location;

   /* Caveat: for pre-existing data, the memory location must be guaranteed
    * to be consistent with `memory_location'
    * Otherwise, mismatches will exist and problems will be encountered
    * when being used, and freed */
   if ( !hypre_VectorData(vector) )
   {
      hypre_VectorData(vector) = hypre_CTAlloc(HYPRE_Complex, num_vectors * size, memory_location);
   }

   if ( multivec_storage_method == 0 )
   {
      hypre_VectorVectorStride(vector) = size;
      hypre_VectorIndexStride(vector) = 1;
   }
   else if ( multivec_storage_method == 1 )
   {
      hypre_VectorVectorStride(vector) = 1;
      hypre_VectorIndexStride(vector) = num_vectors;
   }
   else
   {
      ++ierr;
   }

   return ierr;
}

/* Convenience wrapper: initialize at the vector's current memory location. */
HYPRE_Int
hypre_SeqVectorInitialize( hypre_Vector *vector )
{
   HYPRE_Int ierr;

   ierr = hypre_SeqVectorInitialize_v2( vector, hypre_VectorMemoryLocation(vector) );

   return ierr;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorSetDataOwner
 *
 * Marks whether Destroy should free the data array (1) or leave it to the
 * true owner (0, e.g. after a shallow clone).
 *--------------------------------------------------------------------------*/

HYPRE_Int
hypre_SeqVectorSetDataOwner( hypre_Vector *vector,
                             HYPRE_Int     owns_data )
{
   HYPRE_Int ierr = 0;

   hypre_VectorOwnsData(vector) = owns_data;

   return ierr;
}

/*--------------------------------------------------------------------------
 * ReadVector
*--------------------------------------------------------------------------*/ hypre_Vector * hypre_SeqVectorRead( char *file_name ) { hypre_Vector *vector; FILE *fp; HYPRE_Complex *data; HYPRE_Int size; HYPRE_Int j; /*---------------------------------------------------------- * Read in the data *----------------------------------------------------------*/ fp = fopen(file_name, "r"); hypre_fscanf(fp, "%d", &size); vector = hypre_SeqVectorCreate(size); hypre_VectorMemoryLocation(vector) = HYPRE_MEMORY_HOST; hypre_SeqVectorInitialize(vector); data = hypre_VectorData(vector); for (j = 0; j < size; j++) { hypre_fscanf(fp, "%le", &data[j]); } fclose(fp); /* multivector code not written yet */ hypre_assert( hypre_VectorNumVectors(vector) == 1 ); return vector; } /*-------------------------------------------------------------------------- * hypre_SeqVectorPrint *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorPrint( hypre_Vector *vector, char *file_name ) { FILE *fp; HYPRE_Complex *data; HYPRE_Int size, num_vectors, vecstride, idxstride; HYPRE_Int i, j; HYPRE_Complex value; HYPRE_Int ierr = 0; num_vectors = hypre_VectorNumVectors(vector); vecstride = hypre_VectorVectorStride(vector); idxstride = hypre_VectorIndexStride(vector); /*---------------------------------------------------------- * Print in the data *----------------------------------------------------------*/ data = hypre_VectorData(vector); size = hypre_VectorSize(vector); fp = fopen(file_name, "w"); if ( hypre_VectorNumVectors(vector) == 1 ) { hypre_fprintf(fp, "%d\n", size); } else { hypre_fprintf(fp, "%d vectors of size %d\n", num_vectors, size ); } if ( num_vectors > 1 ) { for ( j = 0; j < num_vectors; ++j ) { hypre_fprintf(fp, "vector %d\n", j ); for (i = 0; i < size; i++) { value = data[ j * vecstride + i * idxstride ]; #ifdef HYPRE_COMPLEX hypre_fprintf(fp, "%.14e , %.14e\n", hypre_creal(value), hypre_cimag(value)); #else hypre_fprintf(fp, "%.14e\n", 
value); #endif } } } else { for (i = 0; i < size; i++) { #ifdef HYPRE_COMPLEX hypre_fprintf(fp, "%.14e , %.14e\n", hypre_creal(data[i]), hypre_cimag(data[i])); #else hypre_fprintf(fp, "%.14e\n", data[i]); #endif } } fclose(fp); return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorSetConstantValues *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorSetConstantValues( hypre_Vector *v, HYPRE_Complex value ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *vector_data = hypre_VectorData(v); HYPRE_Int size = hypre_VectorSize(v); HYPRE_Int ierr = 0; size *= hypre_VectorNumVectors(v); //hypre_SeqVectorPrefetch(v, HYPRE_MEMORY_DEVICE); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) if (size > 0) { HYPRE_THRUST_CALL( fill_n, vector_data, size, value ); } #elif defined(HYPRE_USING_SYCL) if (size > 0) { HYPRE_ONEDPL_CALL( std::fill_n, vector_data, size, value ); } #else HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(vector_data) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { vector_data[i] = value; } #endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */ #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorSetRandomValues * * returns vector of values randomly distributed between -1.0 and +1.0 *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorSetRandomValues( hypre_Vector *v, HYPRE_Int seed ) { HYPRE_Complex *vector_data = hypre_VectorData(v); HYPRE_Int size = 
hypre_VectorSize(v); HYPRE_Int i; HYPRE_Int ierr = 0; hypre_SeedRand(seed); size *= hypre_VectorNumVectors(v); if (hypre_GetActualMemLocation(hypre_VectorMemoryLocation(v)) == hypre_MEMORY_HOST) { /* RDF: threading this loop may cause problems because of hypre_Rand() */ for (i = 0; i < size; i++) { vector_data[i] = 2.0 * hypre_Rand() - 1.0; } } else { HYPRE_Complex *h_data = hypre_TAlloc(HYPRE_Complex, size, HYPRE_MEMORY_HOST); for (i = 0; i < size; i++) { h_data[i] = 2.0 * hypre_Rand() - 1.0; } hypre_TMemcpy(vector_data, h_data, HYPRE_Complex, size, hypre_VectorMemoryLocation(v), HYPRE_MEMORY_HOST); hypre_TFree(h_data, HYPRE_MEMORY_HOST); } return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorCopy * copies data from x to y * if size of x is larger than y only the first size_y elements of x are * copied to y *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorCopy( hypre_Vector *x, hypre_Vector *y ) { #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Int ierr = 0; size_t size = hypre_min( hypre_VectorSize(x), hypre_VectorSize(y) ) * hypre_VectorNumVectors(x); hypre_TMemcpy( hypre_VectorData(y), hypre_VectorData(x), HYPRE_Complex, size, hypre_VectorMemoryLocation(y), hypre_VectorMemoryLocation(x) ); #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorCloneDeep * Returns a complete copy of x - a deep copy, with its own copy of the data. 
*--------------------------------------------------------------------------*/ hypre_Vector* hypre_SeqVectorCloneDeep_v2( hypre_Vector *x, HYPRE_MemoryLocation memory_location ) { HYPRE_Int size = hypre_VectorSize(x); HYPRE_Int num_vectors = hypre_VectorNumVectors(x); hypre_Vector *y = hypre_SeqMultiVectorCreate( size, num_vectors ); hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x); hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x); hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x); hypre_SeqVectorInitialize_v2(y, memory_location); hypre_SeqVectorCopy( x, y ); return y; } hypre_Vector* hypre_SeqVectorCloneDeep( hypre_Vector *x ) { return hypre_SeqVectorCloneDeep_v2(x, hypre_VectorMemoryLocation(x)); } /*-------------------------------------------------------------------------- * hypre_SeqVectorCloneShallow * Returns a complete copy of x - a shallow copy, pointing the data of x *--------------------------------------------------------------------------*/ hypre_Vector * hypre_SeqVectorCloneShallow( hypre_Vector *x ) { HYPRE_Int size = hypre_VectorSize(x); HYPRE_Int num_vectors = hypre_VectorNumVectors(x); hypre_Vector * y = hypre_SeqMultiVectorCreate( size, num_vectors ); hypre_VectorMultiVecStorageMethod(y) = hypre_VectorMultiVecStorageMethod(x); hypre_VectorVectorStride(y) = hypre_VectorVectorStride(x); hypre_VectorIndexStride(y) = hypre_VectorIndexStride(x); hypre_VectorMemoryLocation(y) = hypre_VectorMemoryLocation(x); hypre_VectorData(y) = hypre_VectorData(x); hypre_SeqVectorSetDataOwner( y, 0 ); hypre_SeqVectorInitialize(y); return y; } /*-------------------------------------------------------------------------- * hypre_SeqVectorScale *--------------------------------------------------------------------------*/ HYPRE_Int hypre_SeqVectorScale( HYPRE_Complex alpha, hypre_Vector *y ) { /* special cases */ if (alpha == 1.0) { return 0; } if (alpha == 0.0) { return hypre_SeqVectorSetConstantValues(y, 0.0); } #ifdef 
HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime(); #endif HYPRE_Complex *y_data = hypre_VectorData(y); HYPRE_Int size = hypre_VectorSize(y); HYPRE_Int ierr = 0; size *= hypre_VectorNumVectors(y); //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE); #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_CUBLAS) HYPRE_CUBLAS_CALL( cublasDscal(hypre_HandleCublasHandle(hypre_handle()), size, &alpha, y_data, 1) ); #else HYPRE_THRUST_CALL( transform, y_data, y_data + size, y_data, alpha * _1 ); #endif // #if defined(HYPRE_USING_CUBLAS) #elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #if defined(HYPRE_USING_ONEMKLBLAS) HYPRE_SYCL_CALL( oneapi::mkl::blas::scal(*hypre_HandleComputeStream(hypre_handle()), size, alpha, y_data, 1).wait() ); #else HYPRE_ONEDPL_CALL( std::transform, y_data, y_data + size, y_data, [alpha](HYPRE_Complex y) -> HYPRE_Complex { return alpha * y; } ); #endif // #if defined(HYPRE_USING_ONEMKL) #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) #else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) HYPRE_Int i; #if defined(HYPRE_USING_DEVICE_OPENMP) #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data) #elif defined(HYPRE_USING_OPENMP) #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE #endif for (i = 0; i < size; i++) { y_data[i] *= alpha; } #endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL) #if defined(HYPRE_USING_GPU) hypre_SyncComputeStream(hypre_handle()); #endif #ifdef HYPRE_PROFILE hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime(); #endif return ierr; } /*-------------------------------------------------------------------------- * hypre_SeqVectorAxpy 
 *--------------------------------------------------------------------------*/

/* y <- alpha*x + y.  Multivector-aware: the element count is scaled by the
 * number of component vectors.  Dispatches to cuBLAS/Thrust, oneMKL/oneDPL,
 * device OpenMP, or host OpenMP depending on the build configuration. */
HYPRE_Int
hypre_SeqVectorAxpy( HYPRE_Complex alpha,
                     hypre_Vector *x,
                     hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr   = 0;

   /* treat a multivector as one long contiguous array */
   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDaxpy(hypre_HandleCublasHandle(hypre_handle()),
                                  size, &alpha, x_data, 1, y_data, 1) );
#else
   HYPRE_THRUST_CALL( transform, x_data, x_data + size, y_data, y_data, alpha * _1 + _2 );
#endif // #if defined(HYPRE_USING_CUBLAS)
#elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_ONEMKLBLAS)
   HYPRE_SYCL_CALL( oneapi::mkl::blas::axpy(*hypre_HandleComputeStream(hypre_handle()),
                                            size, alpha, x_data, 1, y_data, 1).wait() );
#else
   HYPRE_ONEDPL_CALL( std::transform, x_data, x_data + size, y_data, y_data,
                      [alpha](HYPRE_Complex x, HYPRE_Complex y) -> HYPRE_Complex
                      { return alpha * x + y; } );
#endif // #if defined(HYPRE_USING_ONEMKL)
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] += alpha * x_data[i];
   }
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}

/* y = y + x ./ b   (elementwise divide-and-accumulate over size(b) entries) */
HYPRE_Int
hypre_SeqVectorElmdivpy( hypre_Vector *x,
                         hypre_Vector *b,
                         hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *b_data = hypre_VectorData(b);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(b);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
   //HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x), hypre_VectorMemoryLocation(b) );
   //RL: TODO back to hypre_GetExecPolicy2 later
   /* NOTE(review): execution policy is hard-wired to the device here (see the
    * RL TODO above); ElmdivpyMarked below still queries the policy. */
   HYPRE_ExecutionPolicy exec = HYPRE_EXEC_DEVICE;

   if (exec == HYPRE_EXEC_DEVICE)
   {
      //TODO
      //hypre_SeqVectorElmdivpyDevice(x, b, y);
      /*
      #if defined(HYPRE_USING_DEVICE_OPENMP)
      #pragma omp target teams distribute parallel for private(i) is_device_ptr(u_data,v_data,l1_norms)
      #endif
      */
      hypreDevice_IVAXPY(size, b_data, x_data, y_data);
   }
   else
#endif
   {
      HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         y_data[i] += x_data[i] / b_data[i];
      }
   }

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/* y[i] += x[i] / b[i] where marker[i] == marker_val */
HYPRE_Int
hypre_SeqVectorElmdivpyMarked( hypre_Vector *x,
                               hypre_Vector *b,
                               hypre_Vector *y,
                               HYPRE_Int    *marker,
                               HYPRE_Int     marker_val)
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *b_data = hypre_VectorData(b);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(b);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_ExecutionPolicy exec = hypre_GetExecPolicy2( hypre_VectorMemoryLocation(x),
                                                      hypre_VectorMemoryLocation(b) );

   if (exec == HYPRE_EXEC_DEVICE)
   {
      hypreDevice_IVAXPYMarked(size, b_data, x_data, y_data, marker, marker_val);
   }
   else
#endif
   {
      HYPRE_Int i;
#ifdef HYPRE_USING_OPENMP
      #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
      for (i = 0; i < size; i++)
      {
         if (marker[i] == marker_val)
         {
            y_data[i] += x_data[i] / b_data[i];
         }
      }
   }

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return hypre_error_flag;
}

/*--------------------------------------------------------------------------
 * hypre_SeqVectorInnerProd
 *--------------------------------------------------------------------------*/

/* Returns <y, x> = sum_i conj(y[i]) * x[i] (multivector-aware).
 * Only the real case is implemented for the GPU backends. */
HYPRE_Real
hypre_SeqVectorInnerProd( hypre_Vector *x,
                          hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Real     result = 0.0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
#ifndef HYPRE_COMPLEX
#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_CUBLAS)
   HYPRE_CUBLAS_CALL( cublasDdot(hypre_HandleCublasHandle(hypre_handle()),
                                 size, x_data, 1, y_data, 1, &result) );
#else
   result = HYPRE_THRUST_CALL( inner_product, x_data, x_data + size, y_data, 0.0 );
#endif // #if defined(HYPRE_USING_CUBLAS)
#elif defined(HYPRE_USING_SYCL) // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#if defined(HYPRE_USING_ONEMKLBLAS)
   /* oneMKL writes the dot product to device memory; copy it back and free */
   HYPRE_Real *result_dev = hypre_CTAlloc(HYPRE_Real, 1, HYPRE_MEMORY_DEVICE);
   HYPRE_SYCL_CALL( oneapi::mkl::blas::dot(*hypre_HandleComputeStream(hypre_handle()),
                                           size, x_data, 1, y_data, 1, result_dev).wait() );
   hypre_TMemcpy(&result, result_dev, HYPRE_Real, 1, HYPRE_MEMORY_HOST, HYPRE_MEMORY_DEVICE);
   hypre_TFree(result_dev, HYPRE_MEMORY_DEVICE);
#else
   result = HYPRE_ONEDPL_CALL( std::transform_reduce, x_data, x_data + size, y_data, 0.0 );
#endif // #if defined(HYPRE_USING_ONEMKLBLAS)
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
#else // #ifndef HYPRE_COMPLEX
   /* TODO */
#error "Complex inner product"
#endif // #ifndef HYPRE_COMPLEX
#else // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) reduction(+:result) is_device_ptr(y_data,x_data) map(result)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) reduction(+:result) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      result += hypre_conj(y_data[i]) * x_data[i];
   }
#endif // #if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) || defined(HYPRE_USING_SYCL)

#if defined(HYPRE_USING_GPU)
   hypre_SyncComputeStream(hypre_handle());
#endif

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return result;
}

//TODO

/*--------------------------------------------------------------------------
 * hypre_VectorSumElts:
 * Returns the sum of all vector elements.
 *--------------------------------------------------------------------------*/

/* Host-only reduction over hypre_VectorSize(vector) entries. */
HYPRE_Complex
hypre_SeqVectorSumElts( hypre_Vector *vector )
{
   HYPRE_Complex  sum  = 0;
   HYPRE_Complex *data = hypre_VectorData( vector );
   HYPRE_Int      size = hypre_VectorSize( vector );
   HYPRE_Int      i;

#ifdef HYPRE_USING_OPENMP
   #pragma omp parallel for private(i) reduction(+:sum) HYPRE_SMP_SCHEDULE
#endif
   for ( i = 0; i < size; ++i )
   {
      sum += data[i];
   }

   return sum;
}

/* Prefetch the vector's data to the given memory location.  Only meaningful
 * with unified memory; returns 1 if the vector is not device-resident. */
HYPRE_Int
hypre_SeqVectorPrefetch( hypre_Vector *x, HYPRE_MemoryLocation memory_location)
{
   HYPRE_Int ierr = 0;
#ifdef HYPRE_USING_UNIFIED_MEMORY
   if (hypre_VectorMemoryLocation(x) != HYPRE_MEMORY_DEVICE)
   {
      /* hypre_error_w_msg(HYPRE_ERROR_GENERIC," Error! CUDA Prefetch with non-unified momory\n");*/
      return 1;
   }

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Int      size   = hypre_VectorSize(x) * hypre_VectorNumVectors(x);

   if (size == 0)
   {
      return ierr;
   }

   hypre_MemPrefetch(x_data, sizeof(HYPRE_Complex)*size, memory_location);
#endif

   return ierr;
}

#if 0
/* y[i] = max(alpha*x[i], beta*y[i]) */
/* NOTE(review): disabled code; the host loop uses '+=' which disagrees with
 * the stated contract above ('=') -- confirm before re-enabling. */
HYPRE_Int
hypre_SeqVectorMax( HYPRE_Complex alpha,
                    hypre_Vector *x,
                    HYPRE_Complex beta,
                    hypre_Vector *y )
{
#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] -= hypre_MPI_Wtime();
#endif

   HYPRE_Complex *x_data = hypre_VectorData(x);
   HYPRE_Complex *y_data = hypre_VectorData(y);
   HYPRE_Int      size   = hypre_VectorSize(x);
   HYPRE_Int      ierr   = 0;

   size *= hypre_VectorNumVectors(x);

   //hypre_SeqVectorPrefetch(x, HYPRE_MEMORY_DEVICE);
   //hypre_SeqVectorPrefetch(y, HYPRE_MEMORY_DEVICE);

   thrust::maximum<HYPRE_Complex> mx;

#if defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP)
   HYPRE_THRUST_CALL( transform,
                      thrust::make_transform_iterator(x_data,        alpha * _1),
                      thrust::make_transform_iterator(x_data + size, alpha * _1),
                      thrust::make_transform_iterator(y_data,        beta * _1),
                      y_data,
                      mx );
#else
   HYPRE_Int i;
#if defined(HYPRE_USING_DEVICE_OPENMP)
   #pragma omp target teams distribute parallel for private(i) is_device_ptr(y_data, x_data)
#elif defined(HYPRE_USING_OPENMP)
   #pragma omp parallel for private(i) HYPRE_SMP_SCHEDULE
#endif
   for (i = 0; i < size; i++)
   {
      y_data[i] += hypre_max(alpha * x_data[i], beta * y_data[i]);
   }
#endif /* defined(HYPRE_USING_CUDA) || defined(HYPRE_USING_HIP) */

   hypre_SyncComputeStream(hypre_handle());

#ifdef HYPRE_PROFILE
   hypre_profile_times[HYPRE_TIMER_ID_BLAS1] += hypre_MPI_Wtime();
#endif

   return ierr;
}
#endif
core_zpotrf.c
/** * * @file * * PLASMA is a software package provided by: * University of Tennessee, US, * University of Manchester, UK. * * @precisions normal z -> c d s * **/ #include <plasma_core_blas.h> #include "plasma_types.h" #include "core_lapack.h" /***************************************************************************//** * * @ingroup core_potrf * * Performs the Cholesky factorization of a Hermitian positive definite * matrix A. The factorization has the form * * \f[ A = L \times L^H, \f] * or * \f[ A = U^H \times U, \f] * * where U is an upper triangular matrix and L is a lower triangular matrix. * ******************************************************************************* * * @param[in] uplo * - PlasmaUpper: Upper triangle of A is stored; * - PlasmaLower: Lower triangle of A is stored. * * @param[in] n * The order of the matrix A. n >= 0. * * @param[in,out] A * On entry, the Hermitian positive definite matrix A. * If uplo = PlasmaUpper, the leading N-by-N upper triangular part of A * contains the upper triangular part of the matrix A, and the strictly * lower triangular part of A is not referenced. * If uplo = PlasmaLower, the leading N-by-N lower triangular part of A * contains the lower triangular part of the matrix A, and the strictly * upper triangular part of A is not referenced. * On exit, if return value = 0, the factor U or L from the Cholesky * factorization A = U^H*U or A = L*L^H. * * @param[in] lda * The leading dimension of the array A. lda >= max(1,n). 
 *
 ******************************************************************************/
// Sequential tile kernel: Cholesky factorization of the n-by-n tile A via
// LAPACK (zpotrf).  Declared weak so a tuned implementation can override it
// at link time.  Returns LAPACK's info code: 0 on success, i > 0 if the
// leading minor of order i is not positive definite.
__attribute__((weak))
int plasma_core_zpotrf(plasma_enum_t uplo,
                       int n,
                       plasma_complex64_t *A, int lda)
{
    return LAPACKE_zpotrf_work(LAPACK_COL_MAJOR,
                               lapack_const(uplo),
                               n,
                               A, lda);
}

/******************************************************************************/
// Asynchronous OpenMP-task wrapper.  The inout dependence on A[0:lda*n]
// orders this factorization against other tasks touching the same tile.
// The task is a no-op if the sequence has already failed; on a nonzero
// info the sequence is failed with iinfo+info so the caller can identify
// the offending diagonal entry.
void plasma_core_omp_zpotrf(plasma_enum_t uplo,
                            int n,
                            plasma_complex64_t *A, int lda,
                            int iinfo,
                            plasma_sequence_t *sequence, plasma_request_t *request)
{
    #pragma omp task depend(inout:A[0:lda*n])
    {
        if (sequence->status == PlasmaSuccess) {
            int info = plasma_core_zpotrf(uplo, n, A, lda);
            if (info != 0)
                plasma_request_fail(sequence, request, iinfo+info);
        }
    }
}
LU_OpenMP.c
#include<stdio.h>
#include<stdlib.h>
#include<malloc.h>
#include<omp.h>

// Number of OpenMP threads (set from argv[1]).
int n_threads = 1;

// Square matrix A of order N and its LU factors: A = L * U (Doolittle form,
// unit diagonal on L).
int **A;
int N = 0;
int **L, **U;

// Allocate an n x n integer matrix; calloc zero-fills it, which also serves
// as the initialization of L and U.  Aborts on allocation failure.
static int **alloc_matrix(int n)
{
    int **m = (int **)calloc(n, sizeof(int*));
    if (m == NULL) {
        perror("calloc failed");
        exit(-1);
    }
    for (int i = 0; i < n; i++) {
        m[i] = (int *)calloc(n, sizeof(int));
        if (m[i] == NULL) {
            perror("calloc failed");
            exit(-1);
        }
    }
    return m;
}

// Read matrix A from the given file ("rows cols" header followed by
// rows*cols integers) and allocate the L and U matrices.
void read_A(const char *input_file)
{
    FILE* fp = fopen(input_file, "r");
    if (fp == NULL) {                         // was dereferenced unchecked
        perror("cannot open input file");
        exit(-1);
    }
    int m, n;
    if (fscanf(fp, "%d %d", &m, &n) != 2) {
        perror("cannot read matrix dimensions");
        exit(-1);
    }
    if (m != n) {
        perror("matrix A is not square, please check the input data!");
        exit(-1);
    }
    N = n;
    A = alloc_matrix(N);
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            if (fscanf(fp, "%d", &A[i][j]) != 1) {
                perror("bad matrix entry in input file");
                exit(-1);
            }
        }
    }
    fclose(fp);                               // file handle was leaked before

    // L and U start as zero matrices (calloc zero-fills).
    L = alloc_matrix(N);
    U = alloc_matrix(N);
}

// Partial sum of the LU recurrence: sum_{k=0..K-1} L[i][k] * U[k][j].
int sum_i_j_K(int i, int j, int K)
{
    int res = 0;
    for (int k = 0; k < K; k++) {
        res += L[i][k] * U[k][j];
    }
    return res;
}

// Write L and U to "L.out" and "U.out", one row per line.
void printLU()
{
    FILE* fpL = fopen("L.out", "w");
    FILE* fpU = fopen("U.out", "w");
    if (fpL == NULL || fpU == NULL) {
        perror("cannot open output file");
        exit(-1);
    }
    for (int i = 0; i < N; i++) {
        for (int j = 0; j < N; j++) {
            fprintf(fpL, "%d ", L[i][j]);
            fprintf(fpU, "%d ", U[i][j]);
        }
        fprintf(fpL, "\n");
        fprintf(fpU, "\n");
    }
    fclose(fpL);                              // handles were leaked before
    fclose(fpU);
}

int main(int argc, char *argv[])
{
    // Thread count from argv[1]; input file from argv[2] (default "LU.in").
    if (argc >= 2) n_threads = atoi(argv[1]);
    if (argc >= 3) {
        read_A(argv[2]);
    } else {
        read_A("LU.in");
    }

    omp_set_num_threads(n_threads);

    // Start timing.
    double ts = omp_get_wtime();

    // Doolittle LU: compute row i of U and column i of L, parallel over j.
    // NOTE(review): all arithmetic is integer, so L is exact only when each
    // (A[j][i] - sum) is a multiple of the pivot U[i][i] -- presumably the
    // test inputs are constructed that way; verify against the data set.
    for (int i = 0; i < N; i++) {
        U[i][i] = A[i][i] - sum_i_j_K(i, i, i);
        L[i][i] = 1;
        if (U[i][i] == 0) {                   // integer division by zero is UB
            fprintf(stderr, "zero pivot at row %d, cannot factorize\n", i);
            exit(-1);
        }
        #pragma omp parallel for
        for (int j = i+1; j < N; j++) {
            // recurrences for U's row i and L's column i
            U[i][j] = A[i][j] - sum_i_j_K(i, j, i);
            L[j][i] = (A[j][i] - sum_i_j_K(j, i, i)) / U[i][i];
        }
    }

    // Write L and U, then stop timing (output time is included in the
    // reported figure, matching the original placement).
    printLU();

    double te = omp_get_wtime();
    printf("Time:%f s\n", te - ts);

    // Release the matrices (process exit would reclaim them, but be tidy).
    for (int i = 0; i < N; i++) { free(A[i]); free(L[i]); free(U[i]); }
    free(A); free(L); free(U);
}
scatter.c
#include "../../shared.h"
#include "../hale_data.h"
#include "hale.h"
#include <float.h>
#include <stdio.h>

// Scatter the subcell energy and mass quantities back to the cell centers
void scatter_energy_and_mass(
    const int ncells, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, double* cell_volume, double* energy, double* density,
    double* ke_mass, double* velocity_x, double* velocity_y, double* velocity_z,
    double* cell_mass, double* subcell_mass, double* subcell_ie_mass,
    double* subcell_ke_mass, int* faces_to_nodes, int* faces_to_nodes_offsets,
    int* cells_to_faces_offsets, int* cells_to_faces,
    int* cells_to_nodes_offsets, int* cells_to_nodes, double initial_mass,
    double initial_ie_mass, double initial_ke_mass);

// Scatter the subcell momentum to the node centered velocities
void scatter_momentum(const int nnodes, vec_t* initial_momentum,
                      int* nodes_to_cells_offsets, int* nodes_to_cells,
                      int* cells_to_nodes_offsets, int* cells_to_nodes,
                      double* velocity_x, double* velocity_y,
                      double* velocity_z, double* nodal_mass,
                      double* subcell_mass, double* subcell_momentum_x,
                      double* subcell_momentum_y, double* subcell_momentum_z);

// Perform the scatter step of the ALE remapping algorithm: recompute the
// geometry on the rezoned mesh, then push the remapped subcell momentum to
// the nodes and the remapped subcell mass/energy to the cell centers.
void scatter_phase(UnstructuredMesh* umesh, HaleData* hale_data,
                   vec_t* initial_momentum, double initial_mass,
                   double initial_ie_mass, double initial_ke_mass) {

  // Calculates the cell volume, subcell volume and the subcell centroids
  calc_volumes_centroids(
      umesh->ncells, umesh->nnodes, hale_data->nnodes_by_subcell,
      umesh->cells_to_nodes_offsets, umesh->cells_to_nodes,
      hale_data->subcells_to_faces_offsets, hale_data->subcells_to_faces,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->faces_cclockwise_cell, umesh->nodes_x0, umesh->nodes_y0,
      umesh->nodes_z0, hale_data->subcell_centroids_x,
      hale_data->subcell_centroids_y, hale_data->subcell_centroids_z,
      hale_data->subcell_volume, hale_data->cell_volume,
      hale_data->nodal_volumes, umesh->nodes_to_cells_offsets,
      umesh->nodes_to_cells);

  // Scatter the subcell momentum to the node centered velocities
  // (must precede the energy scatter, which reads the nodal velocities).
  scatter_momentum(
      umesh->nnodes, initial_momentum, umesh->nodes_to_cells_offsets,
      umesh->nodes_to_cells, umesh->cells_to_nodes_offsets,
      umesh->cells_to_nodes, hale_data->velocity_x0, hale_data->velocity_y0,
      hale_data->velocity_z0, hale_data->nodal_mass, hale_data->subcell_mass,
      hale_data->subcell_momentum_x, hale_data->subcell_momentum_y,
      hale_data->subcell_momentum_z);

  // Scatter the subcell energy and mass quantities back to the cell centers
  scatter_energy_and_mass(
      umesh->ncells, umesh->nodes_x0, umesh->nodes_y0, umesh->nodes_z0,
      hale_data->cell_volume, hale_data->energy0, hale_data->density0,
      hale_data->ke_mass, hale_data->velocity_x0, hale_data->velocity_y0,
      hale_data->velocity_z0, hale_data->cell_mass, hale_data->subcell_mass,
      hale_data->subcell_ie_mass, hale_data->subcell_ke_mass,
      umesh->faces_to_nodes, umesh->faces_to_nodes_offsets,
      umesh->cells_to_faces_offsets, umesh->cells_to_faces,
      umesh->cells_to_nodes_offsets, umesh->cells_to_nodes, initial_mass,
      initial_ie_mass, initial_ke_mass);
}

// Scatter the subcell energy and mass quantities back to the cell centers.
// Also recomputes cell volumes on the rezoned mesh and prints conservation
// diagnostics against the initial totals.
void scatter_energy_and_mass(
    const int ncells, const double* nodes_x, const double* nodes_y,
    const double* nodes_z, double* cell_volume, double* energy, double* density,
    double* ke_mass, double* velocity_x, double* velocity_y, double* velocity_z,
    double* cell_mass, double* subcell_mass, double* subcell_ie_mass,
    double* subcell_ke_mass, int* faces_to_nodes, int* faces_to_nodes_offsets,
    int* cells_to_faces_offsets, int* cells_to_faces,
    int* cells_to_nodes_offsets, int* cells_to_nodes, double initial_mass,
    double initial_ie_mass, double initial_ke_mass) {

  // Scatter energy and density, and print the conservation of mass
  double rz_total_mass = 0.0;
  double rz_total_e_mass = 0.0;
#pragma omp parallel for reduction(+ : rz_total_mass, rz_total_e_mass)
  for (int cc = 0; cc < ncells; ++cc) {
    const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
    const int nnodes_by_cell =
        cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;
    const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
    const int nfaces_by_cell =
        cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;

    double total_mass = 0.0;
    double new_ke_mass = 0.0;
    double total_ie_mass = 0.0;
    double total_ke_mass = 0.0;

    // Subcells are indexed like the cell-node pairs: one subcell per
    // (cell, node) entry of the cells_to_nodes list.
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int node_index = cells_to_nodes[(cell_to_nodes_off + nn)];
      const int subcell_index = cell_to_nodes_off + nn;
      total_mass += subcell_mass[(subcell_index)];
      total_ie_mass += subcell_ie_mass[(subcell_index)];
      total_ke_mass += subcell_ke_mass[(subcell_index)];
      // Kinetic energy implied by the already-scattered nodal velocities.
      new_ke_mass += subcell_mass[(subcell_index)] * 0.5 *
                     (velocity_x[(node_index)] * velocity_x[(node_index)] +
                      velocity_y[(node_index)] * velocity_y[(node_index)] +
                      velocity_z[(node_index)] * velocity_z[(node_index)]);
    }

    // Update the volume of the cell to the new rezoned mesh
    vec_t cell_c = {0.0, 0.0, 0.0};
    calc_centroid(nnodes_by_cell, nodes_x, nodes_y, nodes_z, cells_to_nodes,
                  cell_to_nodes_off, &cell_c);
    calc_volume(cell_to_faces_off, nfaces_by_cell, cells_to_faces,
                faces_to_nodes, faces_to_nodes_offsets, nodes_x, nodes_y,
                nodes_z, &cell_c, &cell_volume[(cc)]);

    // Scatter the energy and density.  The kinetic-energy discrepancy
    // (remapped KE minus KE implied by the nodal velocities) is folded
    // into the cell's internal energy so total energy is conserved.
    cell_mass[(cc)] = total_mass;
    density[(cc)] = cell_mass[(cc)] / cell_volume[(cc)];
    const double total_e_mass = total_ie_mass + (total_ke_mass - new_ke_mass);
    energy[(cc)] = total_e_mass / cell_mass[(cc)];

    // Calculate the conservation data
    rz_total_mass += total_mass;
    rz_total_e_mass += total_e_mass;
  }

  printf("Initial Total Mass %.12f\n", initial_mass);
  printf("Rezoned Total Mass %.12f\n", rz_total_mass);
  printf("Difference   %.12f\n\n", rz_total_mass - initial_mass);

  printf("Initial Total Energy %.12f\n", (initial_ie_mass + initial_ke_mass));
  printf("Rezoned Total Internal Energy %.12f\n", rz_total_e_mass);
  printf("Difference %.12f\n\n",
         rz_total_e_mass - (initial_ie_mass + initial_ke_mass));
}

// Scatter the subcell momentum to the node centered velocities.
// Also accumulates the nodal mass and prints momentum conservation
// diagnostics against the initial totals.
void scatter_momentum(const int nnodes, vec_t* initial_momentum,
                      int* nodes_to_cells_offsets, int* nodes_to_cells,
                      int* cells_to_nodes_offsets, int* cells_to_nodes,
                      double* velocity_x, double* velocity_y,
                      double* velocity_z, double* nodal_mass,
                      double* subcell_mass, double* subcell_momentum_x,
                      double* subcell_momentum_y, double* subcell_momentum_z) {

  double total_momentum_x = 0.0;
  double total_momentum_y = 0.0;
  double total_momentum_z = 0.0;
#pragma omp parallel for reduction(+ : total_momentum_x, total_momentum_y,     \
                                   total_momentum_z)
  for (int nn = 0; nn < nnodes; ++nn) {
    const int node_to_cells_off = nodes_to_cells_offsets[(nn)];
    const int ncells_by_node =
        nodes_to_cells_offsets[(nn + 1)] - node_to_cells_off;

    double mass_at_node = 0.0;
    double node_momentum_x = 0.0;
    double node_momentum_y = 0.0;
    double node_momentum_z = 0.0;

    // Gather this node's subcell contribution from every adjacent cell.
    for (int cc = 0; cc < ncells_by_node; ++cc) {
      const int cell_index = nodes_to_cells[(node_to_cells_off + cc)];
      const int cell_to_nodes_off = cells_to_nodes_offsets[(cell_index)];
      const int nnodes_by_cell =
          cells_to_nodes_offsets[(cell_index + 1)] - cell_to_nodes_off;

      // Determine the position of the node in the cell
      // (linear search; the subcell index mirrors the cell-node pair).
      int nn2;
      for (nn2 = 0; nn2 < nnodes_by_cell; ++nn2) {
        if (cells_to_nodes[(cell_to_nodes_off + nn2)] == nn) {
          break;
        }
      }

      const int subcell_index = cell_to_nodes_off + nn2;
      node_momentum_x += subcell_momentum_x[(subcell_index)];
      node_momentum_y += subcell_momentum_y[(subcell_index)];
      node_momentum_z += subcell_momentum_z[(subcell_index)];
      mass_at_node += subcell_mass[(subcell_index)];
    }

    nodal_mass[(nn)] = mass_at_node;

    total_momentum_x += node_momentum_x;
    total_momentum_y += node_momentum_y;
    total_momentum_z += node_momentum_z;

    // Velocity is momentum over mass at the node.
    velocity_x[(nn)] = node_momentum_x / nodal_mass[(nn)];
    velocity_y[(nn)] = node_momentum_y / nodal_mass[(nn)];
    velocity_z[(nn)] = node_momentum_z / nodal_mass[(nn)];
  }

  printf("Initial total momentum %.12f %.12f %.12f\n", initial_momentum->x,
         initial_momentum->y, initial_momentum->z);
  printf("Rezoned total momentum %.12f %.12f %.12f\n", total_momentum_x,
         total_momentum_y, total_momentum_z);
  printf("Difference %.12f %.12f %.12f\n\n",
         initial_momentum->x - total_momentum_x,
         initial_momentum->y - total_momentum_y,
         initial_momentum->z - total_momentum_z);
}
Grid3D.h
// // Grid3D.h // Copyright (c) 2016-2018 // author: Douglas Creel // #ifndef GRID3D_H #define GRID3D_H #include "Vesuvius.h" enum GRIDBEHAVIOR { CLAMPED, REPEAT, ZERO }; template <typename T> class Grid3D { public: Grid3D() { } ~Grid3D() { } void init(int nx, int ny, int nz, T val) { m_nvoxels = nx*ny*nz; m_xsize = nx; m_ysize = ny; m_zsize = nz; data.resize(m_nvoxels); #pragma omp parallel for for (int i = 0; i < m_nvoxels; i++) { data[i] = val; } } int IX(int x, int y, int z) { if (!(x < m_xsize)) x = m_xsize - 1; if (!(y < m_ysize)) y = m_ysize - 1; if (!(z < m_zsize)) z = m_zsize - 1; if (!(x > 0)) x = 0; if (!(y > 0)) y = 0; if (!(z > 0)) z = 0; return x * m_ysize * m_zsize + y * m_zsize + z; } inline void set(T f, int x, int y, int z) { data[IX(x, y, z)] = f; } typename T& operator() (int x, int y, int z) { if (m_behavior == CLAMPED) { if (!(x < m_xsize)) x = m_xsize - 1; if (!(y < m_ysize)) y = m_ysize - 1; if (!(z < m_zsize)) z = m_zsize - 1; if (!(x > 0)) x = 0; if (!(y > 0)) y = 0; if (!(z > 0)) z = 0; } else if (m_behavior == ZERO) { T retval; if ((x < 0 || x > m_xsize) || (y < 0 || y > m_ysize) || (z < 0 || z > m_zsize)) return retval; } assert(x >= 0 && x < m_xsize); assert(y >= 0 && y < m_ysize); assert(z >= 0 && z < m_zsize); return data[IX(x, y, z)]; } inline typename T& operator[] (int i) { assert(i >= 0 && i < m_nvoxels); return data[i]; } inline void setIndexValue(int i, T f) { data[i] = f; } inline int getNumVoxels() { return m_nvoxels; } inline int getXSize() { return m_xsize; } inline int getYSize() { return m_ysize; } inline int getZSize() { return m_zsize; } inline void setBoundaryBehavior(GRIDBEHAVIOR mode) { m_behavior = mode; } inline void setZero() { #pragma omp parallel for for (int i = 0; i < m_nvoxels; i++) { data[i] = 0.0; } } inline T maxval() const { T r = 0; for (int i = 0; i < m_nvoxels; i++) if (!(std::fabs(data[i]) <= r)) r = std::fabs(data[i]); return r; } inline void clear() { data.clear(); } typename T 
trilinearInterpolation(T x, T y, T z) { T retval; T c0, c1, c2, c3, c4, c5, c6, c7; int i = (int)abs(floor(x)); int j = (int)abs(floor(y)); int k = (int)abs(floor(z)); c0 = (i + 1 - x) * (j + 1 - y) * (k + 1 - z) * data[IX(i, j, k)]; c1 = (x - i) * (j + 1 - y) * (k + 1 - z) * data[IX(i + 1, j, k)]; c2 = (i + 1 - x) * (y - j) * (k + 1 - z) * data[IX(i, j + 1, k)]; c3 = (x - i) * (y - j) * (k + 1 - z) * data[IX(i + 1, j + 1, k)]; c4 = (i + 1 - x) * (j + 1 - y) * (z - k) * data[IX(i, j, k + 1)]; c5 = (x - i) * (j + 1 - y) * (z - k) * data[IX(i + 1, j, k + 1)]; c6 = (i + 1 - x) * (y - j) * (z - k) * data[IX(i, j + 1, k + 1)]; c7 = (x - i) * (y - j) * (z - k) * data[IX(i + 1, j + 1, k + 1)]; retval = c0 + c1 + c2 + c3 + c4 + c5 + c6 + c7; return retval; } // // code attribution: "Visual Simulation of Smoke", Fedkiw, Stam, Jensen // typename T monotonicCubicInterpolation(T t, T f[4]) { T retval; T d_k = T(0.5) * (f[2] - f[0]); T d_k1 = T(0.5) * (f[3] - f[1]); T delta_k = f[2] - f[1]; if (delta_k == static_cast<T>(0)) { d_k = static_cast<T>(0); d_k1 = static_cast<T>(0); } T a0 = f[1]; T a1 = d_k; T a2 = (T(3) * delta_k) - (T(2) * d_k) - d_k1; T a3 = d_k + d_k1 - (T(2) * delta_k); T t1 = t; T t2 = t * t; T t3 = t2 * t1; retval = a3 * t3 + a2 * t2 + a1 * t1 + a0; return retval; } typename T cubicInterpolation(T x, T p[4]) { T retval; retval = p[1] + 0.5 * x * (p[2] - p[0] + x * (2.0 * p[0] - 5.0 * p[1] + 4.0 * p[2] - p[3] + x * (3.0 * (p[1] - p[2]) + p[3] - p[0]))); return retval; } typename T bicubicInterpolation(T x, T y, T p[4][4]) { T retval; T interps[4]; interps[0] = cubicInterpolation(y, p[0]); interps[1] = cubicInterpolation(y, p[1]); interps[2] = cubicInterpolation(y, p[2]); interps[3] = cubicInterpolation(y, p[3]); //retval = monotonicCubicInterpolation(x, interps); retval = cubicInterpolation(x, interps); return retval; } typename T tricubicInterpolation(T x, T y, T z) { T retval; int i = (int)abs(floor(x)); int j = (int)abs(floor(y)); int k = (int)abs(floor(z)); 
T p[4][4][4]; for (int nj = j - 1, xj = 0; nj < j + 3; nj++, xj++) { for (int nk = k - 1, xk = 0; nk < k + 3; nk++, xk++) { p[0][xj][xk] = data[IX(i - 1, nj, nk)]; p[1][xj][xk] = data[IX(i, nj, nk)]; p[2][xj][xk] = data[IX(i + 1, nj, nk)]; p[3][xj][xk] = data[IX(i + 2, nj, nk)]; } } T interps[4]; interps[0] = bicubicInterpolation(y - j, z - k, p[0]); interps[1] = bicubicInterpolation(y - j, z - k, p[1]); interps[2] = bicubicInterpolation(y - j, z - k, p[2]); interps[3] = bicubicInterpolation(y - j, z - k, p[3]); //retval = monotonicCubicInterpolation(x - i, interps); retval = cubicInterpolation(x - i, interps); return retval; } private: std::vector<T> data; int m_xsize, m_ysize, m_zsize; int m_nvoxels; GRIDBEHAVIOR m_behavior = CLAMPED; }; #endif
multisort-omp-task-rama-cutoff&depend.c
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include "omp.h"

#include <sys/time.h>

// Wall-clock time in microseconds.
double getusec_() {
        struct timeval time;
        gettimeofday(&time, NULL);
        return ((double)time.tv_sec * (double)1e6 + (double)time.tv_usec);
}

#define START_COUNT_TIME stamp = getusec_();
#define STOP_COUNT_TIME(_m) stamp = getusec_() - stamp;\
                        stamp = stamp/1e6;\
                        printf ("%s: %0.6f\n",(_m), stamp);

// N and MIN must be powers of 2
long N;
long MIN_SORT_SIZE;
long MIN_MERGE_SIZE;
int CUTOFF;

#define BLOCK_SIZE 1024L

#define T int

void basicsort(long n, T data[n]);

void basicmerge(long n, T left[n], T right[n], T result[n*2], long start, long length);

// Recursive parallel merge.  Tasks stop being created once the recursion
// depth reaches CUTOFF (the 'final' clause); inside a final task we recurse
// serially via omp_in_final() to avoid paying task overhead.
void merge(long n, T left[n], T right[n], T result[n*2], long start, long length, int depth) {
        if (length < MIN_MERGE_SIZE*2L) {
                // Base case
                basicmerge(n, left, right, result, start, length);
        } else {
                // Recursive decomposition
                if(!omp_in_final()){
                        #pragma omp task final (depth >= CUTOFF)
                        merge(n, left, right, result, start, length/2, depth+1 );
                        #pragma omp task final (depth >= CUTOFF)
                        merge(n, left, right, result, start + length/2, length/2, depth+1);
                        #pragma omp taskwait
                }else{
                        merge(n, left, right, result, start, length/2, depth+1);
                        merge(n, left, right, result, start + length/2, length/2, depth+1);
                }
        }
}

// Recursive parallel multisort: sort the four quarters of data[], merge the
// halves into tmp[], then merge tmp[] back into data[].  Ordering between
// the seven tasks is expressed with depend clauses on representative array
// elements; the taskwait ensures completion before returning to the caller.
void multisort(long n, T data[n], T tmp[n], int depth) {
        if (n >= MIN_SORT_SIZE*4L) {
                // Recursive decomposition
                if(!omp_in_final()){
                        #pragma omp task final (depth >= CUTOFF) depend(out: data[0])
                        multisort(n/4L, &data[0], &tmp[0], depth+1);
                        #pragma omp task final (depth >= CUTOFF) depend(out: data[n/4L])
                        multisort(n/4L, &data[n/4L], &tmp[n/4L], depth+1);
                        #pragma omp task final (depth >= CUTOFF) depend(out: data[n/2L])
                        multisort(n/4L, &data[n/2L], &tmp[n/2L], depth+1);
                        #pragma omp task final (depth >= CUTOFF) depend(out: data[3L*n/4L])
                        multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L], depth+1);

                        #pragma omp task final (depth >= CUTOFF) depend(in: data[0], data[n/4L]) depend(out: tmp[0])
                        merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,0);
                        #pragma omp task final (depth >= CUTOFF) depend(in: data[n/2L], data[3L*n/4L]) depend(out: tmp[n/2L])
                        merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,0);
                        #pragma omp task final (depth >= CUTOFF) depend(in: tmp[0], tmp[n/2L]) depend(out: data[0])
                        merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,0);
                        #pragma omp taskwait
                }else{
                        multisort(n/4L, &data[0], &tmp[0], depth+1);
                        multisort(n/4L, &data[n/4L], &tmp[n/4L], depth+1);
                        multisort(n/4L, &data[n/2L], &tmp[n/2L], depth+1);
                        multisort(n/4L, &data[3L*n/4L], &tmp[3L*n/4L], depth+1);

                        merge(n/4L, &data[0], &data[n/4L], &tmp[0], 0, n/2L,0);
                        merge(n/4L, &data[n/2L], &data[3L*n/4L], &tmp[n/2L], 0, n/2L,0);
                        merge(n/2L, &tmp[0], &tmp[n/2L], &data[0], 0, n,0);
                }
        } else {
                // Base case
                basicsort(n, data);
        }
}

// Fill data[] with a deterministic pseudo-random sequence.
static void initialize(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                if (i==0) {
                        data[i] = rand();
                } else {
                        data[i] = ((data[i-1]+1) * i * 104723L) % N;
                }
        }
}

static void clear(long length, T data[length]) {
        long i;
        for (i = 0; i < length; i++) {
                data[i] = 0;
        }
}

// Verify the result is non-decreasing; report the number of inversions.
void check_sorted(long n, T data[n]) {
        int unsorted=0;
        for (int i=1; i<n; i++)
                if (data[i-1] > data[i]) unsorted++;
        if (unsorted > 0)
                printf ("\nERROR: data is NOT properly sorted. There are %d unordered positions\n\n",unsorted);
        else {
                // printf ("data IS ordered; ");
        }
}

int main(int argc, char **argv) {

        /* Defaults for command line arguments */
        N = 32768 * BLOCK_SIZE;
        MIN_SORT_SIZE = 32 * BLOCK_SIZE;
        MIN_MERGE_SIZE = 32 * BLOCK_SIZE;   // stray ';;' removed
        CUTOFF = 4;

        /* Process command-line arguments */
        for (int i=1; i<argc; i++) {
                if (strcmp(argv[i], "-n")==0) {
                        N = atol(argv[++i]) * BLOCK_SIZE;
                }
                else if (strcmp(argv[i], "-s")==0) {
                        MIN_SORT_SIZE = atol(argv[++i]) * BLOCK_SIZE;
                }
                else if (strcmp(argv[i], "-m")==0) {
                        MIN_MERGE_SIZE = atol(argv[++i]) * BLOCK_SIZE;
                }
                else if (strcmp(argv[i], "-c")==0) {
                        CUTOFF = atoi(argv[++i]);
                }
                else {
                        fprintf(stderr, "Usage: %s [-n vector_size -s MIN_SORT_SIZE -m MIN_MERGE_SIZE]\n", argv[0]);
                        fprintf(stderr, "  -n to specify the size of the vector (in Kelements) to sort (default 32768)\n");
                        fprintf(stderr, "  -s to specify the size of the vector (in Kelements) that breaks recursion in the sort phase (default 32)\n");
                        fprintf(stderr, "  -m to specify the size of the vector (in Kelements) that breaks recursion in the merge phase (default 32)\n");
                        fprintf(stderr, "  -c to specify the cut off recursion level to stop task generation in OpenMP (default 4)\n");
                        return EXIT_FAILURE;
                }
        }
        fprintf(stdout, "Arguments (Kelements): N=%ld, MIN_SORT_SIZE=%ld, MIN_MERGE_SIZE=%ld\n", N/BLOCK_SIZE, MIN_SORT_SIZE/BLOCK_SIZE, MIN_MERGE_SIZE/BLOCK_SIZE);
        fprintf(stdout, "                       CUTOFF=%d\n", CUTOFF);

        T *data = malloc(N*sizeof(T));
        T *tmp = malloc(N*sizeof(T));
        if (data == NULL || tmp == NULL) {   // allocations were unchecked
                fprintf(stderr, "Error: cannot allocate memory for %ld elements\n", N);
                free(data);
                free(tmp);
                return EXIT_FAILURE;
        }

        double stamp;
        START_COUNT_TIME;

        initialize(N, data);
        clear(N, tmp);

        STOP_COUNT_TIME("Initialization time in seconds");

        START_COUNT_TIME;

        // One thread seeds the task tree; the team executes the tasks.
        #pragma omp parallel
        #pragma omp single
        multisort(N, data, tmp,0);

        STOP_COUNT_TIME("Multisort execution time");

        START_COUNT_TIME;

        check_sorted (N, data);

        STOP_COUNT_TIME("Check sorted data execution time");

        fprintf(stdout, "Multisort program finished\n");

        free(data);                          // buffers were leaked before
        free(tmp);
        return 0;
}
linAlgAXMY.c
/* The MIT License (MIT) Copyright (c) 2017 Tim Warburton, Noel Chalmers, Jesse Chan, Ali Karakus Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
*/ extern "C" void axmy(const dlong & N, const dfloat& alpha, const dfloat * __restrict__ cpu_w, dfloat * __restrict__ cpu_a){ #pragma omp parallel for for(int i=0;i<N;++i){ const dfloat ai = cpu_a[i]; const dfloat wi = cpu_w[i]; cpu_a[i] = alpha*ai*wi; } } extern "C" void axmyMany(const dlong & N, const dlong & Nfields, const dlong & offset, const dlong & mode, const dfloat & alpha, const dfloat * __restrict__ cpu_w, dfloat * __restrict__ cpu_a){ #pragma omp parallel for collapse(2) for(int fld=0;fld<Nfields;fld++) { for(int i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat wi = cpu_w[i + mode * fld * offset]; cpu_a[id] = alpha*ai*wi; } } } extern "C" void axmyVector(const dlong & N, const dlong & offset, const dlong & mode, const dfloat & alpha, const dfloat * __restrict__ cpu_w, dfloat * __restrict__ cpu_a){ #pragma omp parallel for collapse(2) for(int fld=0;fld<p_NVec;fld++) { for(int i=0;i<N;++i){ const dlong id = i + fld*offset; const dfloat ai = cpu_a[id]; const dfloat wi = cpu_w[i + mode * fld * offset]; cpu_a[id] = alpha*ai*wi; } } }
GB_unop__tgamma_fp64_fp64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__tgamma_fp64_fp64)
// op(A') function:  GB (_unop_tran__tgamma_fp64_fp64)

// C type:   double
// A type:   double
// cast:     double cij = aij
// unaryop:  cij = tgamma (aij)

// type of the A matrix entries
#define GB_ATYPE \
    double

// type of the C matrix entries
#define GB_CTYPE \
    double

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = tgamma (x) ;

// casting
#define GB_CAST(z, aij) \
    double z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    double aij = Ax [pA] ;          \
    /* Cx [pC] = op (cast (aij)) */ \
    double z = aij ;                \
    Cx [pC] = tgamma (z) ;          \
}

// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
    0

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_TGAMMA || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies tgamma entrywise to the anz entries of Ax, writing to Cx.
// Handles both the dense/full case (Ab == NULL) and the bitmap case
// (Ab [p] nonzero means entry p is present).
GrB_Info GB (_unop_apply__tgamma_fp64_fp64)
(
    double *Cx,                 // Cx and Ax may be aliased
    const double *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    // TODO: if OP is ONE and uniform-valued matrices are exploited, then
    // do this in O(1) time
    if (Ab == NULL)
    {
        // full case: every entry 0..anz-1 is present
        #if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
        GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
        #else
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = tgamma (z) ;
        }
        #endif
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            double aij = Ax [p] ;
            double z = aij ;
            Cx [p] = tgamma (z) ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all generated operators via
// the #include of GB_unop_transpose.c, which expands the GB_* macros above.
GrB_Info GB (_unop_tran__tgamma_fp64_fp64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
semantics.c
/* Perform the semantic phase of parsing, i.e., the process of building tree structure, checking semantic consistency, and building RTL. These routines are used both during actual parsing and during the instantiation of template functions. Copyright (C) 1998-2015 Free Software Foundation, Inc. Written by Mark Mitchell (mmitchell@usa.net) based on code found formerly in parse.y and pt.c. This file is part of GCC. GCC is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3, or (at your option) any later version. GCC is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with GCC; see the file COPYING3. If not see <http://www.gnu.org/licenses/>. 
*/ #include "config.h" #include "system.h" #include "coretypes.h" #include "tm.h" #include "hash-set.h" #include "machmode.h" #include "vec.h" #include "double-int.h" #include "input.h" #include "alias.h" #include "symtab.h" #include "wide-int.h" #include "inchash.h" #include "tree.h" #include "stmt.h" #include "varasm.h" #include "stor-layout.h" #include "stringpool.h" #include "cp-tree.h" #include "c-family/c-common.h" #include "c-family/c-objc.h" #include "tree-inline.h" #include "intl.h" #include "toplev.h" #include "flags.h" #include "timevar.h" #include "diagnostic.h" #include "hash-map.h" #include "is-a.h" #include "plugin-api.h" #include "hard-reg-set.h" #include "input.h" #include "function.h" #include "ipa-ref.h" #include "cgraph.h" #include "tree-iterator.h" #include "target.h" #include "hash-table.h" #include "gimplify.h" #include "bitmap.h" #include "omp-low.h" #include "builtins.h" #include "convert.h" #include "gomp-constants.h" /* There routines provide a modular interface to perform many parsing operations. They may therefore be used during actual parsing, or during template instantiation, which may be regarded as a degenerate form of parsing. */ static tree maybe_convert_cond (tree); static tree finalize_nrv_r (tree *, int *, void *); static tree capture_decltype (tree); /* Deferred Access Checking Overview --------------------------------- Most C++ expressions and declarations require access checking to be performed during parsing. However, in several cases, this has to be treated differently. For member declarations, access checking has to be deferred until more information about the declaration is known. For example: class A { typedef int X; public: X f(); }; A::X A::f(); A::X g(); When we are parsing the function return type `A::X', we don't really know if this is allowed until we parse the function name. Furthermore, some contexts require that access checking is never performed at all. These include class heads, and template instantiations. 
   Typical use of access checking functions is described here:

   1. When we enter a context that requires certain access checking
      mode, the function `push_deferring_access_checks' is called with
      DEFERRING argument specifying the desired mode.  Access checking
      may be performed immediately (dk_no_deferred), deferred
      (dk_deferred), or not performed (dk_no_check).

   2. When a declaration such as a type, or a variable, is encountered,
      the function `perform_or_defer_access_check' is called.  It
      maintains a vector of all deferred checks.

   3. The global `current_class_type' or `current_function_decl' is then
      setup by the parser.  `enforce_access' relies on these information
      to check access.

   4. Upon exiting the context mentioned in step 1,
      `perform_deferred_access_checks' is called to check all
      declaration stored in the vector.  `pop_deferring_access_checks'
      is then called to restore the previous access checking mode.

      In case of parsing error, we simply call
      `pop_deferring_access_checks' without
      `perform_deferred_access_checks'.  */

/* One entry of the deferred-access-check context stack.  */
typedef struct GTY(()) deferred_access {
  /* A vector representing name-lookups for which we have deferred
     checking access controls.  We cannot check the accessibility of
     names used in a decl-specifier-seq until we know what is being
     declared because code like:

       class A {
	 class B {};
	 B* f();
       }

       A::B* A::f() { return 0; }

     is valid, even though `A::B' is not generally accessible.  */
  vec<deferred_access_check, va_gc> * GTY(()) deferred_access_checks;

  /* The current mode of access checks.  */
  enum deferring_kind deferring_access_checks_kind;

} deferred_access;

/* Data for deferred access checking.  */
static GTY(()) vec<deferred_access, va_gc> *deferred_access_stack;
/* Nonzero while access checking is disabled entirely; counts the nesting
   depth of dk_no_check contexts.  */
static GTY(()) unsigned deferred_access_no_check;

/* Save the current deferred access states and start deferred
   access checking iff DEFER_P is true.
*/ void push_deferring_access_checks (deferring_kind deferring) { /* For context like template instantiation, access checking disabling applies to all nested context. */ if (deferred_access_no_check || deferring == dk_no_check) deferred_access_no_check++; else { deferred_access e = {NULL, deferring}; vec_safe_push (deferred_access_stack, e); } } /* Save the current deferred access states and start deferred access checking, continuing the set of deferred checks in CHECKS. */ void reopen_deferring_access_checks (vec<deferred_access_check, va_gc> * checks) { push_deferring_access_checks (dk_deferred); if (!deferred_access_no_check) deferred_access_stack->last().deferred_access_checks = checks; } /* Resume deferring access checks again after we stopped doing this previously. */ void resume_deferring_access_checks (void) { if (!deferred_access_no_check) deferred_access_stack->last().deferring_access_checks_kind = dk_deferred; } /* Stop deferring access checks. */ void stop_deferring_access_checks (void) { if (!deferred_access_no_check) deferred_access_stack->last().deferring_access_checks_kind = dk_no_deferred; } /* Discard the current deferred access checks and restore the previous states. */ void pop_deferring_access_checks (void) { if (deferred_access_no_check) deferred_access_no_check--; else deferred_access_stack->pop (); } /* Returns a TREE_LIST representing the deferred checks. The TREE_PURPOSE of each node is the type through which the access occurred; the TREE_VALUE is the declaration named. */ vec<deferred_access_check, va_gc> * get_deferred_access_checks (void) { if (deferred_access_no_check) return NULL; else return (deferred_access_stack->last().deferred_access_checks); } /* Take current deferred checks and combine with the previous states if we also defer checks previously. Otherwise perform checks now. 
*/ void pop_to_parent_deferring_access_checks (void) { if (deferred_access_no_check) deferred_access_no_check--; else { vec<deferred_access_check, va_gc> *checks; deferred_access *ptr; checks = (deferred_access_stack->last ().deferred_access_checks); deferred_access_stack->pop (); ptr = &deferred_access_stack->last (); if (ptr->deferring_access_checks_kind == dk_no_deferred) { /* Check access. */ perform_access_checks (checks, tf_warning_or_error); } else { /* Merge with parent. */ int i, j; deferred_access_check *chk, *probe; FOR_EACH_VEC_SAFE_ELT (checks, i, chk) { FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, j, probe) { if (probe->binfo == chk->binfo && probe->decl == chk->decl && probe->diag_decl == chk->diag_decl) goto found; } /* Insert into parent's checks. */ vec_safe_push (ptr->deferred_access_checks, *chk); found:; } } } } /* Perform the access checks in CHECKS. The TREE_PURPOSE of each node is the BINFO indicating the qualifying scope used to access the DECL node stored in the TREE_VALUE of the node. If CHECKS is empty or we aren't in SFINAE context or all the checks succeed return TRUE, otherwise FALSE. */ bool perform_access_checks (vec<deferred_access_check, va_gc> *checks, tsubst_flags_t complain) { int i; deferred_access_check *chk; location_t loc = input_location; bool ok = true; if (!checks) return true; FOR_EACH_VEC_SAFE_ELT (checks, i, chk) { input_location = chk->loc; ok &= enforce_access (chk->binfo, chk->decl, chk->diag_decl, complain); } input_location = loc; return (complain & tf_error) ? true : ok; } /* Perform the deferred access checks. After performing the checks, we still have to keep the list `deferred_access_stack->deferred_access_checks' since we may want to check access for them again later in a different context. For example: class A { typedef int X; static X a; }; A::X A::a, x; // No error for `A::a', error for `x' We have to perform deferred access of `A::X', first with `A::a', next with `x'. 
Return value like perform_access_checks above. */ bool perform_deferred_access_checks (tsubst_flags_t complain) { return perform_access_checks (get_deferred_access_checks (), complain); } /* Defer checking the accessibility of DECL, when looked up in BINFO. DIAG_DECL is the declaration to use to print diagnostics. Return value like perform_access_checks above. */ bool perform_or_defer_access_check (tree binfo, tree decl, tree diag_decl, tsubst_flags_t complain) { int i; deferred_access *ptr; deferred_access_check *chk; /* Exit if we are in a context that no access checking is performed. */ if (deferred_access_no_check) return true; gcc_assert (TREE_CODE (binfo) == TREE_BINFO); ptr = &deferred_access_stack->last (); /* If we are not supposed to defer access checks, just check now. */ if (ptr->deferring_access_checks_kind == dk_no_deferred) { bool ok = enforce_access (binfo, decl, diag_decl, complain); return (complain & tf_error) ? true : ok; } /* See if we are already going to perform this check. */ FOR_EACH_VEC_SAFE_ELT (ptr->deferred_access_checks, i, chk) { if (chk->decl == decl && chk->binfo == binfo && chk->diag_decl == diag_decl) { return true; } } /* If not, record the check. */ deferred_access_check new_access = {binfo, decl, diag_decl, input_location}; vec_safe_push (ptr->deferred_access_checks, new_access); return true; } /* Returns nonzero if the current statement is a full expression, i.e. temporaries created during that statement should be destroyed at the end of the statement. */ int stmts_are_full_exprs_p (void) { return current_stmt_tree ()->stmts_are_full_exprs_p; } /* T is a statement. Add it to the statement-tree. This is the C++ version. The C/ObjC frontends have a slightly different version of this function. 
*/ tree add_stmt (tree t) { enum tree_code code = TREE_CODE (t); if (EXPR_P (t) && code != LABEL_EXPR) { if (!EXPR_HAS_LOCATION (t)) SET_EXPR_LOCATION (t, input_location); /* When we expand a statement-tree, we must know whether or not the statements are full-expressions. We record that fact here. */ STMT_IS_FULL_EXPR_P (t) = stmts_are_full_exprs_p (); } if (code == LABEL_EXPR || code == CASE_LABEL_EXPR) STATEMENT_LIST_HAS_LABEL (cur_stmt_list) = 1; /* Add T to the statement-tree. Non-side-effect statements need to be recorded during statement expressions. */ gcc_checking_assert (!stmt_list_stack->is_empty ()); append_to_statement_list_force (t, &cur_stmt_list); return t; } /* Returns the stmt_tree to which statements are currently being added. */ stmt_tree current_stmt_tree (void) { return (cfun ? &cfun->language->base.x_stmt_tree : &scope_chain->x_stmt_tree); } /* If statements are full expressions, wrap STMT in a CLEANUP_POINT_EXPR. */ static tree maybe_cleanup_point_expr (tree expr) { if (!processing_template_decl && stmts_are_full_exprs_p ()) expr = fold_build_cleanup_point_expr (TREE_TYPE (expr), expr); return expr; } /* Like maybe_cleanup_point_expr except have the type of the new expression be void so we don't need to create a temporary variable to hold the inner expression. The reason why we do this is because the original type might be an aggregate and we cannot create a temporary variable for that type. */ tree maybe_cleanup_point_expr_void (tree expr) { if (!processing_template_decl && stmts_are_full_exprs_p ()) expr = fold_build_cleanup_point_expr (void_type_node, expr); return expr; } /* Create a declaration statement for the declaration given by the DECL. */ void add_decl_expr (tree decl) { tree r = build_stmt (input_location, DECL_EXPR, decl); if (DECL_INITIAL (decl) || (DECL_SIZE (decl) && TREE_SIDE_EFFECTS (DECL_SIZE (decl)))) r = maybe_cleanup_point_expr_void (r); add_stmt (r); } /* Finish a scope. 
*/ tree do_poplevel (tree stmt_list) { tree block = NULL; if (stmts_are_full_exprs_p ()) block = poplevel (kept_level_p (), 1, 0); stmt_list = pop_stmt_list (stmt_list); if (!processing_template_decl) { stmt_list = c_build_bind_expr (input_location, block, stmt_list); /* ??? See c_end_compound_stmt re statement expressions. */ } return stmt_list; } /* Begin a new scope. */ static tree do_pushlevel (scope_kind sk) { tree ret = push_stmt_list (); if (stmts_are_full_exprs_p ()) begin_scope (sk, NULL); return ret; } /* Queue a cleanup. CLEANUP is an expression/statement to be executed when the current scope is exited. EH_ONLY is true when this is not meant to apply to normal control flow transfer. */ void push_cleanup (tree decl, tree cleanup, bool eh_only) { tree stmt = build_stmt (input_location, CLEANUP_STMT, NULL, cleanup, decl); CLEANUP_EH_ONLY (stmt) = eh_only; add_stmt (stmt); CLEANUP_BODY (stmt) = push_stmt_list (); } /* Simple infinite loop tracking for -Wreturn-type. We keep a stack of all the current loops, represented by 'NULL_TREE' if we've seen a possible exit, and 'error_mark_node' if not. This is currently used only to suppress the warning about a function with no return statements, and therefore we don't bother noting returns as possible exits. We also don't bother with gotos. */ static void begin_maybe_infinite_loop (tree cond) { /* Only track this while parsing a function, not during instantiation. */ if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl) && !processing_template_decl)) return; bool maybe_infinite = true; if (cond) { cond = fold_non_dependent_expr (cond); maybe_infinite = integer_nonzerop (cond); } vec_safe_push (cp_function_chain->infinite_loops, maybe_infinite ? error_mark_node : NULL_TREE); } /* A break is a possible exit for the current loop. 
*/ void break_maybe_infinite_loop (void) { if (!cfun) return; cp_function_chain->infinite_loops->last() = NULL_TREE; } /* If we reach the end of the loop without seeing a possible exit, we have an infinite loop. */ static void end_maybe_infinite_loop (tree cond) { if (!cfun || (DECL_TEMPLATE_INSTANTIATION (current_function_decl) && !processing_template_decl)) return; tree current = cp_function_chain->infinite_loops->pop(); if (current != NULL_TREE) { cond = fold_non_dependent_expr (cond); if (integer_nonzerop (cond)) current_function_infinite_loop = 1; } } /* Begin a conditional that might contain a declaration. When generating normal code, we want the declaration to appear before the statement containing the conditional. When generating template code, we want the conditional to be rendered as the raw DECL_EXPR. */ static void begin_cond (tree *cond_p) { if (processing_template_decl) *cond_p = push_stmt_list (); } /* Finish such a conditional. */ static void finish_cond (tree *cond_p, tree expr) { if (processing_template_decl) { tree cond = pop_stmt_list (*cond_p); if (expr == NULL_TREE) /* Empty condition in 'for'. */ gcc_assert (empty_expr_stmt_p (cond)); else if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; else if (!empty_expr_stmt_p (cond)) expr = build2 (COMPOUND_EXPR, TREE_TYPE (expr), cond, expr); } *cond_p = expr; } /* If *COND_P specifies a conditional with a declaration, transform the loop such that while (A x = 42) { } for (; A x = 42;) { } becomes while (true) { A x = 42; if (!x) break; } for (;;) { A x = 42; if (!x) break; } The statement list for BODY will be empty if the conditional did not declare anything. 
*/ static void simplify_loop_decl_cond (tree *cond_p, tree body) { tree cond, if_stmt; if (!TREE_SIDE_EFFECTS (body)) return; cond = *cond_p; *cond_p = boolean_true_node; if_stmt = begin_if_stmt (); cond = cp_build_unary_op (TRUTH_NOT_EXPR, cond, 0, tf_warning_or_error); finish_if_stmt_cond (cond, if_stmt); finish_break_stmt (); finish_then_clause (if_stmt); finish_if_stmt (if_stmt); } /* Finish a goto-statement. */ tree finish_goto_stmt (tree destination) { if (identifier_p (destination)) destination = lookup_label (destination); /* We warn about unused labels with -Wunused. That means we have to mark the used labels as used. */ if (TREE_CODE (destination) == LABEL_DECL) TREE_USED (destination) = 1; else { if (check_no_cilk (destination, "Cilk array notation cannot be used as a computed goto expression", "%<_Cilk_spawn%> statement cannot be used as a computed goto expression")) destination = error_mark_node; destination = mark_rvalue_use (destination); if (!processing_template_decl) { destination = cp_convert (ptr_type_node, destination, tf_warning_or_error); if (error_operand_p (destination)) return NULL_TREE; destination = fold_build_cleanup_point_expr (TREE_TYPE (destination), destination); } } check_goto (destination); return add_stmt (build_stmt (input_location, GOTO_EXPR, destination)); } /* COND is the condition-expression for an if, while, etc., statement. Convert it to a boolean value, if appropriate. In addition, verify sequence points if -Wsequence-point is enabled. */ static tree maybe_convert_cond (tree cond) { /* Empty conditions remain empty. */ if (!cond) return NULL_TREE; /* Wait until we instantiate templates before doing conversion. */ if (processing_template_decl) return cond; if (warn_sequence_point) verify_sequence_points (cond); /* Do the conversion. 
*/ cond = convert_from_reference (cond); if (TREE_CODE (cond) == MODIFY_EXPR && !TREE_NO_WARNING (cond) && warn_parentheses) { warning (OPT_Wparentheses, "suggest parentheses around assignment used as truth value"); TREE_NO_WARNING (cond) = 1; } return condition_conversion (cond); } /* Finish an expression-statement, whose EXPRESSION is as indicated. */ tree finish_expr_stmt (tree expr) { tree r = NULL_TREE; if (expr != NULL_TREE) { if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); expr = convert_to_void (expr, ICV_STATEMENT, tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) convert_to_void (build_non_dependent_expr (expr), ICV_STATEMENT, tf_warning_or_error); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; /* Simplification of inner statement expressions, compound exprs, etc can result in us already having an EXPR_STMT. */ if (TREE_CODE (expr) != CLEANUP_POINT_EXPR) { if (TREE_CODE (expr) != EXPR_STMT) expr = build_stmt (input_location, EXPR_STMT, expr); expr = maybe_cleanup_point_expr_void (expr); } r = add_stmt (expr); } return r; } /* Begin an if-statement. Returns a newly created IF_STMT if appropriate. */ tree begin_if_stmt (void) { tree r, scope; scope = do_pushlevel (sk_cond); r = build_stmt (input_location, IF_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope); begin_cond (&IF_COND (r)); return r; } /* Process the COND of an if-statement, which may be given by IF_STMT. */ void finish_if_stmt_cond (tree cond, tree if_stmt) { finish_cond (&IF_COND (if_stmt), maybe_convert_cond (cond)); add_stmt (if_stmt); THEN_CLAUSE (if_stmt) = push_stmt_list (); } /* Finish the then-clause of an if-statement, which may be given by IF_STMT. */ tree finish_then_clause (tree if_stmt) { THEN_CLAUSE (if_stmt) = pop_stmt_list (THEN_CLAUSE (if_stmt)); return if_stmt; } /* Begin the else-clause of an if-statement. 
*/ void begin_else_clause (tree if_stmt) { ELSE_CLAUSE (if_stmt) = push_stmt_list (); } /* Finish the else-clause of an if-statement, which may be given by IF_STMT. */ void finish_else_clause (tree if_stmt) { ELSE_CLAUSE (if_stmt) = pop_stmt_list (ELSE_CLAUSE (if_stmt)); } /* Finish an if-statement. */ void finish_if_stmt (tree if_stmt) { tree scope = IF_SCOPE (if_stmt); IF_SCOPE (if_stmt) = NULL; add_stmt (do_poplevel (scope)); } /* Begin a while-statement. Returns a newly created WHILE_STMT if appropriate. */ tree begin_while_stmt (void) { tree r; r = build_stmt (input_location, WHILE_STMT, NULL_TREE, NULL_TREE); add_stmt (r); WHILE_BODY (r) = do_pushlevel (sk_block); begin_cond (&WHILE_COND (r)); return r; } /* Process the COND of a while-statement, which may be given by WHILE_STMT. */ void finish_while_stmt_cond (tree cond, tree while_stmt, bool ivdep) { if (check_no_cilk (cond, "Cilk array notation cannot be used as a condition for while statement", "%<_Cilk_spawn%> statement cannot be used as a condition for while statement")) cond = error_mark_node; cond = maybe_convert_cond (cond); finish_cond (&WHILE_COND (while_stmt), cond); begin_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) WHILE_COND (while_stmt) = build2 (ANNOTATE_EXPR, TREE_TYPE (WHILE_COND (while_stmt)), WHILE_COND (while_stmt), build_int_cst (integer_type_node, annot_expr_ivdep_kind)); simplify_loop_decl_cond (&WHILE_COND (while_stmt), WHILE_BODY (while_stmt)); } /* Finish a while-statement, which may be given by WHILE_STMT. */ void finish_while_stmt (tree while_stmt) { end_maybe_infinite_loop (boolean_true_node); WHILE_BODY (while_stmt) = do_poplevel (WHILE_BODY (while_stmt)); } /* Begin a do-statement. Returns a newly created DO_STMT if appropriate. 
*/ tree begin_do_stmt (void) { tree r = build_stmt (input_location, DO_STMT, NULL_TREE, NULL_TREE); begin_maybe_infinite_loop (boolean_true_node); add_stmt (r); DO_BODY (r) = push_stmt_list (); return r; } /* Finish the body of a do-statement, which may be given by DO_STMT. */ void finish_do_body (tree do_stmt) { tree body = DO_BODY (do_stmt) = pop_stmt_list (DO_BODY (do_stmt)); if (TREE_CODE (body) == STATEMENT_LIST && STATEMENT_LIST_TAIL (body)) body = STATEMENT_LIST_TAIL (body)->stmt; if (IS_EMPTY_STMT (body)) warning (OPT_Wempty_body, "suggest explicit braces around empty body in %<do%> statement"); } /* Finish a do-statement, which may be given by DO_STMT, and whose COND is as indicated. */ void finish_do_stmt (tree cond, tree do_stmt, bool ivdep) { if (check_no_cilk (cond, "Cilk array notation cannot be used as a condition for a do-while statement", "%<_Cilk_spawn%> statement cannot be used as a condition for a do-while statement")) cond = error_mark_node; cond = maybe_convert_cond (cond); end_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) cond = build2 (ANNOTATE_EXPR, TREE_TYPE (cond), cond, build_int_cst (integer_type_node, annot_expr_ivdep_kind)); DO_COND (do_stmt) = cond; } /* Finish a return-statement. The EXPRESSION returned, if any, is as indicated. */ tree finish_return_stmt (tree expr) { tree r; bool no_warning; expr = check_return_expr (expr, &no_warning); if (error_operand_p (expr) || (flag_openmp && !check_omp_return ())) { /* Suppress -Wreturn-type for this function. */ if (warn_return_type) TREE_NO_WARNING (current_function_decl) = true; return error_mark_node; } if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); if (DECL_DESTRUCTOR_P (current_function_decl) || (DECL_CONSTRUCTOR_P (current_function_decl) && targetm.cxx.cdtor_returns_this ())) { /* Similarly, all destructors must run destructors for base-classes before returning. 
So, all returns in a destructor get sent to the DTOR_LABEL; finish_function emits code to return a value there. */ return finish_goto_stmt (cdtor_label); } } r = build_stmt (input_location, RETURN_EXPR, expr); TREE_NO_WARNING (r) |= no_warning; r = maybe_cleanup_point_expr_void (r); r = add_stmt (r); return r; } /* Begin the scope of a for-statement or a range-for-statement. Both the returned trees are to be used in a call to begin_for_stmt or begin_range_for_stmt. */ tree begin_for_scope (tree *init) { tree scope = NULL_TREE; if (flag_new_for_scope > 0) scope = do_pushlevel (sk_for); if (processing_template_decl) *init = push_stmt_list (); else *init = NULL_TREE; return scope; } /* Begin a for-statement. Returns a new FOR_STMT. SCOPE and INIT should be the return of begin_for_scope, or both NULL_TREE */ tree begin_for_stmt (tree scope, tree init) { tree r; r = build_stmt (input_location, FOR_STMT, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); if (scope == NULL_TREE) { gcc_assert (!init || !(flag_new_for_scope > 0)); if (!init) scope = begin_for_scope (&init); } FOR_INIT_STMT (r) = init; FOR_SCOPE (r) = scope; return r; } /* Finish the for-init-statement of a for-statement, which may be given by FOR_STMT. */ void finish_for_init_stmt (tree for_stmt) { if (processing_template_decl) FOR_INIT_STMT (for_stmt) = pop_stmt_list (FOR_INIT_STMT (for_stmt)); add_stmt (for_stmt); FOR_BODY (for_stmt) = do_pushlevel (sk_block); begin_cond (&FOR_COND (for_stmt)); } /* Finish the COND of a for-statement, which may be given by FOR_STMT. 
*/ void finish_for_cond (tree cond, tree for_stmt, bool ivdep) { if (check_no_cilk (cond, "Cilk array notation cannot be used in a condition for a for-loop", "%<_Cilk_spawn%> statement cannot be used in a condition for a for-loop")) cond = error_mark_node; cond = maybe_convert_cond (cond); finish_cond (&FOR_COND (for_stmt), cond); begin_maybe_infinite_loop (cond); if (ivdep && cond != error_mark_node) FOR_COND (for_stmt) = build2 (ANNOTATE_EXPR, TREE_TYPE (FOR_COND (for_stmt)), FOR_COND (for_stmt), build_int_cst (integer_type_node, annot_expr_ivdep_kind)); simplify_loop_decl_cond (&FOR_COND (for_stmt), FOR_BODY (for_stmt)); } /* Finish the increment-EXPRESSION in a for-statement, which may be given by FOR_STMT. */ void finish_for_expr (tree expr, tree for_stmt) { if (!expr) return; /* If EXPR is an overloaded function, issue an error; there is no context available to use to perform overload resolution. */ if (type_unknown_p (expr)) { cxx_incomplete_type_error (expr, TREE_TYPE (expr)); expr = error_mark_node; } if (!processing_template_decl) { if (warn_sequence_point) verify_sequence_points (expr); expr = convert_to_void (expr, ICV_THIRD_IN_FOR, tf_warning_or_error); } else if (!type_dependent_expression_p (expr)) convert_to_void (build_non_dependent_expr (expr), ICV_THIRD_IN_FOR, tf_warning_or_error); expr = maybe_cleanup_point_expr_void (expr); if (check_for_bare_parameter_packs (expr)) expr = error_mark_node; FOR_EXPR (for_stmt) = expr; } /* Finish the body of a for-statement, which may be given by FOR_STMT. The increment-EXPR for the loop must be provided. It can also finish RANGE_FOR_STMT. */ void finish_for_stmt (tree for_stmt) { end_maybe_infinite_loop (boolean_true_node); if (TREE_CODE (for_stmt) == RANGE_FOR_STMT) RANGE_FOR_BODY (for_stmt) = do_poplevel (RANGE_FOR_BODY (for_stmt)); else FOR_BODY (for_stmt) = do_poplevel (FOR_BODY (for_stmt)); /* Pop the scope for the body of the loop. 
*/ if (flag_new_for_scope > 0) { tree scope; tree *scope_ptr = (TREE_CODE (for_stmt) == RANGE_FOR_STMT ? &RANGE_FOR_SCOPE (for_stmt) : &FOR_SCOPE (for_stmt)); scope = *scope_ptr; *scope_ptr = NULL; add_stmt (do_poplevel (scope)); } } /* Begin a range-for-statement. Returns a new RANGE_FOR_STMT. SCOPE and INIT should be the return of begin_for_scope, or both NULL_TREE . To finish it call finish_for_stmt(). */ tree begin_range_for_stmt (tree scope, tree init) { tree r; begin_maybe_infinite_loop (boolean_false_node); r = build_stmt (input_location, RANGE_FOR_STMT, NULL_TREE, NULL_TREE, NULL_TREE, NULL_TREE); if (scope == NULL_TREE) { gcc_assert (!init || !(flag_new_for_scope > 0)); if (!init) scope = begin_for_scope (&init); } /* RANGE_FOR_STMTs do not use nor save the init tree, so we pop it now. */ if (init) pop_stmt_list (init); RANGE_FOR_SCOPE (r) = scope; return r; } /* Finish the head of a range-based for statement, which may be given by RANGE_FOR_STMT. DECL must be the declaration and EXPR must be the loop expression. */ void finish_range_for_decl (tree range_for_stmt, tree decl, tree expr) { RANGE_FOR_DECL (range_for_stmt) = decl; RANGE_FOR_EXPR (range_for_stmt) = expr; add_stmt (range_for_stmt); RANGE_FOR_BODY (range_for_stmt) = do_pushlevel (sk_block); } /* Finish a break-statement. */ tree finish_break_stmt (void) { /* In switch statements break is sometimes stylistically used after a return statement. This can lead to spurious warnings about control reaching the end of a non-void function when it is inlined. Note that we are calling block_may_fallthru with language specific tree nodes; this works because block_may_fallthru returns true when given something it does not understand. */ if (!block_may_fallthru (cur_stmt_list)) return void_node; return add_stmt (build_stmt (input_location, BREAK_STMT)); } /* Finish a continue-statement. 
*/ tree finish_continue_stmt (void) { return add_stmt (build_stmt (input_location, CONTINUE_STMT)); } /* Begin a switch-statement. Returns a new SWITCH_STMT if appropriate. */ tree begin_switch_stmt (void) { tree r, scope; scope = do_pushlevel (sk_cond); r = build_stmt (input_location, SWITCH_STMT, NULL_TREE, NULL_TREE, NULL_TREE, scope); begin_cond (&SWITCH_STMT_COND (r)); return r; } /* Finish the cond of a switch-statement. */ void finish_switch_cond (tree cond, tree switch_stmt) { tree orig_type = NULL; if (check_no_cilk (cond, "Cilk array notation cannot be used as a condition for switch statement", "%<_Cilk_spawn%> statement cannot be used as a condition for switch statement")) cond = error_mark_node; if (!processing_template_decl) { /* Convert the condition to an integer or enumeration type. */ cond = build_expr_type_conversion (WANT_INT | WANT_ENUM, cond, true); if (cond == NULL_TREE) { error ("switch quantity not an integer"); cond = error_mark_node; } /* We want unlowered type here to handle enum bit-fields. */ orig_type = unlowered_expr_type (cond); if (TREE_CODE (orig_type) != ENUMERAL_TYPE) orig_type = TREE_TYPE (cond); if (cond != error_mark_node) { /* Warn if the condition has boolean value. */ if (TREE_CODE (orig_type) == BOOLEAN_TYPE) warning_at (input_location, OPT_Wswitch_bool, "switch condition has type bool"); /* [stmt.switch] Integral promotions are performed. */ cond = perform_integral_promotions (cond); cond = maybe_cleanup_point_expr (cond); } } if (check_for_bare_parameter_packs (cond)) cond = error_mark_node; else if (!processing_template_decl && warn_sequence_point) verify_sequence_points (cond); finish_cond (&SWITCH_STMT_COND (switch_stmt), cond); SWITCH_STMT_TYPE (switch_stmt) = orig_type; add_stmt (switch_stmt); push_switch (switch_stmt); SWITCH_STMT_BODY (switch_stmt) = push_stmt_list (); } /* Finish the body of a switch-statement, which may be given by SWITCH_STMT. The COND to switch on is indicated. 
*/

void
finish_switch_stmt (tree switch_stmt)
{
  tree scope;

  /* Close the body statement list and pop the case-label stack entry.  */
  SWITCH_STMT_BODY (switch_stmt)
    = pop_stmt_list (SWITCH_STMT_BODY (switch_stmt));
  pop_switch ();

  /* Pop the condition's binding level (opened in begin_switch_stmt)
     and emit any cleanups it requires.  */
  scope = SWITCH_STMT_SCOPE (switch_stmt);
  SWITCH_STMT_SCOPE (switch_stmt) = NULL;
  add_stmt (do_poplevel (scope));
}

/* Begin a try-block.  Returns a newly-created TRY_BLOCK if
   appropriate.  */

tree
begin_try_block (void)
{
  tree r = build_stmt (input_location, TRY_BLOCK, NULL_TREE, NULL_TREE);
  add_stmt (r);
  TRY_STMTS (r) = push_stmt_list ();
  return r;
}

/* Likewise, for a function-try-block.  The block returned in
   *COMPOUND_STMT is an artificial outer scope, containing the
   function-try-block.  */

tree
begin_function_try_block (tree *compound_stmt)
{
  tree r;
  /* This outer scope does not exist in the C++ standard, but we need
     a place to put __FUNCTION__ and similar variables.  */
  *compound_stmt = begin_compound_stmt (0);
  r = begin_try_block ();
  FN_TRY_BLOCK_P (r) = 1;
  return r;
}

/* Finish a try-block, which may be given by TRY_BLOCK.  Closes the
   try-body statement list and opens the one for the handlers.  */

void
finish_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
  TRY_HANDLERS (try_block) = push_stmt_list ();
}

/* Finish the body of a cleanup try-block, which may be given by
   TRY_BLOCK.  */

void
finish_cleanup_try_block (tree try_block)
{
  TRY_STMTS (try_block) = pop_stmt_list (TRY_STMTS (try_block));
}

/* Finish an implicitly generated try-block, with a cleanup is given
   by CLEANUP.  */

void
finish_cleanup (tree cleanup, tree try_block)
{
  TRY_HANDLERS (try_block) = cleanup;
  CLEANUP_P (try_block) = 1;
}

/* Likewise, for a function-try-block.  */

void
finish_function_try_block (tree try_block)
{
  finish_try_block (try_block);
  /* FIXME : something queer about CTOR_INITIALIZER somehow following
     the try block, but moving it inside.  */
  in_function_try_handler = 1;
}

/* Finish a handler-sequence for a try-block, which may be given by
   TRY_BLOCK.
*/

void
finish_handler_sequence (tree try_block)
{
  TRY_HANDLERS (try_block) = pop_stmt_list (TRY_HANDLERS (try_block));
  /* Diagnose unreachable handlers, e.g. a catch(...) that is not last.  */
  check_handlers (TRY_HANDLERS (try_block));
}

/* Finish the handler-seq for a function-try-block, given by
   TRY_BLOCK.  COMPOUND_STMT is the outer block created by
   begin_function_try_block.  */

void
finish_function_handler_sequence (tree try_block, tree compound_stmt)
{
  in_function_try_handler = 0;
  finish_handler_sequence (try_block);
  finish_compound_stmt (compound_stmt);
}

/* Begin a handler.  Returns a HANDLER if appropriate.  */

tree
begin_handler (void)
{
  tree r;

  r = build_stmt (input_location, HANDLER, NULL_TREE, NULL_TREE);
  add_stmt (r);

  /* Create a binding level for the eh_info and the exception object
     cleanup.  */
  HANDLER_BODY (r) = do_pushlevel (sk_catch);

  return r;
}

/* Finish the handler-parameters for a handler, which may be given by
   HANDLER.  DECL is the declaration for the catch parameter, or NULL
   if this is a `catch (...)' clause.  */

void
finish_handler_parms (tree decl, tree handler)
{
  tree type = NULL_TREE;
  if (processing_template_decl)
    {
      if (decl)
        {
          decl = pushdecl (decl);
          decl = push_template_decl (decl);
          HANDLER_PARMS (handler) = decl;
          type = TREE_TYPE (decl);
        }
    }
  else
    /* Outside a template, set up the runtime catch machinery; it
       returns the caught type (NULL_TREE for catch (...)).  */
    type = expand_start_catch_block (decl);
  HANDLER_TYPE (handler) = type;
}

/* Finish a handler, which may be given by HANDLER.  The BLOCKs are
   the return value from the matching call to finish_handler_parms.  */

void
finish_handler (tree handler)
{
  if (!processing_template_decl)
    expand_end_catch_block ();
  HANDLER_BODY (handler) = do_poplevel (HANDLER_BODY (handler));
}

/* Begin a compound statement.  FLAGS contains some bits that control the
   behavior and context.  If BCS_NO_SCOPE is set, the compound statement
   does not define a scope.  If BCS_FN_BODY is set, this is the outermost
   block of a function.  If BCS_TRY_BLOCK is set, this is the block
   created on behalf of a TRY statement.  Returns a token to be passed to
   finish_compound_stmt.
*/

tree
begin_compound_stmt (unsigned int flags)
{
  tree r;

  if (flags & BCS_NO_SCOPE)
    {
      r = push_stmt_list ();
      STATEMENT_LIST_NO_SCOPE (r) = 1;

      /* Normally, we try hard to keep the BLOCK for a statement-expression.
         But, if it's a statement-expression with a scopeless block, there's
         nothing to keep, and we don't want to accidentally keep a block
         *inside* the scopeless block.  */
      keep_next_level (false);
    }
  else
    r = do_pushlevel (flags & BCS_TRY_BLOCK ? sk_try : sk_block);

  /* When processing a template, we need to remember where the braces were,
     so that we can set up identical scopes when instantiating the template
     later.  BIND_EXPR is a handy candidate for this.
     Note that do_poplevel won't create a BIND_EXPR itself here (and thus
     result in nested BIND_EXPRs), since we don't build BLOCK nodes when
     processing templates.  */
  if (processing_template_decl)
    {
      r = build3 (BIND_EXPR, NULL, NULL, r, NULL);
      BIND_EXPR_TRY_BLOCK (r) = (flags & BCS_TRY_BLOCK) != 0;
      BIND_EXPR_BODY_BLOCK (r) = (flags & BCS_FN_BODY) != 0;
      TREE_SIDE_EFFECTS (r) = 1;
    }

  return r;
}

/* Finish a compound-statement, which is given by STMT.  */

void
finish_compound_stmt (tree stmt)
{
  if (TREE_CODE (stmt) == BIND_EXPR)
    {
      tree body = do_poplevel (BIND_EXPR_BODY (stmt));
      /* If the STATEMENT_LIST is empty and this BIND_EXPR isn't special,
         discard the BIND_EXPR so it can be merged with the containing
         STATEMENT_LIST.  */
      if (TREE_CODE (body) == STATEMENT_LIST
          && STATEMENT_LIST_HEAD (body) == NULL
          && !BIND_EXPR_BODY_BLOCK (stmt)
          && !BIND_EXPR_TRY_BLOCK (stmt))
        stmt = body;
      else
        BIND_EXPR_BODY (stmt) = body;
    }
  else if (STATEMENT_LIST_NO_SCOPE (stmt))
    stmt = pop_stmt_list (stmt);
  else
    {
      /* Destroy any ObjC "super" receivers that may have been
         created.  */
      objc_clear_super_receiver ();

      stmt = do_poplevel (stmt);
    }

  /* ??? See c_end_compound_stmt wrt statement expressions.  */
  add_stmt (stmt);
}

/* Finish an asm-statement, whose components are a STRING, some
   OUTPUT_OPERANDS, some INPUT_OPERANDS, some CLOBBERS and some
   LABELS.
Also note whether the asm-statement should be considered volatile. */ tree finish_asm_stmt (int volatile_p, tree string, tree output_operands, tree input_operands, tree clobbers, tree labels) { tree r; tree t; int ninputs = list_length (input_operands); int noutputs = list_length (output_operands); if (!processing_template_decl) { const char *constraint; const char **oconstraints; bool allows_mem, allows_reg, is_inout; tree operand; int i; oconstraints = XALLOCAVEC (const char *, noutputs); string = resolve_asm_operand_names (string, output_operands, input_operands, labels); for (i = 0, t = output_operands; t; t = TREE_CHAIN (t), ++i) { operand = TREE_VALUE (t); /* ??? Really, this should not be here. Users should be using a proper lvalue, dammit. But there's a long history of using casts in the output operands. In cases like longlong.h, this becomes a primitive form of typechecking -- if the cast can be removed, then the output operand had a type of the proper width; otherwise we'll get an error. Gross, but ... */ STRIP_NOPS (operand); operand = mark_lvalue_use (operand); if (!lvalue_or_else (operand, lv_asm, tf_warning_or_error)) operand = error_mark_node; if (operand != error_mark_node && (TREE_READONLY (operand) || CP_TYPE_CONST_P (TREE_TYPE (operand)) /* Functions are not modifiable, even though they are lvalues. */ || TREE_CODE (TREE_TYPE (operand)) == FUNCTION_TYPE || TREE_CODE (TREE_TYPE (operand)) == METHOD_TYPE /* If it's an aggregate and any field is const, then it is effectively const. */ || (CLASS_TYPE_P (TREE_TYPE (operand)) && C_TYPE_FIELDS_READONLY (TREE_TYPE (operand))))) cxx_readonly_error (operand, lv_asm); constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); oconstraints[i] = constraint; if (parse_output_constraint (&constraint, i, ninputs, noutputs, &allows_mem, &allows_reg, &is_inout)) { /* If the operand is going to end up in memory, mark it addressable. 
*/ if (!allows_reg && !cxx_mark_addressable (operand)) operand = error_mark_node; } else operand = error_mark_node; TREE_VALUE (t) = operand; } for (i = 0, t = input_operands; t; ++i, t = TREE_CHAIN (t)) { constraint = TREE_STRING_POINTER (TREE_VALUE (TREE_PURPOSE (t))); bool constraint_parsed = parse_input_constraint (&constraint, i, ninputs, noutputs, 0, oconstraints, &allows_mem, &allows_reg); /* If the operand is going to end up in memory, don't call decay_conversion. */ if (constraint_parsed && !allows_reg && allows_mem) operand = mark_lvalue_use (TREE_VALUE (t)); else operand = decay_conversion (TREE_VALUE (t), tf_warning_or_error); /* If the type of the operand hasn't been determined (e.g., because it involves an overloaded function), then issue an error message. There's no context available to resolve the overloading. */ if (TREE_TYPE (operand) == unknown_type_node) { error ("type of asm operand %qE could not be determined", TREE_VALUE (t)); operand = error_mark_node; } if (constraint_parsed) { /* If the operand is going to end up in memory, mark it addressable. */ if (!allows_reg && allows_mem) { /* Strip the nops as we allow this case. FIXME, this really should be rejected or made deprecated. */ STRIP_NOPS (operand); if (!cxx_mark_addressable (operand)) operand = error_mark_node; } else if (!allows_reg && !allows_mem) { /* If constraint allows neither register nor memory, try harder to get a constant. */ tree constop = maybe_constant_value (operand); if (TREE_CONSTANT (constop)) operand = constop; } } else operand = error_mark_node; TREE_VALUE (t) = operand; } } r = build_stmt (input_location, ASM_EXPR, string, output_operands, input_operands, clobbers, labels); ASM_VOLATILE_P (r) = volatile_p || noutputs == 0; r = maybe_cleanup_point_expr_void (r); return add_stmt (r); } /* Finish a label with the indicated NAME. Returns the new label. 
*/ tree finish_label_stmt (tree name) { tree decl = define_label (input_location, name); if (decl == error_mark_node) return error_mark_node; add_stmt (build_stmt (input_location, LABEL_EXPR, decl)); return decl; } /* Finish a series of declarations for local labels. G++ allows users to declare "local" labels, i.e., labels with scope. This extension is useful when writing code involving statement-expressions. */ void finish_label_decl (tree name) { if (!at_function_scope_p ()) { error ("__label__ declarations are only allowed in function scopes"); return; } add_decl_expr (declare_local_label (name)); } /* When DECL goes out of scope, make sure that CLEANUP is executed. */ void finish_decl_cleanup (tree decl, tree cleanup) { push_cleanup (decl, cleanup, false); } /* If the current scope exits with an exception, run CLEANUP. */ void finish_eh_cleanup (tree cleanup) { push_cleanup (NULL, cleanup, true); } /* The MEM_INITS is a list of mem-initializers, in reverse of the order they were written by the user. Each node is as for emit_mem_initializers. */ void finish_mem_initializers (tree mem_inits) { /* Reorder the MEM_INITS so that they are in the order they appeared in the source program. */ mem_inits = nreverse (mem_inits); if (processing_template_decl) { tree mem; for (mem = mem_inits; mem; mem = TREE_CHAIN (mem)) { /* If the TREE_PURPOSE is a TYPE_PACK_EXPANSION, skip the check for bare parameter packs in the TREE_VALUE, because any parameter packs in the TREE_VALUE have already been bound as part of the TREE_PURPOSE. See make_pack_expansion for more information. 
*/ if (TREE_CODE (TREE_PURPOSE (mem)) != TYPE_PACK_EXPANSION && check_for_bare_parameter_packs (TREE_VALUE (mem))) TREE_VALUE (mem) = error_mark_node; } add_stmt (build_min_nt_loc (UNKNOWN_LOCATION, CTOR_INITIALIZER, mem_inits)); } else emit_mem_initializers (mem_inits); } /* Obfuscate EXPR if it looks like an id-expression or member access so that the call to finish_decltype in do_auto_deduction will give the right result. */ tree force_paren_expr (tree expr) { /* This is only needed for decltype(auto) in C++14. */ if (cxx_dialect < cxx14) return expr; /* If we're in unevaluated context, we can't be deducing a return/initializer type, so we don't need to mess with this. */ if (cp_unevaluated_operand) return expr; if (!DECL_P (expr) && TREE_CODE (expr) != COMPONENT_REF && TREE_CODE (expr) != SCOPE_REF) return expr; if (TREE_CODE (expr) == COMPONENT_REF) REF_PARENTHESIZED_P (expr) = true; else if (type_dependent_expression_p (expr)) expr = build1 (PAREN_EXPR, TREE_TYPE (expr), expr); else { cp_lvalue_kind kind = lvalue_kind (expr); if ((kind & ~clk_class) != clk_none) { tree type = unlowered_expr_type (expr); bool rval = !!(kind & clk_rvalueref); type = cp_build_reference_type (type, rval); /* This inhibits warnings in, eg, cxx_mark_addressable (c++/60955). */ warning_sentinel s (extra_warnings); expr = build_static_cast (type, expr, tf_error); if (expr != error_mark_node) REF_PARENTHESIZED_P (expr) = true; } } return expr; } /* Finish a parenthesized expression EXPR. */ tree finish_parenthesized_expr (tree expr) { if (EXPR_P (expr)) /* This inhibits warnings in c_common_truthvalue_conversion. */ TREE_NO_WARNING (expr) = 1; if (TREE_CODE (expr) == OFFSET_REF || TREE_CODE (expr) == SCOPE_REF) /* [expr.unary.op]/3 The qualified id of a pointer-to-member must not be enclosed in parentheses. 
*/ PTRMEM_OK_P (expr) = 0; if (TREE_CODE (expr) == STRING_CST) PAREN_STRING_LITERAL_P (expr) = 1; expr = force_paren_expr (expr); return expr; } /* Finish a reference to a non-static data member (DECL) that is not preceded by `.' or `->'. */ tree finish_non_static_data_member (tree decl, tree object, tree qualifying_scope) { gcc_assert (TREE_CODE (decl) == FIELD_DECL); if (!object) { tree scope = qualifying_scope; if (scope == NULL_TREE) scope = context_for_name_lookup (decl); object = maybe_dummy_object (scope, NULL); } object = maybe_resolve_dummy (object, true); if (object == error_mark_node) return error_mark_node; /* DR 613/850: Can use non-static data members without an associated object in sizeof/decltype/alignof. */ if (is_dummy_object (object) && cp_unevaluated_operand == 0 && (!processing_template_decl || !current_class_ref)) { if (current_function_decl && DECL_STATIC_FUNCTION_P (current_function_decl)) error ("invalid use of member %qD in static member function", decl); else error ("invalid use of non-static data member %qD", decl); inform (DECL_SOURCE_LOCATION (decl), "declared here"); return error_mark_node; } if (current_class_ptr) TREE_USED (current_class_ptr) = 1; if (processing_template_decl && !qualifying_scope) { tree type = TREE_TYPE (decl); if (TREE_CODE (type) == REFERENCE_TYPE) /* Quals on the object don't matter. */; else if (PACK_EXPANSION_P (type)) /* Don't bother trying to represent this. */ type = NULL_TREE; else { /* Set the cv qualifiers. */ int quals = cp_type_quals (TREE_TYPE (object)); if (DECL_MUTABLE_P (decl)) quals &= ~TYPE_QUAL_CONST; quals |= cp_type_quals (TREE_TYPE (decl)); type = cp_build_qualified_type (type, quals); } return (convert_from_reference (build_min (COMPONENT_REF, type, object, decl, NULL_TREE))); } /* If PROCESSING_TEMPLATE_DECL is nonzero here, then QUALIFYING_SCOPE is also non-null. Wrap this in a SCOPE_REF for now. 
*/ else if (processing_template_decl) return build_qualified_name (TREE_TYPE (decl), qualifying_scope, decl, /*template_p=*/false); else { tree access_type = TREE_TYPE (object); perform_or_defer_access_check (TYPE_BINFO (access_type), decl, decl, tf_warning_or_error); /* If the data member was named `C::M', convert `*this' to `C' first. */ if (qualifying_scope) { tree binfo = NULL_TREE; object = build_scoped_ref (object, qualifying_scope, &binfo); } return build_class_member_access_expr (object, decl, /*access_path=*/NULL_TREE, /*preserve_reference=*/false, tf_warning_or_error); } } /* If we are currently parsing a template and we encountered a typedef TYPEDEF_DECL that is being accessed though CONTEXT, this function adds the typedef to a list tied to the current template. At template instantiation time, that list is walked and access check performed for each typedef. LOCATION is the location of the usage point of TYPEDEF_DECL. */ void add_typedef_to_current_template_for_access_check (tree typedef_decl, tree context, location_t location) { tree template_info = NULL; tree cs = current_scope (); if (!is_typedef_decl (typedef_decl) || !context || !CLASS_TYPE_P (context) || !cs) return; if (CLASS_TYPE_P (cs) || TREE_CODE (cs) == FUNCTION_DECL) template_info = get_template_info (cs); if (template_info && TI_TEMPLATE (template_info) && !currently_open_class (context)) append_type_to_template_for_access_check (cs, typedef_decl, context, location); } /* DECL was the declaration to which a qualified-id resolved. Issue an error message if it is not accessible. If OBJECT_TYPE is non-NULL, we have just seen `x->' or `x.' and OBJECT_TYPE is the type of `*x', or `x', respectively. If the DECL was named as `A::B' then NESTED_NAME_SPECIFIER is `A'. 
*/ void check_accessibility_of_qualified_id (tree decl, tree object_type, tree nested_name_specifier) { tree scope; tree qualifying_type = NULL_TREE; /* If we are parsing a template declaration and if decl is a typedef, add it to a list tied to the template. At template instantiation time, that list will be walked and access check performed. */ add_typedef_to_current_template_for_access_check (decl, nested_name_specifier ? nested_name_specifier : DECL_CONTEXT (decl), input_location); /* If we're not checking, return immediately. */ if (deferred_access_no_check) return; /* Determine the SCOPE of DECL. */ scope = context_for_name_lookup (decl); /* If the SCOPE is not a type, then DECL is not a member. */ if (!TYPE_P (scope)) return; /* Compute the scope through which DECL is being accessed. */ if (object_type /* OBJECT_TYPE might not be a class type; consider: class A { typedef int I; }; I *p; p->A::I::~I(); In this case, we will have "A::I" as the DECL, but "I" as the OBJECT_TYPE. */ && CLASS_TYPE_P (object_type) && DERIVED_FROM_P (scope, object_type)) /* If we are processing a `->' or `.' expression, use the type of the left-hand side. */ qualifying_type = object_type; else if (nested_name_specifier) { /* If the reference is to a non-static member of the current class, treat it as if it were referenced through `this'. */ tree ct; if (DECL_NONSTATIC_MEMBER_P (decl) && current_class_ptr && DERIVED_FROM_P (scope, ct = current_nonlambda_class_type ())) qualifying_type = ct; /* Otherwise, use the type indicated by the nested-name-specifier. */ else qualifying_type = nested_name_specifier; } else /* Otherwise, the name must be from the current class or one of its bases. */ qualifying_type = currently_open_derived_class (scope); if (qualifying_type /* It is possible for qualifying type to be a TEMPLATE_TYPE_PARM or similar in a default argument value. 
*/ && CLASS_TYPE_P (qualifying_type) && !dependent_type_p (qualifying_type)) perform_or_defer_access_check (TYPE_BINFO (qualifying_type), decl, decl, tf_warning_or_error); } /* EXPR is the result of a qualified-id. The QUALIFYING_CLASS was the class named to the left of the "::" operator. DONE is true if this expression is a complete postfix-expression; it is false if this expression is followed by '->', '[', '(', etc. ADDRESS_P is true iff this expression is the operand of '&'. TEMPLATE_P is true iff the qualified-id was of the form "A::template B". TEMPLATE_ARG_P is true iff this qualified name appears as a template argument. */ tree finish_qualified_id_expr (tree qualifying_class, tree expr, bool done, bool address_p, bool template_p, bool template_arg_p, tsubst_flags_t complain) { gcc_assert (TYPE_P (qualifying_class)); if (error_operand_p (expr)) return error_mark_node; if ((DECL_P (expr) || BASELINK_P (expr)) && !mark_used (expr, complain)) return error_mark_node; if (template_p) check_template_keyword (expr); /* If EXPR occurs as the operand of '&', use special handling that permits a pointer-to-member. */ if (address_p && done) { if (TREE_CODE (expr) == SCOPE_REF) expr = TREE_OPERAND (expr, 1); expr = build_offset_ref (qualifying_class, expr, /*address_p=*/true, complain); return expr; } /* No need to check access within an enum. */ if (TREE_CODE (qualifying_class) == ENUMERAL_TYPE) return expr; /* Within the scope of a class, turn references to non-static members into expression of the form "this->...". */ if (template_arg_p) /* But, within a template argument, we do not want make the transformation, as there is no "this" pointer. */ ; else if (TREE_CODE (expr) == FIELD_DECL) { push_deferring_access_checks (dk_no_check); expr = finish_non_static_data_member (expr, NULL_TREE, qualifying_class); pop_deferring_access_checks (); } else if (BASELINK_P (expr) && !processing_template_decl) { /* See if any of the functions are non-static members. 
*/ /* If so, the expression may be relative to 'this'. */ if (!shared_member_p (expr) && current_class_ptr && DERIVED_FROM_P (qualifying_class, current_nonlambda_class_type ())) expr = (build_class_member_access_expr (maybe_dummy_object (qualifying_class, NULL), expr, BASELINK_ACCESS_BINFO (expr), /*preserve_reference=*/false, complain)); else if (done) /* The expression is a qualified name whose address is not being taken. */ expr = build_offset_ref (qualifying_class, expr, /*address_p=*/false, complain); } else if (BASELINK_P (expr)) ; else { /* In a template, return a SCOPE_REF for most qualified-ids so that we can check access at instantiation time. But if we're looking at a member of the current instantiation, we know we have access and building up the SCOPE_REF confuses non-type template argument handling. */ if (processing_template_decl && !currently_open_class (qualifying_class)) expr = build_qualified_name (TREE_TYPE (expr), qualifying_class, expr, template_p); expr = convert_from_reference (expr); } return expr; } /* Begin a statement-expression. The value returned must be passed to finish_stmt_expr. */ tree begin_stmt_expr (void) { return push_stmt_list (); } /* Process the final expression of a statement expression. EXPR can be NULL, if the final expression is empty. Return a STATEMENT_LIST containing all the statements in the statement-expression, or ERROR_MARK_NODE if there was an error. */ tree finish_stmt_expr_expr (tree expr, tree stmt_expr) { if (error_operand_p (expr)) { /* The type of the statement-expression is the type of the last expression. */ TREE_TYPE (stmt_expr) = error_mark_node; return error_mark_node; } /* If the last statement does not have "void" type, then the value of the last statement is the value of the entire expression. 
*/ if (expr) { tree type = TREE_TYPE (expr); if (processing_template_decl) { expr = build_stmt (input_location, EXPR_STMT, expr); expr = add_stmt (expr); /* Mark the last statement so that we can recognize it as such at template-instantiation time. */ EXPR_STMT_STMT_EXPR_RESULT (expr) = 1; } else if (VOID_TYPE_P (type)) { /* Just treat this like an ordinary statement. */ expr = finish_expr_stmt (expr); } else { /* It actually has a value we need to deal with. First, force it to be an rvalue so that we won't need to build up a copy constructor call later when we try to assign it to something. */ expr = force_rvalue (expr, tf_warning_or_error); if (error_operand_p (expr)) return error_mark_node; /* Update for array-to-pointer decay. */ type = TREE_TYPE (expr); /* Wrap it in a CLEANUP_POINT_EXPR and add it to the list like a normal statement, but don't convert to void or actually add the EXPR_STMT. */ if (TREE_CODE (expr) != CLEANUP_POINT_EXPR) expr = maybe_cleanup_point_expr (expr); add_stmt (expr); } /* The type of the statement-expression is the type of the last expression. */ TREE_TYPE (stmt_expr) = type; } return stmt_expr; } /* Finish a statement-expression. EXPR should be the value returned by the previous begin_stmt_expr. Returns an expression representing the statement-expression. 
*/

tree
finish_stmt_expr (tree stmt_expr, bool has_no_scope)
{
  tree type;
  tree result;

  if (error_operand_p (stmt_expr))
    {
      pop_stmt_list (stmt_expr);
      return error_mark_node;
    }

  gcc_assert (TREE_CODE (stmt_expr) == STATEMENT_LIST);

  /* finish_stmt_expr_expr stashed the result type on the list;
     carry it over to the popped statement list.  */
  type = TREE_TYPE (stmt_expr);
  result = pop_stmt_list (stmt_expr);
  TREE_TYPE (result) = type;

  if (processing_template_decl)
    {
      result = build_min (STMT_EXPR, type, result);
      TREE_SIDE_EFFECTS (result) = 1;
      STMT_EXPR_NO_SCOPE (result) = has_no_scope;
    }
  else if (CLASS_TYPE_P (type))
    {
      /* Wrap the statement-expression in a TARGET_EXPR so that the
         temporary object created by the final expression is destroyed at
         the end of the full-expression containing the
         statement-expression.  */
      result = force_target_expr (type, result, tf_warning_or_error);
    }

  return result;
}

/* Returns the expression which provides the value of STMT_EXPR.  */

tree
stmt_expr_value_expr (tree stmt_expr)
{
  tree t = STMT_EXPR_STMT (stmt_expr);

  if (TREE_CODE (t) == BIND_EXPR)
    t = BIND_EXPR_BODY (t);

  if (TREE_CODE (t) == STATEMENT_LIST && STATEMENT_LIST_TAIL (t))
    t = STATEMENT_LIST_TAIL (t)->stmt;

  if (TREE_CODE (t) == EXPR_STMT)
    t = EXPR_STMT_EXPR (t);

  return t;
}

/* Return TRUE iff EXPR_STMT is an empty list of
   expression statements.  */

bool
empty_expr_stmt_p (tree expr_stmt)
{
  tree body = NULL_TREE;

  if (expr_stmt == void_node)
    return true;

  if (expr_stmt)
    {
      if (TREE_CODE (expr_stmt) == EXPR_STMT)
        body = EXPR_STMT_EXPR (expr_stmt);
      else if (TREE_CODE (expr_stmt) == STATEMENT_LIST)
        body = expr_stmt;
    }

  if (body)
    {
      if (TREE_CODE (body) == STATEMENT_LIST)
        return tsi_end_p (tsi_start (body));
      else
        return empty_expr_stmt_p (body);
    }
  return false;
}

/* Perform Koenig lookup.  FN is the postfix-expression representing
   the function (or functions) to call; ARGS are the arguments to the
   call.  Returns the functions to be considered by overload resolution.
*/

tree
perform_koenig_lookup (tree fn, vec<tree, va_gc> *args,
                       tsubst_flags_t complain)
{
  tree identifier = NULL_TREE;
  tree functions = NULL_TREE;
  tree tmpl_args = NULL_TREE;
  bool template_id = false;

  if (TREE_CODE (fn) == TEMPLATE_ID_EXPR)
    {
      /* Use a separate flag to handle null args.  */
      template_id = true;
      tmpl_args = TREE_OPERAND (fn, 1);
      fn = TREE_OPERAND (fn, 0);
    }

  /* Find the name of the overloaded function.  */
  if (identifier_p (fn))
    identifier = fn;
  else if (is_overloaded_fn (fn))
    {
      functions = fn;
      identifier = DECL_NAME (get_first_fn (functions));
    }
  else if (DECL_P (fn))
    {
      functions = fn;
      identifier = DECL_NAME (fn);
    }

  /* A call to a namespace-scope function using an unqualified name.

     Do Koenig lookup -- unless any of the arguments are
     type-dependent.  */
  if (!any_type_dependent_arguments_p (args)
      && !any_dependent_template_arguments_p (tmpl_args))
    {
      fn = lookup_arg_dependent (identifier, functions, args);
      if (!fn)
        {
          /* The unqualified name could not be resolved.  */
          if (complain)
            fn = unqualified_fn_lookup_error (identifier);
          else
            fn = identifier;
        }
    }

  /* Re-wrap the explicit template arguments stripped above.  */
  if (fn && template_id)
    fn = build2 (TEMPLATE_ID_EXPR, unknown_type_node, fn, tmpl_args);

  return fn;
}

/* Generate an expression for `FN (ARGS)'.  This may change the
   contents of ARGS.

   If DISALLOW_VIRTUAL is true, the call to FN will be not generated
   as a virtual call, even if FN is virtual.  (This flag is set when
   encountering an expression where the function name is explicitly
   qualified.  For example a call to `X::f' never generates a virtual
   call.)

   Returns code for the call.
*/ tree finish_call_expr (tree fn, vec<tree, va_gc> **args, bool disallow_virtual, bool koenig_p, tsubst_flags_t complain) { tree result; tree orig_fn; vec<tree, va_gc> *orig_args = NULL; if (fn == error_mark_node) return error_mark_node; gcc_assert (!TYPE_P (fn)); orig_fn = fn; if (processing_template_decl) { /* If the call expression is dependent, build a CALL_EXPR node with no type; type_dependent_expression_p recognizes expressions with no type as being dependent. */ if (type_dependent_expression_p (fn) || any_type_dependent_arguments_p (*args) /* For a non-static member function that doesn't have an explicit object argument, we need to specifically test the type dependency of the "this" pointer because it is not included in *ARGS even though it is considered to be part of the list of arguments. Note that this is related to CWG issues 515 and 1005. */ || (TREE_CODE (fn) != COMPONENT_REF && non_static_member_function_p (fn) && current_class_ref && type_dependent_expression_p (current_class_ref))) { result = build_nt_call_vec (fn, *args); SET_EXPR_LOCATION (result, EXPR_LOC_OR_LOC (fn, input_location)); KOENIG_LOOKUP_P (result) = koenig_p; if (cfun) { do { tree fndecl = OVL_CURRENT (fn); if (TREE_CODE (fndecl) != FUNCTION_DECL || !TREE_THIS_VOLATILE (fndecl)) break; fn = OVL_NEXT (fn); } while (fn); if (!fn) current_function_returns_abnormally = 1; } return result; } orig_args = make_tree_vector_copy (*args); if (!BASELINK_P (fn) && TREE_CODE (fn) != PSEUDO_DTOR_EXPR && TREE_TYPE (fn) != unknown_type_node) fn = build_non_dependent_expr (fn); make_args_non_dependent (*args); } if (TREE_CODE (fn) == COMPONENT_REF) { tree member = TREE_OPERAND (fn, 1); if (BASELINK_P (member)) { tree object = TREE_OPERAND (fn, 0); return build_new_method_call (object, member, args, NULL_TREE, (disallow_virtual ? LOOKUP_NORMAL | LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, complain); } } /* Per 13.3.1.1, '(&f)(...)' is the same as '(f)(...)'. 
*/ if (TREE_CODE (fn) == ADDR_EXPR && TREE_CODE (TREE_OPERAND (fn, 0)) == OVERLOAD) fn = TREE_OPERAND (fn, 0); if (is_overloaded_fn (fn)) fn = baselink_for_fns (fn); result = NULL_TREE; if (BASELINK_P (fn)) { tree object; /* A call to a member function. From [over.call.func]: If the keyword this is in scope and refers to the class of that member function, or a derived class thereof, then the function call is transformed into a qualified function call using (*this) as the postfix-expression to the left of the . operator.... [Otherwise] a contrived object of type T becomes the implied object argument. In this situation: struct A { void f(); }; struct B : public A {}; struct C : public A { void g() { B::f(); }}; "the class of that member function" refers to `A'. But 11.2 [class.access.base] says that we need to convert 'this' to B* as part of the access, so we pass 'B' to maybe_dummy_object. */ object = maybe_dummy_object (BINFO_TYPE (BASELINK_ACCESS_BINFO (fn)), NULL); if (processing_template_decl) { if (type_dependent_expression_p (object)) { tree ret = build_nt_call_vec (orig_fn, orig_args); release_tree_vector (orig_args); return ret; } object = build_non_dependent_expr (object); } result = build_new_method_call (object, fn, args, NULL_TREE, (disallow_virtual ? LOOKUP_NORMAL|LOOKUP_NONVIRTUAL : LOOKUP_NORMAL), /*fn_p=*/NULL, complain); } else if (is_overloaded_fn (fn)) { /* If the function is an overloaded builtin, resolve it. 
*/ if (TREE_CODE (fn) == FUNCTION_DECL && (DECL_BUILT_IN_CLASS (fn) == BUILT_IN_NORMAL || DECL_BUILT_IN_CLASS (fn) == BUILT_IN_MD)) result = resolve_overloaded_builtin (input_location, fn, *args); if (!result) { if (warn_sizeof_pointer_memaccess && !vec_safe_is_empty (*args) && !processing_template_decl) { location_t sizeof_arg_loc[3]; tree sizeof_arg[3]; unsigned int i; for (i = 0; i < 3; i++) { tree t; sizeof_arg_loc[i] = UNKNOWN_LOCATION; sizeof_arg[i] = NULL_TREE; if (i >= (*args)->length ()) continue; t = (**args)[i]; if (TREE_CODE (t) != SIZEOF_EXPR) continue; if (SIZEOF_EXPR_TYPE_P (t)) sizeof_arg[i] = TREE_TYPE (TREE_OPERAND (t, 0)); else sizeof_arg[i] = TREE_OPERAND (t, 0); sizeof_arg_loc[i] = EXPR_LOCATION (t); } sizeof_pointer_memaccess_warning (sizeof_arg_loc, fn, *args, sizeof_arg, same_type_ignoring_top_level_qualifiers_p); } /* A call to a namespace-scope function. */ result = build_new_function_call (fn, args, koenig_p, complain); } } else if (TREE_CODE (fn) == PSEUDO_DTOR_EXPR) { if (!vec_safe_is_empty (*args)) error ("arguments to destructor are not allowed"); /* Mark the pseudo-destructor call as having side-effects so that we do not issue warnings about its use. */ result = build1 (NOP_EXPR, void_type_node, TREE_OPERAND (fn, 0)); TREE_SIDE_EFFECTS (result) = 1; } else if (CLASS_TYPE_P (TREE_TYPE (fn))) /* If the "function" is really an object of class type, it might have an overloaded `operator ()'. */ result = build_op_call (fn, args, complain); if (!result) /* A call where the function is unknown. 
*/
    result = cp_build_function_call_vec (fn, args, complain);

  if (processing_template_decl && result != error_mark_node)
    {
      /* Rebuild the call in template form, keeping the original
	 (unconverted) function and argument list so the call can be
	 re-analyzed at instantiation time.  */
      if (INDIRECT_REF_P (result))
	result = TREE_OPERAND (result, 0);
      result = build_call_vec (TREE_TYPE (result), orig_fn, orig_args);
      SET_EXPR_LOCATION (result, input_location);
      KOENIG_LOOKUP_P (result) = koenig_p;
      release_tree_vector (orig_args);
      result = convert_from_reference (result);
    }
  if (koenig_p)
    {
      /* Free garbage OVERLOADs from arg-dependent lookup.  */
      /* NOTE(review): in the processing_template_decl branch NEXT is
	 never advanced, so the loop stops after unmarking only the head
	 OVERLOAD — looks intentional (the cleared flag also falsifies
	 the loop condition) but TODO confirm against upstream.  */
      tree next = NULL_TREE;
      for (fn = orig_fn;
	   fn && TREE_CODE (fn) == OVERLOAD && OVL_ARG_DEPENDENT (fn);
	   fn = next)
	{
	  if (processing_template_decl)
	    /* In a template, we'll re-use them at instantiation time.  */
	    OVL_ARG_DEPENDENT (fn) = false;
	  else
	    {
	      next = OVL_CHAIN (fn);
	      ggc_free (fn);
	    }
	}
    }

  return result;
}

/* Finish a call to a postfix increment or decrement or EXPR.  (Which
   is indicated by CODE, which should be POSTINCREMENT_EXPR or
   POSTDECREMENT_EXPR.)  */

tree
finish_increment_expr (tree expr, enum tree_code code)
{
  return build_x_unary_op (input_location, code, expr, tf_warning_or_error);
}

/* Finish a use of `this'.  Returns an expression for `this'.  */

tree
finish_this_expr (void)
{
  tree result = NULL_TREE;

  if (current_class_ptr)
    {
      tree type = TREE_TYPE (current_class_ref);

      /* In a lambda expression, 'this' refers to the captured 'this'.  */
      if (LAMBDA_TYPE_P (type))
	result = lambda_expr_this_capture (CLASSTYPE_LAMBDA_EXPR (type), true);
      else
	result = current_class_ptr;
    }

  if (result)
    /* The keyword 'this' is a prvalue expression.  */
    return rvalue (result);

  /* No 'this' in scope: diagnose according to the kind of context.  */
  tree fn = current_nonlambda_function ();
  if (fn && DECL_STATIC_FUNCTION_P (fn))
    error ("%<this%> is unavailable for static member functions");
  else if (fn)
    error ("invalid use of %<this%> in non-member function");
  else
    error ("invalid use of %<this%> at top level");
  return error_mark_node;
}

/* Finish a pseudo-destructor expression. 
   If SCOPE is NULL, the expression was of the form `OBJECT.~DESTRUCTOR'
   where DESTRUCTOR is the TYPE for the type given.  If SCOPE is non-NULL,
   the expression was of the form `OBJECT.SCOPE::~DESTRUCTOR'.  */

tree
finish_pseudo_destructor_expr (tree object, tree scope, tree destructor,
			       location_t loc)
{
  if (object == error_mark_node || destructor == error_mark_node)
    return error_mark_node;

  gcc_assert (TYPE_P (destructor));

  if (!processing_template_decl)
    {
      if (scope == error_mark_node)
	{
	  error_at (loc, "invalid qualifying scope in pseudo-destructor name");
	  return error_mark_node;
	}
      /* A '~auto' destructor name designates the object's own type.  */
      if (is_auto (destructor))
	destructor = TREE_TYPE (object);
      if (scope && TYPE_P (scope) && !check_dtor_name (scope, destructor))
	{
	  error_at (loc,
		    "qualified type %qT does not match destructor name ~%qT",
		    scope, destructor);
	  return error_mark_node;
	}

      /* [expr.pseudo] says both:

	   The type designated by the pseudo-destructor-name shall be
	   the same as the object type.

	 and:

	   The cv-unqualified versions of the object type and of the
	   type designated by the pseudo-destructor-name shall be the
	   same type.

	 We implement the more generous second sentence, since that is
	 what most other compilers do.  */
      if (!same_type_ignoring_top_level_qualifiers_p (TREE_TYPE (object),
						      destructor))
	{
	  error_at (loc, "%qE is not of type %qT", object, destructor);
	  return error_mark_node;
	}
    }

  return build3_loc (loc, PSEUDO_DTOR_EXPR, void_type_node, object,
		     scope, destructor);
}

/* Finish an expression of the form CODE EXPR.  */

tree
finish_unary_op_expr (location_t loc, enum tree_code code, tree expr,
		      tsubst_flags_t complain)
{
  tree result = build_x_unary_op (loc, code, expr, complain);
  /* Warn only about overflow newly introduced by this operation, not
     overflow already present in the operand.  */
  if ((complain & tf_warning)
      && TREE_OVERFLOW_P (result) && !TREE_OVERFLOW_P (expr))
    overflow_warning (input_location, result);

  return result;
}

/* Finish a compound-literal expression.  TYPE is the type to which
   the CONSTRUCTOR in COMPOUND_LITERAL is being cast. 
*/

tree
finish_compound_literal (tree type, tree compound_literal,
			 tsubst_flags_t complain)
{
  if (type == error_mark_node)
    return error_mark_node;

  /* For a reference type, build the literal of the referent type and
     bind the reference to it via a cast.  */
  if (TREE_CODE (type) == REFERENCE_TYPE)
    {
      compound_literal
	= finish_compound_literal (TREE_TYPE (type), compound_literal,
				   complain);
      return cp_build_c_cast (type, compound_literal, complain);
    }

  if (!TYPE_OBJ_P (type))
    {
      if (complain & tf_error)
	error ("compound literal of non-object type %qT", type);
      return error_mark_node;
    }

  if (processing_template_decl)
    {
      TREE_TYPE (compound_literal) = type;
      /* Mark the expression as a compound literal.  */
      TREE_HAS_CONSTRUCTOR (compound_literal) = 1;
      return compound_literal;
    }

  type = complete_type (type);

  if (TYPE_NON_AGGREGATE_CLASS (type))
    {
      /* Trying to deal with a CONSTRUCTOR instead of a TREE_LIST
	 everywhere that deals with function arguments would be a pain, so
	 just wrap it in a TREE_LIST.  The parser set a flag so we know
	 that it came from T{} rather than T({}).  */
      CONSTRUCTOR_IS_DIRECT_INIT (compound_literal) = 1;
      compound_literal = build_tree_list (NULL_TREE, compound_literal);
      return build_functional_cast (type, compound_literal, complain);
    }

  if (TREE_CODE (type) == ARRAY_TYPE
      && check_array_initializer (NULL_TREE, type, compound_literal))
    return error_mark_node;
  compound_literal = reshape_init (type, compound_literal, complain);
  if (SCALAR_TYPE_P (type)
      && !BRACE_ENCLOSED_INITIALIZER_P (compound_literal)
      && !check_narrowing (type, compound_literal, complain))
    return error_mark_node;
  /* An array of unknown bound gets its bound from the initializer.  */
  if (TREE_CODE (type) == ARRAY_TYPE
      && TYPE_DOMAIN (type) == NULL_TREE)
    {
      cp_complete_array_type_or_error (&type, compound_literal,
				       false, complain);
      if (type == error_mark_node)
	return error_mark_node;
    }
  compound_literal = digest_init (type, compound_literal, complain);
  if (TREE_CODE (compound_literal) == CONSTRUCTOR)
    TREE_HAS_CONSTRUCTOR (compound_literal) = true;

  /* Put static/constant array temporaries in static variables, but always
     represent class temporaries with TARGET_EXPR so we elide copies.  */
  if ((!at_function_scope_p () || CP_TYPE_CONST_P (type))
      && TREE_CODE (type) == ARRAY_TYPE
      && !TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type)
      && initializer_constant_valid_p (compound_literal, type))
    {
      tree decl = create_temporary_var (type);
      DECL_INITIAL (decl) = compound_literal;
      TREE_STATIC (decl) = 1;
      if (literal_type_p (type) && CP_TYPE_CONST_NON_VOLATILE_P (type))
	{
	  /* 5.19 says that a constant expression can include an
	     lvalue-rvalue conversion applied to "a glvalue of literal type
	     that refers to a non-volatile temporary object initialized
	     with a constant expression".  Rather than try to communicate
	     that this VAR_DECL is a temporary, just mark it constexpr.  */
	  DECL_DECLARED_CONSTEXPR_P (decl) = true;
	  DECL_INITIALIZED_BY_CONSTANT_EXPRESSION_P (decl) = true;
	  TREE_CONSTANT (decl) = true;
	}
      cp_apply_type_quals_to_decl (cp_type_quals (type), decl);
      decl = pushdecl_top_level (decl);
      /* Name the temporary only after pushing it, so the name is never
	 visible to user-level lookup.  */
      DECL_NAME (decl) = make_anon_name ();
      SET_DECL_ASSEMBLER_NAME (decl, DECL_NAME (decl));
      /* Make sure the destructor is callable.  */
      tree clean = cxx_maybe_build_cleanup (decl, complain);
      if (clean == error_mark_node)
	return error_mark_node;
      return decl;
    }
  else
    return get_target_expr_sfinae (compound_literal, complain);
}

/* Return the declaration for the function-name variable indicated
   by ID (one of __FUNCTION__, __PRETTY_FUNCTION__, __func__).  */

tree
finish_fname (tree id)
{
  tree decl;

  decl = fname_decl (input_location, C_RID_CODE (id), id);
  if (processing_template_decl && current_function_decl
      && decl != error_mark_node)
    /* In a template, defer; the identifier is enough to rebuild later.  */
    decl = DECL_NAME (decl);
  return decl;
}

/* Finish a translation unit.  */

void
finish_translation_unit (void)
{
  /* In case there were missing closebraces,
     get us back to the global binding level.  */
  pop_everything ();
  while (current_namespace != global_namespace)
    pop_namespace ();

  /* Do file scope __FUNCTION__ et al.  */
  finish_fname_decls ();
}

/* Finish a template type parameter, specified as AGGR IDENTIFIER.
   Returns the parameter. 
*/

tree
finish_template_type_parm (tree aggr, tree identifier)
{
  /* Only 'class'/'typename' may introduce a template type parameter;
     anything else (e.g. 'struct') is a permerror and corrected.  */
  if (aggr != class_type_node)
    {
      permerror (input_location, "template type parameters must use the keyword %<class%> or %<typename%>");
      aggr = class_type_node;
    }

  return build_tree_list (aggr, identifier);
}

/* Finish a template template parameter, specified as AGGR IDENTIFIER.
   Returns the parameter.  */

tree
finish_template_template_parm (tree aggr, tree identifier)
{
  tree decl = build_decl (input_location,
			  TYPE_DECL, identifier, NULL_TREE);
  tree tmpl = build_lang_decl (TEMPLATE_DECL, identifier, NULL_TREE);
  DECL_TEMPLATE_PARMS (tmpl) = current_template_parms;
  DECL_TEMPLATE_RESULT (tmpl) = decl;
  DECL_ARTIFICIAL (decl) = 1;
  end_template_decl ();

  gcc_assert (DECL_TEMPLATE_PARMS (tmpl));

  check_default_tmpl_args (decl, DECL_TEMPLATE_PARMS (tmpl),
			   /*is_primary=*/true, /*is_partial=*/false,
			   /*is_friend=*/0);

  return finish_template_type_parm (aggr, tmpl);
}

/* ARGUMENT is the default-argument value for a template template
   parameter.  If ARGUMENT is invalid, issue error messages and return
   the ERROR_MARK_NODE.  Otherwise, ARGUMENT itself is returned.  */

tree
check_template_template_default_arg (tree argument)
{
  if (TREE_CODE (argument) != TEMPLATE_DECL
      && TREE_CODE (argument) != TEMPLATE_TEMPLATE_PARM
      && TREE_CODE (argument) != UNBOUND_CLASS_TEMPLATE)
    {
      if (TREE_CODE (argument) == TYPE_DECL)
	error ("invalid use of type %qT as a default value for a template "
	       "template-parameter", TREE_TYPE (argument));
      else
	error ("invalid default argument for a template template parameter");
      return error_mark_node;
    }

  return argument;
}

/* Begin a class definition, as indicated by T. 
*/

tree
begin_class_definition (tree t)
{
  if (error_operand_p (t) || error_operand_p (TYPE_MAIN_DECL (t)))
    return error_mark_node;

  if (processing_template_parmlist)
    {
      error ("definition of %q#T inside template parameter list", t);
      return error_mark_node;
    }

  /* According to the C++ ABI, decimal classes defined in ISO/IEC TR 24733
     are passed the same as decimal scalar types.  */
  if (TREE_CODE (t) == RECORD_TYPE
      && !processing_template_decl)
    {
      tree ns = TYPE_CONTEXT (t);
      if (ns && TREE_CODE (ns) == NAMESPACE_DECL
	  && DECL_CONTEXT (ns) == std_node
	  && DECL_NAME (ns)
	  && !strcmp (IDENTIFIER_POINTER (DECL_NAME (ns)), "decimal"))
	{
	  const char *n = TYPE_NAME_STRING (t);
	  if ((strcmp (n, "decimal32") == 0)
	      || (strcmp (n, "decimal64") == 0)
	      || (strcmp (n, "decimal128") == 0))
	    TYPE_TRANSPARENT_AGGR (t) = 1;
	}
    }

  /* A non-implicit typename comes from code like:

       template <typename T> struct A {
	 template <typename U> struct A<T>::B ...

     This is erroneous.  */
  else if (TREE_CODE (t) == TYPENAME_TYPE)
    {
      error ("invalid definition of qualified type %qT", t);
      t = error_mark_node;
    }

  /* On error (or a non-class), fall back to defining an anonymous
     RECORD_TYPE so parsing can continue.  */
  if (t == error_mark_node || ! MAYBE_CLASS_TYPE_P (t))
    {
      t = make_class_type (RECORD_TYPE);
      pushtag (make_anon_name (), t, /*tag_scope=*/ts_current);
    }

  if (TYPE_BEING_DEFINED (t))
    {
      t = make_class_type (TREE_CODE (t));
      pushtag (TYPE_IDENTIFIER (t), t, /*tag_scope=*/ts_current);
    }
  maybe_process_partial_specialization (t);
  pushclass (t);
  TYPE_BEING_DEFINED (t) = 1;
  class_binding_level->defining_class_p = 1;

  if (flag_pack_struct)
    {
      tree v;
      TYPE_PACKED (t) = 1;
      /* Even though the type is being defined for the first time
	 here, there might have been a forward declaration, so there
	 might be cv-qualified variants of T.  */
      for (v = TYPE_NEXT_VARIANT (t); v; v = TYPE_NEXT_VARIANT (v))
	TYPE_PACKED (v) = 1;
    }
  /* Reset the interface data, at the earliest possible
     moment, as it might have been set via a class foo;
     before.  */
  if (! TYPE_ANONYMOUS_P (t))
    {
      struct c_fileinfo *finfo = \
	get_fileinfo (LOCATION_FILE (input_location));
      CLASSTYPE_INTERFACE_ONLY (t) = finfo->interface_only;
      SET_CLASSTYPE_INTERFACE_UNKNOWN_X
	(t, finfo->interface_unknown);
    }
  reset_specialization();

  /* Make a declaration for this class in its own scope.  */
  build_self_reference ();

  return t;
}

/* Finish the member declaration given by DECL.  */

void
finish_member_declaration (tree decl)
{
  if (decl == error_mark_node || decl == NULL_TREE)
    return;

  if (decl == void_type_node)
    /* The COMPONENT was a friend, not a member, and so there's
       nothing for us to do.  */
    return;

  /* We should see only one DECL at a time.  */
  gcc_assert (DECL_CHAIN (decl) == NULL_TREE);

  /* Set up access control for DECL.  */
  TREE_PRIVATE (decl)
    = (current_access_specifier == access_private_node);
  TREE_PROTECTED (decl)
    = (current_access_specifier == access_protected_node);
  if (TREE_CODE (decl) == TEMPLATE_DECL)
    {
      TREE_PRIVATE (DECL_TEMPLATE_RESULT (decl)) = TREE_PRIVATE (decl);
      TREE_PROTECTED (DECL_TEMPLATE_RESULT (decl)) = TREE_PROTECTED (decl);
    }

  /* Mark the DECL as a member of the current class, unless it's
     a member of an enumeration.  */
  if (TREE_CODE (decl) != CONST_DECL)
    DECL_CONTEXT (decl) = current_class_type;

  /* Check for bare parameter packs in the member variable declaration.  */
  if (TREE_CODE (decl) == FIELD_DECL)
    {
      if (check_for_bare_parameter_packs (TREE_TYPE (decl)))
	TREE_TYPE (decl) = error_mark_node;
      if (check_for_bare_parameter_packs (DECL_ATTRIBUTES (decl)))
	DECL_ATTRIBUTES (decl) = NULL_TREE;
    }

  /* [dcl.link]

     A C language linkage is ignored for the names of class members
     and the member function type of class member functions.  */
  if (DECL_LANG_SPECIFIC (decl) && DECL_LANGUAGE (decl) == lang_c)
    SET_DECL_LANGUAGE (decl, lang_cplusplus);

  /* Put functions on the TYPE_METHODS list and everything else on the
     TYPE_FIELDS list.  Note that these are built up in reverse order.
     We reverse them (to obtain declaration order) in finish_struct.  */
  if (DECL_DECLARES_FUNCTION_P (decl))
    {
      /* We also need to add this function to the
	 CLASSTYPE_METHOD_VEC.  */
      if (add_method (current_class_type, decl, NULL_TREE))
	{
	  DECL_CHAIN (decl) = TYPE_METHODS (current_class_type);
	  TYPE_METHODS (current_class_type) = decl;

	  maybe_add_class_template_decl_list (current_class_type, decl,
					      /*friend_p=*/0);
	}
    }
  /* Enter the DECL into the scope of the class, if the class
     isn't a closure (whose fields are supposed to be unnamed).  */
  else if (CLASSTYPE_LAMBDA_EXPR (current_class_type)
	   || pushdecl_class_level (decl))
    {
      if (TREE_CODE (decl) == USING_DECL)
	{
	  /* For now, ignore class-scope USING_DECLS, so that
	     debugging backends do not see them.  */
	  DECL_IGNORED_P (decl) = 1;
	}

      /* All TYPE_DECLs go at the end of TYPE_FIELDS.  Ordinary fields
	 go at the beginning.  The reason is that lookup_field_1
	 searches the list in order, and we want a field name to
	 override a type name so that the "struct stat hack" will
	 work.  In particular:

	   struct S { enum E { }; int E } s;
	   s.E = 3;

	 is valid.  In addition, the FIELD_DECLs must be maintained in
	 declaration order so that class layout works as expected.
	 However, we don't need that order until class layout, so we
	 save a little time by putting FIELD_DECLs on in reverse order
	 here, and then reversing them in finish_struct_1.  (We could
	 also keep a pointer to the correct insertion points in the
	 list.)  */

      if (TREE_CODE (decl) == TYPE_DECL)
	TYPE_FIELDS (current_class_type)
	  = chainon (TYPE_FIELDS (current_class_type), decl);
      else
	{
	  DECL_CHAIN (decl) = TYPE_FIELDS (current_class_type);
	  TYPE_FIELDS (current_class_type) = decl;
	}

      maybe_add_class_template_decl_list (current_class_type, decl,
					  /*friend_p=*/0);
    }

  if (pch_file)
    note_decl_for_pch (decl);
}

/* DECL has been declared while we are building a PCH file.  Perform
   actions that we might normally undertake lazily, but which can be
   performed now so that they do not have to be performed in
   translation units which include the PCH file. 
*/ void note_decl_for_pch (tree decl) { gcc_assert (pch_file); /* There's a good chance that we'll have to mangle names at some point, even if only for emission in debugging information. */ if (VAR_OR_FUNCTION_DECL_P (decl) && !processing_template_decl) mangle_decl (decl); } /* Finish processing a complete template declaration. The PARMS are the template parameters. */ void finish_template_decl (tree parms) { if (parms) end_template_decl (); else end_specialization (); } /* Finish processing a template-id (which names a type) of the form NAME < ARGS >. Return the TYPE_DECL for the type named by the template-id. If ENTERING_SCOPE is nonzero we are about to enter the scope of template-id indicated. */ tree finish_template_type (tree name, tree args, int entering_scope) { tree type; type = lookup_template_class (name, args, NULL_TREE, NULL_TREE, entering_scope, tf_warning_or_error | tf_user); if (type == error_mark_node) return type; else if (CLASS_TYPE_P (type) && !alias_type_or_template_p (type)) return TYPE_STUB_DECL (type); else return TYPE_NAME (type); } /* Finish processing a BASE_CLASS with the indicated ACCESS_SPECIFIER. Return a TREE_LIST containing the ACCESS_SPECIFIER and the BASE_CLASS, or NULL_TREE if an error occurred. The ACCESS_SPECIFIER is one of access_{default,public,protected_private}_node. For a virtual base we set TREE_TYPE. */ tree finish_base_specifier (tree base, tree access, bool virtual_p) { tree result; if (base == error_mark_node) { error ("invalid base-class specification"); result = NULL_TREE; } else if (! MAYBE_CLASS_TYPE_P (base)) { error ("%qT is not a class type", base); result = NULL_TREE; } else { if (cp_type_quals (base) != 0) { /* DR 484: Can a base-specifier name a cv-qualified class type? 
*/ base = TYPE_MAIN_VARIANT (base); } result = build_tree_list (access, base); if (virtual_p) TREE_TYPE (result) = integer_type_node; } return result; } /* If FNS is a member function, a set of member functions, or a template-id referring to one or more member functions, return a BASELINK for FNS, incorporating the current access context. Otherwise, return FNS unchanged. */ tree baselink_for_fns (tree fns) { tree scope; tree cl; if (BASELINK_P (fns) || error_operand_p (fns)) return fns; scope = ovl_scope (fns); if (!CLASS_TYPE_P (scope)) return fns; cl = currently_open_derived_class (scope); if (!cl) cl = scope; cl = TYPE_BINFO (cl); return build_baselink (cl, cl, fns, /*optype=*/NULL_TREE); } /* Returns true iff DECL is a variable from a function outside the current one. */ static bool outer_var_p (tree decl) { return ((VAR_P (decl) || TREE_CODE (decl) == PARM_DECL) && DECL_FUNCTION_SCOPE_P (decl) && (DECL_CONTEXT (decl) != current_function_decl || parsing_nsdmi ())); } /* As above, but also checks that DECL is automatic. */ bool outer_automatic_var_p (tree decl) { return (outer_var_p (decl) && !TREE_STATIC (decl)); } /* DECL satisfies outer_automatic_var_p. Possibly complain about it or rewrite it for lambda capture. */ tree process_outer_var_ref (tree decl, tsubst_flags_t complain) { if (cp_unevaluated_operand) /* It's not a use (3.2) if we're in an unevaluated context. */ return decl; tree context = DECL_CONTEXT (decl); tree containing_function = current_function_decl; tree lambda_stack = NULL_TREE; tree lambda_expr = NULL_TREE; tree initializer = convert_from_reference (decl); /* Mark it as used now even if the use is ill-formed. 
*/ mark_used (decl); /* Core issue 696: "[At the July 2009 meeting] the CWG expressed support for an approach in which a reference to a local [constant] automatic variable in a nested class or lambda body would enter the expression as an rvalue, which would reduce the complexity of the problem" FIXME update for final resolution of core issue 696. */ if (decl_maybe_constant_var_p (decl)) { if (processing_template_decl) /* In a template, the constant value may not be in a usable form, so wait until instantiation time. */ return decl; else if (decl_constant_var_p (decl)) return scalar_constant_value (decl); } if (parsing_nsdmi ()) containing_function = NULL_TREE; else /* If we are in a lambda function, we can move out until we hit 1. the context, 2. a non-lambda function, or 3. a non-default capturing lambda function. */ while (context != containing_function && LAMBDA_FUNCTION_P (containing_function)) { tree closure = DECL_CONTEXT (containing_function); lambda_expr = CLASSTYPE_LAMBDA_EXPR (closure); if (TYPE_CLASS_SCOPE_P (closure)) /* A lambda in an NSDMI (c++/64496). 
*/ break; if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) break; lambda_stack = tree_cons (NULL_TREE, lambda_expr, lambda_stack); containing_function = decl_function_context (containing_function); } if (lambda_expr && TREE_CODE (decl) == VAR_DECL && DECL_ANON_UNION_VAR_P (decl)) { if (complain & tf_error) error ("cannot capture member %qD of anonymous union", decl); return error_mark_node; } if (context == containing_function) { decl = add_default_capture (lambda_stack, /*id=*/DECL_NAME (decl), initializer); } else if (lambda_expr) { if (complain & tf_error) { error ("%qD is not captured", decl); tree closure = LAMBDA_EXPR_CLOSURE (lambda_expr); if (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lambda_expr) == CPLD_NONE) inform (location_of (closure), "the lambda has no capture-default"); else if (TYPE_CLASS_SCOPE_P (closure)) inform (0, "lambda in local class %q+T cannot " "capture variables from the enclosing context", TYPE_CONTEXT (closure)); inform (input_location, "%q+#D declared here", decl); } return error_mark_node; } else { if (complain & tf_error) error (VAR_P (decl) ? G_("use of local variable with automatic storage from containing function") : G_("use of parameter from containing function")); inform (input_location, "%q+#D declared here", decl); return error_mark_node; } return decl; } /* ID_EXPRESSION is a representation of parsed, but unprocessed, id-expression. (See cp_parser_id_expression for details.) SCOPE, if non-NULL, is the type or namespace used to explicitly qualify ID_EXPRESSION. DECL is the entity to which that name has been resolved. *CONSTANT_EXPRESSION_P is true if we are presently parsing a constant-expression. In that case, *NON_CONSTANT_EXPRESSION_P will be set to true if this expression isn't permitted in a constant-expression, but it is otherwise not set by this function. *ALLOW_NON_CONSTANT_EXPRESSION_P is true if we are parsing a constant-expression, but a non-constant expression is also permissible. 
DONE is true if this expression is a complete postfix-expression; it is false if this expression is followed by '->', '[', '(', etc. ADDRESS_P is true iff this expression is the operand of '&'. TEMPLATE_P is true iff the qualified-id was of the form "A::template B". TEMPLATE_ARG_P is true iff this qualified name appears as a template argument. If an error occurs, and it is the kind of error that might cause the parser to abort a tentative parse, *ERROR_MSG is filled in. It is the caller's responsibility to issue the message. *ERROR_MSG will be a string with static storage duration, so the caller need not "free" it. Return an expression for the entity, after issuing appropriate diagnostics. This function is also responsible for transforming a reference to a non-static member into a COMPONENT_REF that makes the use of "this" explicit. Upon return, *IDK will be filled in appropriately. */ tree finish_id_expression (tree id_expression, tree decl, tree scope, cp_id_kind *idk, bool integral_constant_expression_p, bool allow_non_integral_constant_expression_p, bool *non_integral_constant_expression_p, bool template_p, bool done, bool address_p, bool template_arg_p, const char **error_msg, location_t location) { decl = strip_using_decl (decl); /* Initialize the output parameters. */ *idk = CP_ID_KIND_NONE; *error_msg = NULL; if (id_expression == error_mark_node) return error_mark_node; /* If we have a template-id, then no further lookup is required. If the template-id was for a template-class, we will sometimes have a TYPE_DECL at this point. */ else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR || TREE_CODE (decl) == TYPE_DECL) ; /* Look up the name. */ else { if (decl == error_mark_node) { /* Name lookup failed. 
*/ if (scope && (!TYPE_P (scope) || (!dependent_type_p (scope) && !(identifier_p (id_expression) && IDENTIFIER_TYPENAME_P (id_expression) && dependent_type_p (TREE_TYPE (id_expression)))))) { /* If the qualifying type is non-dependent (and the name does not name a conversion operator to a dependent type), issue an error. */ qualified_name_lookup_error (scope, id_expression, decl, location); return error_mark_node; } else if (!scope) { /* It may be resolved via Koenig lookup. */ *idk = CP_ID_KIND_UNQUALIFIED; return id_expression; } else decl = id_expression; } /* If DECL is a variable that would be out of scope under ANSI/ISO rules, but in scope in the ARM, name lookup will succeed. Issue a diagnostic here. */ else decl = check_for_out_of_scope_variable (decl); /* Remember that the name was used in the definition of the current class so that we can check later to see if the meaning would have been different after the class was entirely defined. */ if (!scope && decl != error_mark_node && identifier_p (id_expression)) maybe_note_name_used_in_class (id_expression, decl); /* Disallow uses of local variables from containing functions, except within lambda-expressions. */ if (outer_automatic_var_p (decl)) { decl = process_outer_var_ref (decl, tf_warning_or_error); if (decl == error_mark_node) return error_mark_node; } /* Also disallow uses of function parameters outside the function body, except inside an unevaluated context (i.e. decltype). */ if (TREE_CODE (decl) == PARM_DECL && DECL_CONTEXT (decl) == NULL_TREE && !cp_unevaluated_operand) { *error_msg = "use of parameter outside function body"; return error_mark_node; } } /* If we didn't find anything, or what we found was a type, then this wasn't really an id-expression. 
*/ if (TREE_CODE (decl) == TEMPLATE_DECL && !DECL_FUNCTION_TEMPLATE_P (decl)) { *error_msg = "missing template arguments"; return error_mark_node; } else if (TREE_CODE (decl) == TYPE_DECL || TREE_CODE (decl) == NAMESPACE_DECL) { *error_msg = "expected primary-expression"; return error_mark_node; } /* If the name resolved to a template parameter, there is no need to look it up again later. */ if ((TREE_CODE (decl) == CONST_DECL && DECL_TEMPLATE_PARM_P (decl)) || TREE_CODE (decl) == TEMPLATE_PARM_INDEX) { tree r; *idk = CP_ID_KIND_NONE; if (TREE_CODE (decl) == TEMPLATE_PARM_INDEX) decl = TEMPLATE_PARM_DECL (decl); r = convert_from_reference (DECL_INITIAL (decl)); if (integral_constant_expression_p && !dependent_type_p (TREE_TYPE (decl)) && !(INTEGRAL_OR_ENUMERATION_TYPE_P (TREE_TYPE (r)))) { if (!allow_non_integral_constant_expression_p) error ("template parameter %qD of type %qT is not allowed in " "an integral constant expression because it is not of " "integral or enumeration type", decl, TREE_TYPE (decl)); *non_integral_constant_expression_p = true; } return r; } else { bool dependent_p; /* If the declaration was explicitly qualified indicate that. The semantics of `A::f(3)' are different than `f(3)' if `f' is virtual. */ *idk = (scope ? CP_ID_KIND_QUALIFIED : (TREE_CODE (decl) == TEMPLATE_ID_EXPR ? CP_ID_KIND_TEMPLATE_ID : CP_ID_KIND_UNQUALIFIED)); /* [temp.dep.expr] An id-expression is type-dependent if it contains an identifier that was declared with a dependent type. The standard is not very specific about an id-expression that names a set of overloaded functions. What if some of them have dependent types and some of them do not? Presumably, such a name should be treated as a dependent name. */ /* Assume the name is not dependent. */ dependent_p = false; if (!processing_template_decl) /* No names are dependent outside a template. */ ; else if (TREE_CODE (decl) == CONST_DECL) /* We don't want to treat enumerators as dependent. 
*/ ; /* A template-id where the name of the template was not resolved is definitely dependent. */ else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR && (identifier_p (TREE_OPERAND (decl, 0)))) dependent_p = true; /* For anything except an overloaded function, just check its type. */ else if (!is_overloaded_fn (decl)) dependent_p = dependent_type_p (TREE_TYPE (decl)); /* For a set of overloaded functions, check each of the functions. */ else { tree fns = decl; if (BASELINK_P (fns)) fns = BASELINK_FUNCTIONS (fns); /* For a template-id, check to see if the template arguments are dependent. */ if (TREE_CODE (fns) == TEMPLATE_ID_EXPR) { tree args = TREE_OPERAND (fns, 1); dependent_p = any_dependent_template_arguments_p (args); /* The functions are those referred to by the template-id. */ fns = TREE_OPERAND (fns, 0); } /* If there are no dependent template arguments, go through the overloaded functions. */ while (fns && !dependent_p) { tree fn = OVL_CURRENT (fns); /* Member functions of dependent classes are dependent. */ if (TREE_CODE (fn) == FUNCTION_DECL && type_dependent_expression_p (fn)) dependent_p = true; else if (TREE_CODE (fn) == TEMPLATE_DECL && dependent_template_p (fn)) dependent_p = true; fns = OVL_NEXT (fns); } } /* If the name was dependent on a template parameter, we will resolve the name at instantiation time. */ if (dependent_p) { /* Create a SCOPE_REF for qualified names, if the scope is dependent. */ if (scope) { if (TYPE_P (scope)) { if (address_p && done) decl = finish_qualified_id_expr (scope, decl, done, address_p, template_p, template_arg_p, tf_warning_or_error); else { tree type = NULL_TREE; if (DECL_P (decl) && !dependent_scope_p (scope)) type = TREE_TYPE (decl); decl = build_qualified_name (type, scope, id_expression, template_p); } } if (TREE_TYPE (decl)) decl = convert_from_reference (decl); return decl; } /* A TEMPLATE_ID already contains all the information we need. 
*/
      if (TREE_CODE (id_expression) == TEMPLATE_ID_EXPR)
	return id_expression;
      *idk = CP_ID_KIND_UNQUALIFIED_DEPENDENT;
      /* If we found a variable, then name lookup during the
	 instantiation will always resolve to the same VAR_DECL
	 (or an instantiation thereof).  */
      if (VAR_P (decl) || TREE_CODE (decl) == PARM_DECL)
	{
	  mark_used (decl);
	  return convert_from_reference (decl);
	}
      /* The same is true for FIELD_DECL, but we also need to make sure
	 that the syntax is correct.  */
      else if (TREE_CODE (decl) == FIELD_DECL)
	{
	  /* Since SCOPE is NULL here, this is an unqualified name.
	     Access checking has been performed during name lookup
	     already.  Turn off checking to avoid duplicate errors.  */
	  push_deferring_access_checks (dk_no_check);
	  decl = finish_non_static_data_member
		   (decl, NULL_TREE,
		    /*qualifying_scope=*/NULL_TREE);
	  pop_deferring_access_checks ();
	  return decl;
	}
      return id_expression;
    }

  if (TREE_CODE (decl) == NAMESPACE_DECL)
    {
      error ("use of namespace %qD as expression", decl);
      return error_mark_node;
    }
  else if (DECL_CLASS_TEMPLATE_P (decl))
    {
      error ("use of class template %qT as expression", decl);
      return error_mark_node;
    }
  else if (TREE_CODE (decl) == TREE_LIST)
    {
      /* Ambiguous reference to base members.  */
      error ("request for member %qD is ambiguous in "
	     "multiple inheritance lattice", id_expression);
      print_candidates (decl);
      return error_mark_node;
    }

  /* Mark variable-like entities as used.  Functions are similarly
     marked either below or after overload resolution.  */
  if ((VAR_P (decl)
       || TREE_CODE (decl) == PARM_DECL
       || TREE_CODE (decl) == CONST_DECL
       || TREE_CODE (decl) == RESULT_DECL)
      && !mark_used (decl))
    return error_mark_node;

  /* Only certain kinds of names are allowed in constant
     expression.  Template parameters have already
     been handled above.  */
  if (! error_operand_p (decl)
      && integral_constant_expression_p
      && ! decl_constant_var_p (decl)
      && TREE_CODE (decl) != CONST_DECL
      && ! builtin_valid_in_constant_expr_p (decl))
    {
      if (!allow_non_integral_constant_expression_p)
	{
	  error ("%qD cannot appear in a constant-expression", decl);
	  return error_mark_node;
	}
      *non_integral_constant_expression_p = true;
    }

  tree wrap;
  if (VAR_P (decl)
      && !cp_unevaluated_operand
      && !processing_template_decl
      && (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
      && DECL_THREAD_LOCAL_P (decl)
      && (wrap = get_tls_wrapper_fn (decl)))
    {
      /* Replace an evaluated use of the thread_local variable with
	 a call to its wrapper.  */
      decl = build_cxx_call (wrap, 0, NULL, tf_warning_or_error);
    }
  else if (TREE_CODE (decl) == TEMPLATE_ID_EXPR
	   && variable_template_p (TREE_OPERAND (decl, 0)))
    {
      /* Variable template-id: instantiate the variable now.  */
      decl = finish_template_variable (decl);
      mark_used (decl);
    }
  else if (scope)
    {
      /* Qualified name: adjust for the scope through which the name
	 was looked up, then build the qualified-id expression.  */
      decl = (adjust_result_of_qualified_name_lookup
	      (decl, scope, current_nonlambda_class_type()));

      if (TREE_CODE (decl) == FUNCTION_DECL)
	mark_used (decl);

      if (TYPE_P (scope))
	decl = finish_qualified_id_expr (scope,
					 decl,
					 done,
					 address_p,
					 template_p,
					 template_arg_p,
					 tf_warning_or_error);
      else
	decl = convert_from_reference (decl);
    }
  else if (TREE_CODE (decl) == FIELD_DECL)
    {
      /* Since SCOPE is NULL here, this is an unqualified name.
	 Access checking has been performed during name lookup
	 already.  Turn off checking to avoid duplicate errors.  */
      push_deferring_access_checks (dk_no_check);
      decl = finish_non_static_data_member (decl, NULL_TREE,
					    /*qualifying_scope=*/NULL_TREE);
      pop_deferring_access_checks ();
    }
  else if (is_overloaded_fn (decl))
    {
      tree first_fn;

      first_fn = get_first_fn (decl);
      if (TREE_CODE (first_fn) == TEMPLATE_DECL)
	first_fn = DECL_TEMPLATE_RESULT (first_fn);

      /* A non-overloaded function must be marked used here; an
	 overload set is marked after overload resolution instead.  */
      if (!really_overloaded_fn (decl)
	  && !mark_used (first_fn))
	return error_mark_node;

      if (!template_arg_p
	  && TREE_CODE (first_fn) == FUNCTION_DECL
	  && DECL_FUNCTION_MEMBER_P (first_fn)
	  && !shared_member_p (decl))
	{
	  /* A set of member functions.  */
	  decl = maybe_dummy_object (DECL_CONTEXT (first_fn), 0);
	  return finish_class_member_access_expr (decl, id_expression,
						  /*template_p=*/false,
						  tf_warning_or_error);
	}

      decl = baselink_for_fns (decl);
    }
  else
    {
      if (DECL_P (decl) && DECL_NONLOCAL (decl)
	  && DECL_CLASS_SCOPE_P (decl))
	{
	  /* The name refers to a member of a base class; check access
	     through the most-derived currently-open class.  */
	  tree context = context_for_name_lookup (decl);
	  if (context != current_class_type)
	    {
	      tree path = currently_open_derived_class (context);
	      perform_or_defer_access_check (TYPE_BINFO (path),
					     decl, decl,
					     tf_warning_or_error);
	    }
	}

      decl = convert_from_reference (decl);
    }
  }

  /* Handle references (c++/56130).  */
  tree t = REFERENCE_REF_P (decl) ? TREE_OPERAND (decl, 0) : decl;
  if (TREE_DEPRECATED (t))
    warn_deprecated_use (t, NULL_TREE);

  return decl;
}

/* Implement the __typeof keyword: Return the type of EXPR, suitable for
   use as a type-specifier.  */

tree
finish_typeof (tree expr)
{
  tree type;

  /* In a template, defer: build a TYPEOF_TYPE holding the expression.  */
  if (type_dependent_expression_p (expr))
    {
      type = cxx_make_type (TYPEOF_TYPE);
      TYPEOF_TYPE_EXPR (type) = expr;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }

  expr = mark_type_use (expr);

  type = unlowered_expr_type (expr);

  if (!type || type == unknown_type_node)
    {
      error ("type of %qE is unknown", expr);
      return error_mark_node;
    }

  return type;
}

/* Implement the __underlying_type keyword: Return the underlying
   type of TYPE, suitable for use as a type-specifier.  */

tree
finish_underlying_type (tree type)
{
  tree underlying_type;

  /* In a template, defer via an UNDERLYING_TYPE node.  */
  if (processing_template_decl)
    {
      underlying_type = cxx_make_type (UNDERLYING_TYPE);
      UNDERLYING_TYPE_TYPE (underlying_type) = type;
      SET_TYPE_STRUCTURAL_EQUALITY (underlying_type);

      return underlying_type;
    }

  complete_type (type);

  if (TREE_CODE (type) != ENUMERAL_TYPE)
    {
      error ("%qT is not an enumeration type", type);
      return error_mark_node;
    }

  underlying_type = ENUM_UNDERLYING_TYPE (type);

  /* Fixup necessary in this case because ENUM_UNDERLYING_TYPE
     includes TYPE_MIN_VALUE and TYPE_MAX_VALUE information.
     See finish_enum_value_list for details.
*/
  if (!ENUM_FIXED_UNDERLYING_TYPE_P (type))
    underlying_type
      = c_common_type_for_mode (TYPE_MODE (underlying_type),
				TYPE_UNSIGNED (underlying_type));

  return underlying_type;
}

/* Implement the __direct_bases keyword: Return a TREE_VEC containing
   the direct base classes of TYPE, virtual bases first (they are
   initialized first), then non-virtual bases.  Returns an empty
   TREE_VEC for a non-class (or union) type.  */

tree
calculate_direct_bases (tree type)
{
  vec<tree, va_gc> *vector = make_tree_vector();
  tree bases_vec = NULL_TREE;
  vec<tree, va_gc> *base_binfos;
  tree binfo;
  unsigned i;

  complete_type (type);

  if (!NON_UNION_CLASS_TYPE_P (type))
    {
      /* Return the scratch vector to the cache before bailing out;
	 previously it was leaked on this path.  */
      release_tree_vector (vector);
      return make_tree_vec (0);
    }

  base_binfos = BINFO_BASE_BINFOS (TYPE_BINFO (type));

  /* Virtual bases are initialized first */
  for (i = 0; base_binfos->iterate (i, &binfo); i++)
    {
      if (BINFO_VIRTUAL_P (binfo))
	{
	  vec_safe_push (vector, binfo);
	}
    }

  /* Now non-virtuals */
  for (i = 0; base_binfos->iterate (i, &binfo); i++)
    {
      if (!BINFO_VIRTUAL_P (binfo))
	{
	  vec_safe_push (vector, binfo);
	}
    }

  /* Copy the collected BINFOs' types into the result TREE_VEC.  */
  bases_vec = make_tree_vec (vector->length ());

  for (i = 0; i < vector->length (); ++i)
    {
      TREE_VEC_ELT (bases_vec, i) = BINFO_TYPE ((*vector)[i]);
    }

  /* Release the scratch vector, matching calculate_bases below;
     previously it was leaked here.  */
  release_tree_vector (vector);
  return bases_vec;
}

/* Implement the __bases keyword: Return the base classes of type */

/* Find morally non-virtual base classes by walking binfo hierarchy */
/* Virtual base classes are handled separately in finish_bases */

static tree
dfs_calculate_bases_pre (tree binfo, void * /*data_*/)
{
  /* Don't walk bases of virtual bases */
  return BINFO_VIRTUAL_P (binfo) ?
dfs_skip_bases : NULL_TREE;
}

/* Post-order callback: record the type of every non-virtual BINFO
   visited, in construction order.  DATA_ is the collecting vector.  */

static tree
dfs_calculate_bases_post (tree binfo, void *data_)
{
  vec<tree, va_gc> **data = ((vec<tree, va_gc> **) data_);
  if (!BINFO_VIRTUAL_P (binfo))
    {
      vec_safe_push (*data, BINFO_TYPE (binfo));
    }
  return NULL_TREE;
}

/* Calculates the morally non-virtual base classes of a class */
static vec<tree, va_gc> *
calculate_bases_helper (tree type)
{
  vec<tree, va_gc> *vector = make_tree_vector();

  /* Now add non-virtual base classes in order of construction */
  dfs_walk_all (TYPE_BINFO (type),
		dfs_calculate_bases_pre, dfs_calculate_bases_post, &vector);
  /* Caller owns the returned vector and must release_tree_vector it.  */
  return vector;
}

/* Return a TREE_VEC of all (direct and indirect) base classes of TYPE
   in construction order: virtual bases first, then the morally
   non-virtual ones.  */

tree
calculate_bases (tree type)
{
  vec<tree, va_gc> *vector = make_tree_vector();
  tree bases_vec = NULL_TREE;
  unsigned i;
  vec<tree, va_gc> *vbases;
  vec<tree, va_gc> *nonvbases;
  tree binfo;

  complete_type (type);

  if (!NON_UNION_CLASS_TYPE_P (type))
    return make_tree_vec (0);

  /* First go through virtual base classes */
  for (vbases = CLASSTYPE_VBASECLASSES (type), i = 0;
       vec_safe_iterate (vbases, i, &binfo); i++)
    {
      vec<tree, va_gc> *vbase_bases;
      vbase_bases = calculate_bases_helper (BINFO_TYPE (binfo));
      vec_safe_splice (vector, vbase_bases);
      release_tree_vector (vbase_bases);
    }

  /* Now for the non-virtual bases */
  nonvbases = calculate_bases_helper (type);
  vec_safe_splice (vector, nonvbases);
  release_tree_vector (nonvbases);

  /* Last element is entire class, so don't copy */
  bases_vec = make_tree_vec (vector->length () - 1);

  for (i = 0; i < vector->length () - 1; ++i)
    {
      TREE_VEC_ELT (bases_vec, i) = (*vector)[i];
    }
  release_tree_vector (vector);
  return bases_vec;
}

/* Build a BASES node representing __bases (DIRECT false) or
   __direct_bases (DIRECT true) of TYPE; only valid inside a template,
   where it is expanded as a parameter pack.  */

tree
finish_bases (tree type, bool direct)
{
  tree bases = NULL_TREE;

  if (!processing_template_decl)
    {
      /* Parameter packs can only be used in templates */
      /* NOTE(review): diagnostic starts with a capital letter, which
	 violates GCC diagnostic conventions — consider lowercasing.  */
      error ("Parameter pack __bases only valid in template declaration");
      return error_mark_node;
    }

  bases = cxx_make_type (BASES);
  BASES_TYPE (bases) = type;
  BASES_DIRECT (bases) = direct;
  SET_TYPE_STRUCTURAL_EQUALITY (bases);

  return bases;
}

/* Perform C++-specific checks for
__builtin_offsetof before calling fold_offsetof.  */

tree
finish_offsetof (tree expr, location_t loc)
{
  /* If we're processing a template, we can't finish the semantics yet.
     Otherwise we can fold the entire expression now.  */
  if (processing_template_decl)
    {
      expr = build1 (OFFSETOF_EXPR, size_type_node, expr);
      SET_EXPR_LOCATION (expr, loc);
      return expr;
    }

  if (TREE_CODE (expr) == PSEUDO_DTOR_EXPR)
    {
      error ("cannot apply %<offsetof%> to destructor %<~%T%>",
	      TREE_OPERAND (expr, 2));
      return error_mark_node;
    }
  /* offsetof of a function or overload set is ill-formed.  */
  if (TREE_CODE (TREE_TYPE (expr)) == FUNCTION_TYPE
      || TREE_CODE (TREE_TYPE (expr)) == METHOD_TYPE
      || TREE_TYPE (expr) == unknown_type_node)
    {
      if (INDIRECT_REF_P (expr))
	error ("second operand of %<offsetof%> is neither a single "
	       "identifier nor a sequence of member accesses and "
	       "array references");
      else
	{
	  if (TREE_CODE (expr) == COMPONENT_REF
	      || TREE_CODE (expr) == COMPOUND_EXPR)
	    expr = TREE_OPERAND (expr, 1);
	  error ("cannot apply %<offsetof%> to member function %qD", expr);
	}
      return error_mark_node;
    }
  if (REFERENCE_REF_P (expr))
    expr = TREE_OPERAND (expr, 0);
  if (TREE_CODE (expr) == COMPONENT_REF)
    {
      tree object = TREE_OPERAND (expr, 0);
      if (!complete_type_or_else (TREE_TYPE (object), object))
	return error_mark_node;
      /* offsetof on a non-standard-layout class is conditionally
	 supported; warn under -Winvalid-offsetof.  */
      if (warn_invalid_offsetof
	  && CLASS_TYPE_P (TREE_TYPE (object))
	  && CLASSTYPE_NON_STD_LAYOUT (TREE_TYPE (object))
	  && cp_unevaluated_operand == 0)
	pedwarn (loc, OPT_Winvalid_offsetof,
		 "offsetof within non-standard-layout type %qT is undefined",
		 TREE_TYPE (object));
    }
  return fold_offsetof (expr);
}

/* Replace the AGGR_INIT_EXPR at *TP with an equivalent CALL_EXPR.  This
   function is broken out from the above for the benefit of the tree-ssa
   project.  */

void
simplify_aggr_init_expr (tree *tp)
{
  tree aggr_init_expr = *tp;

  /* Form an appropriate CALL_EXPR.  */
  tree fn = AGGR_INIT_EXPR_FN (aggr_init_expr);
  tree slot = AGGR_INIT_EXPR_SLOT (aggr_init_expr);
  tree type = TREE_TYPE (slot);

  tree call_expr;
  enum style_t { ctor, arg, pcc } style;

  if (AGGR_INIT_VIA_CTOR_P (aggr_init_expr))
    style = ctor;
#ifdef PCC_STATIC_STRUCT_RETURN
  else if (1)
    style = pcc;
#endif
  else
    {
      gcc_assert (TREE_ADDRESSABLE (type));
      style = arg;
    }

  call_expr = build_call_array_loc (input_location,
				    TREE_TYPE (TREE_TYPE (TREE_TYPE (fn))),
				    fn,
				    aggr_init_expr_nargs (aggr_init_expr),
				    AGGR_INIT_EXPR_ARGP (aggr_init_expr));
  TREE_NOTHROW (call_expr) = TREE_NOTHROW (aggr_init_expr);
  CALL_EXPR_LIST_INIT_P (call_expr) = CALL_EXPR_LIST_INIT_P (aggr_init_expr);

  if (style == ctor)
    {
      /* Replace the first argument to the ctor with the address of the
	 slot.  */
      cxx_mark_addressable (slot);
      CALL_EXPR_ARG (call_expr, 0) =
	build1 (ADDR_EXPR, build_pointer_type (type), slot);
    }
  else if (style == arg)
    {
      /* Just mark it addressable here, and leave the rest to
	 expand_call{,_inline}.  */
      cxx_mark_addressable (slot);
      CALL_EXPR_RETURN_SLOT_OPT (call_expr) = true;
      call_expr = build2 (INIT_EXPR, TREE_TYPE (call_expr), slot, call_expr);
    }
  else if (style == pcc)
    {
      /* If we're using the non-reentrant PCC calling convention, then we
	 need to copy the returned value out of the static buffer into the
	 SLOT.  */
      push_deferring_access_checks (dk_no_check);
      call_expr = build_aggr_init (slot, call_expr,
				   DIRECT_BIND | LOOKUP_ONLYCONVERTING,
                                   tf_warning_or_error);
      pop_deferring_access_checks ();
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (slot), call_expr, slot);
    }

  if (AGGR_INIT_ZERO_FIRST (aggr_init_expr))
    {
      /* Zero-initialize the slot before running the call.  */
      tree init = build_zero_init (type, NULL_TREE,
				   /*static_storage_p=*/false);
      init = build2 (INIT_EXPR, void_type_node, slot, init);
      call_expr = build2 (COMPOUND_EXPR, TREE_TYPE (call_expr),
			  init, call_expr);
    }

  *tp = call_expr;
}

/* Emit all thunks to FN that should be emitted when FN is emitted.
*/

void
emit_associated_thunks (tree fn)
{
  /* When we use vcall offsets, we emit thunks with the virtual
     functions to which they thunk. The whole point of vcall offsets
     is so that you can know statically the entire set of thunks that
     will ever be needed for a given virtual function, thereby
     enabling you to output all the thunks with the function itself.  */
  if (DECL_VIRTUAL_P (fn)
      /* Do not emit thunks for extern template instantiations.  */
      && ! DECL_REALLY_EXTERN (fn))
    {
      tree thunk;

      for (thunk = DECL_THUNKS (fn); thunk; thunk = DECL_CHAIN (thunk))
	{
	  if (!THUNK_ALIAS (thunk))
	    {
	      use_thunk (thunk, /*emit_p=*/1);
	      /* A result-adjusting thunk may itself have thunks.  */
	      if (DECL_RESULT_THUNK_P (thunk))
		{
		  tree probe;

		  for (probe = DECL_THUNKS (thunk);
		       probe; probe = DECL_CHAIN (probe))
		    use_thunk (probe, /*emit_p=*/1);
		}
	    }
	  else
	    gcc_assert (!DECL_THUNKS (thunk));
	}
    }
}

/* Generate RTL for FN.  */

bool
expand_or_defer_fn_1 (tree fn)
{
  /* When the parser calls us after finishing the body of a template
     function, we don't really want to expand the body.  */
  if (processing_template_decl)
    {
      /* Normally, collection only occurs in rest_of_compilation.  So,
	 if we don't collect here, we never collect junk generated
	 during the processing of templates until we hit a
	 non-template function.  It's not safe to do this inside a
	 nested class, though, as the parser may have local state that
	 is not a GC root.  */
      if (!function_depth)
	ggc_collect ();
      return false;
    }

  gcc_assert (DECL_SAVED_TREE (fn));

  /* We make a decision about linkage for these functions at the end
     of the compilation.  Until that point, we do not want the back
     end to output them -- but we do want it to see the bodies of
     these functions so that it can inline them as appropriate.  */
  if (DECL_DECLARED_INLINE_P (fn) || DECL_IMPLICIT_INSTANTIATION (fn))
    {
      if (DECL_INTERFACE_KNOWN (fn))
	/* We've already made a decision as to how this function will
	   be handled.  */;
      else if (!at_eof)
	tentative_decl_linkage (fn);
      else
	import_export_decl (fn);

      /* If the user wants us to keep all inline functions, then mark
	 this function as needed so that finish_file will make sure to
	 output it later.  Similarly, all dllexport'd functions must
	 be emitted; there may be callers in other DLLs.  */
      if (DECL_DECLARED_INLINE_P (fn)
	  && !DECL_REALLY_EXTERN (fn)
	  && (flag_keep_inline_functions
	      || (flag_keep_inline_dllexport
		  && lookup_attribute ("dllexport", DECL_ATTRIBUTES (fn)))))
	{
	  mark_needed (fn);
	  DECL_EXTERNAL (fn) = 0;
	}
    }

  /* If this is a constructor or destructor body, we have to clone
     it.  */
  if (maybe_clone_body (fn))
    {
      /* We don't want to process FN again, so pretend we've written
	 it out, even though we haven't.  */
      TREE_ASM_WRITTEN (fn) = 1;
      /* If this is an instantiation of a constexpr function, keep
	 DECL_SAVED_TREE for explain_invalid_constexpr_fn.  */
      if (!is_instantiation_of_constexpr (fn))
	DECL_SAVED_TREE (fn) = NULL_TREE;
      return false;
    }

  /* There's no reason to do any of the work here if we're only doing
     semantic analysis; this code just generates RTL.  */
  if (flag_syntax_only)
    return false;

  return true;
}

/* Expand FN now, or register it with the compilation-unit manager to
   be expanded later, as expand_or_defer_fn_1 decides.  */

void
expand_or_defer_fn (tree fn)
{
  if (expand_or_defer_fn_1 (fn))
    {
      function_depth++;

      /* Expand or defer, at the whim of the compilation unit manager.  */
      cgraph_node::finalize_function (fn, function_depth > 1);
      emit_associated_thunks (fn);

      function_depth--;
    }
}

/* State shared by the finalize_nrv walk: the local variable being
   replaced (VAR), the function's RESULT_DECL, and a visited-node set
   to avoid rewalking shared trees.  */

struct nrv_data
{
  nrv_data () : visited (37) {}

  tree var;
  tree result;
  hash_table<pointer_hash <tree_node> > visited;
};

/* Helper function for walk_tree, used by finalize_nrv below.  */

static tree
finalize_nrv_r (tree* tp, int* walk_subtrees, void* data)
{
  struct nrv_data *dp = (struct nrv_data *)data;
  tree_node **slot;

  /* No need to walk into types.  There wouldn't be any need to walk into
     non-statements, except that we have to consider STMT_EXPRs.
*/
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  /* Change all returns to just refer to the RESULT_DECL; this is a nop,
     but differs from using NULL_TREE in that it indicates that we care
     about the value of the RESULT_DECL.  */
  else if (TREE_CODE (*tp) == RETURN_EXPR)
    TREE_OPERAND (*tp, 0) = dp->result;
  /* Change all cleanups for the NRV to only run when an exception is
     thrown.  */
  else if (TREE_CODE (*tp) == CLEANUP_STMT
	   && CLEANUP_DECL (*tp) == dp->var)
    CLEANUP_EH_ONLY (*tp) = 1;
  /* Replace the DECL_EXPR for the NRV with an initialization of the
     RESULT_DECL, if needed.  */
  else if (TREE_CODE (*tp) == DECL_EXPR
	   && DECL_EXPR_DECL (*tp) == dp->var)
    {
      tree init;
      if (DECL_INITIAL (dp->var)
	  && DECL_INITIAL (dp->var) != error_mark_node)
	init = build2 (INIT_EXPR, void_type_node, dp->result,
		       DECL_INITIAL (dp->var));
      else
	init = build_empty_stmt (EXPR_LOCATION (*tp));
      DECL_INITIAL (dp->var) = NULL_TREE;
      SET_EXPR_LOCATION (init, EXPR_LOCATION (*tp));
      *tp = init;
    }
  /* And replace all uses of the NRV with the RESULT_DECL.  */
  else if (*tp == dp->var)
    *tp = dp->result;

  /* Avoid walking into the same tree more than once.  Unfortunately, we
     can't just use walk_tree_without duplicates because it would only
     call us for the first occurrence of dp->var in the function body.  */
  slot = dp->visited.find_slot (*tp, INSERT);
  if (*slot)
    *walk_subtrees = 0;
  else
    *slot = *tp;

  /* Keep iterating.  */
  return NULL_TREE;
}

/* Called from finish_function to implement the named return value
   optimization by overriding all the RETURN_EXPRs and pertinent
   CLEANUP_STMTs and replacing all occurrences of VAR with RESULT, the
   RESULT_DECL for the function.  */

void
finalize_nrv (tree *tp, tree var, tree result)
{
  struct nrv_data data;

  /* Copy name from VAR to RESULT.  */
  DECL_NAME (result) = DECL_NAME (var);
  /* Don't forget that we take its address.  */
  TREE_ADDRESSABLE (result) = TREE_ADDRESSABLE (var);
  /* Finally set DECL_VALUE_EXPR to avoid assigning
     a stack slot at -O0 for the original var and debug info
     uses RESULT location for VAR.  */
  SET_DECL_VALUE_EXPR (var, result);
  DECL_HAS_VALUE_EXPR_P (var) = 1;

  data.var = var;
  data.result = result;
  cp_walk_tree (tp, finalize_nrv_r, &data, 0);
}

/* Create CP_OMP_CLAUSE_INFO for clause C.  Returns true if it is
   invalid.  */

bool
cxx_omp_create_clause_info (tree c, tree type, bool need_default_ctor,
			    bool need_copy_ctor, bool need_copy_assignment,
			    bool need_dtor)
{
  int save_errorcount = errorcount;
  tree info, t;

  /* Always allocate 3 elements for simplicity.  These are the
     function decls for the ctor, dtor, and assignment op.
     This layout is known to the three lang hooks,
     cxx_omp_clause_default_init, cxx_omp_clause_copy_init,
     and cxx_omp_clause_assign_op.  */
  info = make_tree_vec (3);
  CP_OMP_CLAUSE_INFO (c) = info;

  if (need_default_ctor || need_copy_ctor)
    {
      if (need_default_ctor)
	t = get_default_ctor (type);
      else
	t = get_copy_ctor (type, tf_warning_or_error);

      /* Trivial special members need no call; record only non-trivial
	 ones.  */
      if (t && !trivial_fn_p (t))
	TREE_VEC_ELT (info, 0) = t;
    }

  if (need_dtor && TYPE_HAS_NONTRIVIAL_DESTRUCTOR (type))
    TREE_VEC_ELT (info, 1) = get_dtor (type, tf_warning_or_error);

  if (need_copy_assignment)
    {
      t = get_copy_assign (type);

      if (t && !trivial_fn_p (t))
	TREE_VEC_ELT (info, 2) = t;
    }

  /* Any new errors indicate the clause is invalid.  */
  return errorcount != save_errorcount;
}

/* Helper function for handle_omp_array_sections.  Called recursively
   to handle multiple array-section-subscripts.  C is the clause,
   T current expression (initially OMP_CLAUSE_DECL), which is either
   a TREE_LIST for array-section-subscript (TREE_PURPOSE is low-bound
   expression if specified, TREE_VALUE length expression if specified,
   TREE_CHAIN is what it has been specified after, or some decl.
   TYPES vector is populated with array section types, MAYBE_ZERO_LEN
   set to true if any of the array-section-subscript could have length
   of zero (explicit or implicit), FIRST_NON_ONE is the index of the
   first array-section-subscript which is known not to have length
   of one.  Given say:
   map(a[:b][2:1][:c][:2][:d][e:f][2:5])
   FIRST_NON_ONE will be 3, array-section-subscript [:b], [2:1] and [:c]
   all are or may have length of 1, array-section-subscript [:2] is the
   first one knonwn not to have length 1.  For array-section-subscript
   <= FIRST_NON_ONE we diagnose non-contiguous arrays if low bound isn't
   0 or length isn't the array domain max + 1, for > FIRST_NON_ONE we
   can if MAYBE_ZERO_LEN is false.  MAYBE_ZERO_LEN will be true in the above
   case though, as some lengths could be zero.  */

static tree
handle_omp_array_sections_1 (tree c, tree t, vec<tree> &types,
			     bool &maybe_zero_len, unsigned int &first_non_one)
{
  tree ret, low_bound, length, type;
  /* Base case: T is the underlying decl, not a TREE_LIST subscript.  */
  if (TREE_CODE (t) != TREE_LIST)
    {
      if (error_operand_p (t))
	return error_mark_node;
      if (type_dependent_expression_p (t))
	return NULL_TREE;
      if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL)
	{
	  if (processing_template_decl)
	    return NULL_TREE;
	  if (DECL_P (t))
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qD is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  else
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%qE is not a variable in %qs clause", t,
		      omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      else if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	       && TREE_CODE (t) == VAR_DECL && DECL_THREAD_LOCAL_P (t))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "%qD is threadprivate variable in %qs clause", t,
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      t = convert_from_reference (t);
      return t;
    }

  /* Recurse on the outer subscripts first, innermost-last.  */
  ret = handle_omp_array_sections_1 (c, TREE_CHAIN (t), types,
				     maybe_zero_len, first_non_one);
  if (ret == error_mark_node || ret == NULL_TREE)
    return ret;

  type = TREE_TYPE (ret);
  low_bound = TREE_PURPOSE (t);
  length = TREE_VALUE (t);
  if ((low_bound && type_dependent_expression_p (low_bound))
      || (length && type_dependent_expression_p (length)))
    return NULL_TREE;

  if (low_bound == error_mark_node || length == error_mark_node)
    return error_mark_node;

  if (low_bound && !INTEGRAL_TYPE_P (TREE_TYPE (low_bound)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"low bound %qE of array section does not have integral type",
		low_bound);
      return error_mark_node;
    }
  if (length && !INTEGRAL_TYPE_P (TREE_TYPE (length)))
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"length %qE of array section does not have integral type",
		length);
      return error_mark_node;
    }
  if (low_bound)
    low_bound = mark_rvalue_use (low_bound);
  if (length)
    length = mark_rvalue_use (length);
  /* Narrow over-wide constants to sizetype for the arithmetic below.  */
  if (low_bound
      && TREE_CODE (low_bound) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (low_bound))
	 > TYPE_PRECISION (sizetype))
    low_bound = fold_convert (sizetype, low_bound);
  if (length
      && TREE_CODE (length) == INTEGER_CST
      && TYPE_PRECISION (TREE_TYPE (length))
	 > TYPE_PRECISION (sizetype))
    length = fold_convert (sizetype, length);
  if (low_bound == NULL_TREE)
    low_bound = integer_zero_node;

  if (length != NULL_TREE)
    {
      if (!integer_nonzerop (length))
	maybe_zero_len = true;
      if (first_non_one == types.length ()
	  && (TREE_CODE (length) != INTEGER_CST || integer_onep (length)))
	first_non_one++;
    }
  if (TREE_CODE (type) == ARRAY_TYPE)
    {
      if (length == NULL_TREE
	  && (TYPE_DOMAIN (type) == NULL_TREE
	      || TYPE_MAX_VALUE (TYPE_DOMAIN (type)) == NULL_TREE))
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for unknown bound array type length expression must "
		    "be specified");
	  return error_mark_node;
	}
      if (TREE_CODE (low_bound) == INTEGER_CST
	  && tree_int_cst_sgn (low_bound) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative low bound in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      if (length != NULL_TREE
	  && TREE_CODE (length) == INTEGER_CST
	  && tree_int_cst_sgn (length) == -1)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "negative length in array section in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
      /* With a constant array bound, check low-bound/length against
	 the array size (domain max + 1).  */
      if (TYPE_DOMAIN (type)
	  && TYPE_MAX_VALUE (TYPE_DOMAIN (type))
	  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (type)))
			== INTEGER_CST)
	{
	  tree size = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
				  size_one_node);
	  if (TREE_CODE (low_bound) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, low_bound))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "low bound %qE above array section size "
			    "in %qs clause", low_bound,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (tree_int_cst_equal (size, low_bound))
		maybe_zero_len = true;
	      else if (length == NULL_TREE
		       && first_non_one == types.length ()
		       && tree_int_cst_equal
			    (TYPE_MAX_VALUE (TYPE_DOMAIN (type)),
			     low_bound))
		first_non_one++;
	    }
	  else if (length == NULL_TREE)
	    {
	      maybe_zero_len = true;
	      if (first_non_one == types.length ())
		first_non_one++;
	    }
	  if (length && TREE_CODE (length) == INTEGER_CST)
	    {
	      if (tree_int_cst_lt (size, length))
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "length %qE above array section size "
			    "in %qs clause", length,
			    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		  return error_mark_node;
		}
	      if (TREE_CODE (low_bound) == INTEGER_CST)
		{
		  tree lbpluslen
		    = size_binop (PLUS_EXPR,
				  fold_convert (sizetype, low_bound),
				  fold_convert (sizetype, length));
		  if (TREE_CODE (lbpluslen) == INTEGER_CST
		      && tree_int_cst_lt (size, lbpluslen))
		    {
		      error_at (OMP_CLAUSE_LOCATION (c),
				"high bound %qE above array section size "
				"in %qs clause", lbpluslen,
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return error_mark_node;
		    }
		}
	    }
	}
      else if (length == NULL_TREE)
	{
	  maybe_zero_len = true;
	  if (first_non_one == types.length ())
	    first_non_one++;
	}

      /* For [lb:] we will need to evaluate lb more than once.  */
      if (length == NULL_TREE && OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	{
	  tree lb = cp_save_expr (low_bound);
	  if (lb != low_bound)
	    {
	      TREE_PURPOSE (t) = lb;
	      low_bound = lb;
	    }
	}
    }
  else if (TREE_CODE (type) == POINTER_TYPE)
    {
      if (length == NULL_TREE)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "for pointer type length expression must be specified");
	  return error_mark_node;
	}
      /* If there is a pointer type anywhere but in the very first
	 array-section-subscript, the array section can't be contiguous.  */
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND
	  && TREE_CODE (TREE_CHAIN (t)) == TREE_LIST)
	{
	  error_at (OMP_CLAUSE_LOCATION (c),
		    "array section is not contiguous in %qs clause",
		    omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
	  return error_mark_node;
	}
    }
  else
    {
      error_at (OMP_CLAUSE_LOCATION (c),
		"%qE does not have pointer or array type", ret);
      return error_mark_node;
    }
  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
    types.safe_push (TREE_TYPE (ret));
  /* We will need to evaluate lb more than once.  */
  tree lb = cp_save_expr (low_bound);
  if (lb != low_bound)
    {
      TREE_PURPOSE (t) = lb;
      low_bound = lb;
    }
  ret = grok_array_decl (OMP_CLAUSE_LOCATION (c), ret, low_bound, false);
  return ret;
}

/* Handle array sections for clause C.  */

static bool
handle_omp_array_sections (tree c)
{
  bool maybe_zero_len = false;
  unsigned int first_non_one = 0;
  auto_vec<tree> types;
  tree first = handle_omp_array_sections_1 (c, OMP_CLAUSE_DECL (c), types,
					    maybe_zero_len, first_non_one);
  if (first == error_mark_node)
    return true;
  if (first == NULL_TREE)
    return false;
  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND)
    {
      tree t = OMP_CLAUSE_DECL (c);
      tree tem = NULL_TREE;
      if (processing_template_decl)
	return false;
      /* Need to evaluate side effects in the length expressions
	 if any.
*/
      while (TREE_CODE (t) == TREE_LIST)
	{
	  if (TREE_VALUE (t) && TREE_SIDE_EFFECTS (TREE_VALUE (t)))
	    {
	      if (tem == NULL_TREE)
		tem = TREE_VALUE (t);
	      else
		tem = build2 (COMPOUND_EXPR, TREE_TYPE (tem),
			      TREE_VALUE (t), tem);
	    }
	  t = TREE_CHAIN (t);
	}
      if (tem)
	first = build2 (COMPOUND_EXPR, TREE_TYPE (first), tem, first);
      OMP_CLAUSE_DECL (c) = first;
    }
  else
    {
      unsigned int num = types.length (), i;
      tree t, side_effects = NULL_TREE, size = NULL_TREE;
      tree condition = NULL_TREE;

      if (int_size_in_bytes (TREE_TYPE (first)) <= 0)
	maybe_zero_len = true;
      if (processing_template_decl && maybe_zero_len)
	return false;

      /* Walk subscripts outermost-first (types was pushed
	 innermost-first during recursion, hence the countdown).  */
      for (i = num, t = OMP_CLAUSE_DECL (c); i > 0;
	   t = TREE_CHAIN (t))
	{
	  tree low_bound = TREE_PURPOSE (t);
	  tree length = TREE_VALUE (t);

	  i--;
	  if (low_bound
	      && TREE_CODE (low_bound) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (low_bound))
		 > TYPE_PRECISION (sizetype))
	    low_bound = fold_convert (sizetype, low_bound);
	  if (length
	      && TREE_CODE (length) == INTEGER_CST
	      && TYPE_PRECISION (TREE_TYPE (length))
		 > TYPE_PRECISION (sizetype))
	    length = fold_convert (sizetype, length);
	  if (low_bound == NULL_TREE)
	    low_bound = integer_zero_node;
	  if (!maybe_zero_len && i > first_non_one)
	    {
	      if (integer_nonzerop (low_bound))
		goto do_warn_noncontiguous;
	      if (length != NULL_TREE
		  && TREE_CODE (length) == INTEGER_CST
		  && TYPE_DOMAIN (types[i])
		  && TYPE_MAX_VALUE (TYPE_DOMAIN (types[i]))
		  && TREE_CODE (TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])))
		     == INTEGER_CST)
		{
		  tree size;
		  size = size_binop (PLUS_EXPR,
				     TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				     size_one_node);
		  if (!tree_int_cst_equal (length, size))
		    {
		     do_warn_noncontiguous:
		      error_at (OMP_CLAUSE_LOCATION (c),
				"array section is not contiguous in %qs "
				"clause",
				omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
		      return true;
		    }
		}
	      if (!processing_template_decl
		  && length != NULL_TREE
		  && TREE_SIDE_EFFECTS (length))
		{
		  if (side_effects == NULL_TREE)
		    side_effects = length;
		  else
		    side_effects = build2 (COMPOUND_EXPR,
					   TREE_TYPE (side_effects),
					   length, side_effects);
		}
	    }
	  else if (processing_template_decl)
	    continue;
	  else
	    {
	      tree l;

	      if (i > first_non_one && length && integer_nonzerop (length))
		continue;
	      if (length)
		l = fold_convert (sizetype, length);
	      else
		{
		  l = size_binop (PLUS_EXPR,
				  TYPE_MAX_VALUE (TYPE_DOMAIN (types[i])),
				  size_one_node);
		  l = size_binop (MINUS_EXPR, l,
				  fold_convert (sizetype, low_bound));
		}
	      if (i > first_non_one)
		{
		  /* Accumulate a runtime "all lengths non-zero"
		     condition.  */
		  l = fold_build2 (NE_EXPR, boolean_type_node, l,
				   size_zero_node);
		  if (condition == NULL_TREE)
		    condition = l;
		  else
		    condition = fold_build2 (BIT_AND_EXPR, boolean_type_node,
					     l, condition);
		}
	      else if (size == NULL_TREE)
		{
		  size = size_in_bytes (TREE_TYPE (types[i]));
		  size = size_binop (MULT_EXPR, size, l);
		  if (condition)
		    size = fold_build3 (COND_EXPR, sizetype, condition,
					size, size_zero_node);
		}
	      else
		size = size_binop (MULT_EXPR, size, l);
	    }
	}
      if (!processing_template_decl)
	{
	  if (side_effects)
	    size = build2 (COMPOUND_EXPR, sizetype, side_effects, size);
	  OMP_CLAUSE_DECL (c) = first;
	  OMP_CLAUSE_SIZE (c) = size;
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
	    return false;
	  /* Append a GOMP_MAP_POINTER clause carrying the offset of the
	     section start from the base pointer/decl.  */
	  tree c2 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
				      OMP_CLAUSE_MAP);
	  OMP_CLAUSE_SET_MAP_KIND (c2, GOMP_MAP_POINTER);
	  if (!cxx_mark_addressable (t))
	    return false;
	  OMP_CLAUSE_DECL (c2) = t;
	  t = build_fold_addr_expr (first);
	  t = fold_convert_loc (OMP_CLAUSE_LOCATION (c),
				ptrdiff_type_node, t);
	  tree ptr = OMP_CLAUSE_DECL (c2);
	  ptr = convert_from_reference (ptr);
	  if (!POINTER_TYPE_P (TREE_TYPE (ptr)))
	    ptr = build_fold_addr_expr (ptr);
	  t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR,
			       ptrdiff_type_node, t,
			       fold_convert_loc (OMP_CLAUSE_LOCATION (c),
						 ptrdiff_type_node, ptr));
	  OMP_CLAUSE_SIZE (c2) = t;
	  OMP_CLAUSE_CHAIN (c2) = OMP_CLAUSE_CHAIN (c);
	  OMP_CLAUSE_CHAIN (c) = c2;
	  ptr = OMP_CLAUSE_DECL (c2);
	  /* A reference to pointer needs one more GOMP_MAP_POINTER
	     clause for the pointee pointer itself.  */
	  if (TREE_CODE (TREE_TYPE (ptr)) == REFERENCE_TYPE
	      && POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (ptr))))
	    {
	      tree c3 = build_omp_clause (OMP_CLAUSE_LOCATION (c),
					  OMP_CLAUSE_MAP);
	      OMP_CLAUSE_SET_MAP_KIND (c3, GOMP_MAP_POINTER);
	      OMP_CLAUSE_DECL (c3) = ptr;
	      OMP_CLAUSE_DECL (c2) = convert_from_reference (ptr);
	      OMP_CLAUSE_SIZE (c3) = size_zero_node;
	      OMP_CLAUSE_CHAIN (c3) = OMP_CLAUSE_CHAIN (c2);
	      OMP_CLAUSE_CHAIN (c2) = c3;
	    }
	}
    }
  return false;
}

/* Return identifier to look up for omp declare reduction.  */

tree
omp_reduction_id (enum tree_code reduction_code, tree reduction_id, tree type)
{
  const char *p = NULL;
  const char *m = NULL;
  switch (reduction_code)
    {
    case PLUS_EXPR:
    case MULT_EXPR:
    case MINUS_EXPR:
    case BIT_AND_EXPR:
    case BIT_XOR_EXPR:
    case BIT_IOR_EXPR:
    case TRUTH_ANDIF_EXPR:
    case TRUTH_ORIF_EXPR:
      reduction_id = ansi_opname (reduction_code);
      break;
    case MIN_EXPR:
      p = "min";
      break;
    case MAX_EXPR:
      p = "max";
      break;
    default:
      break;
    }

  if (p == NULL)
    {
      if (TREE_CODE (reduction_id) != IDENTIFIER_NODE)
	return error_mark_node;
      p = IDENTIFIER_POINTER (reduction_id);
    }

  if (type != NULL_TREE)
    m = mangle_type_string (TYPE_MAIN_VARIANT (type));

  /* Build "omp declare reduction <name>[~<mangled-type>]"; if P is
     already so prefixed, don't prefix it again (lenp collapses to 1,
     i.e. only the terminating NUL of the prefix is counted).  */
  const char prefix[] = "omp declare reduction ";
  size_t lenp = sizeof (prefix);
  if (strncmp (p, prefix, lenp - 1) == 0)
    lenp = 1;
  size_t len = strlen (p);
  size_t lenm = m ? strlen (m) + 1 : 0;
  char *name = XALLOCAVEC (char, lenp + len + lenm);
  if (lenp > 1)
    memcpy (name, prefix, lenp - 1);
  memcpy (name + lenp - 1, p, len + 1);
  if (m)
    {
      name[lenp + len - 1] = '~';
      memcpy (name + lenp + len, m, lenm);
    }
  return get_identifier (name);
}

/* Lookup OpenMP UDR ID for TYPE, return the corresponding artificial
   FUNCTION_DECL or NULL_TREE if not found.
*/

static tree
omp_reduction_lookup (location_t loc, tree id, tree type, tree *baselinkp,
		      vec<tree> *ambiguousp)
{
  tree orig_id = id;
  tree baselink = NULL_TREE;
  if (identifier_p (id))
    {
      cp_id_kind idk;
      bool nonint_cst_expression_p;
      const char *error_msg;
      id = omp_reduction_id (ERROR_MARK, id, type);
      tree decl = lookup_name (id);
      if (decl == NULL_TREE)
	decl = error_mark_node;
      id = finish_id_expression (id, decl, NULL_TREE, &idk, false, true,
				 &nonint_cst_expression_p, false, true, false,
				 false, &error_msg, loc);
      /* Unqualified lookup failed: try argument-dependent lookup with
	 a reference-to-TYPE argument.  */
      if (idk == CP_ID_KIND_UNQUALIFIED && identifier_p (id))
	{
	  vec<tree, va_gc> *args = NULL;
	  vec_safe_push (args, build_reference_type (type));
	  id = perform_koenig_lookup (id, args, tf_none);
	}
    }
  else if (TREE_CODE (id) == SCOPE_REF)
    id = lookup_qualified_name (TREE_OPERAND (id, 0),
				omp_reduction_id (ERROR_MARK,
						  TREE_OPERAND (id, 1),
						  type),
				false, false);
  tree fns = id;
  if (id && is_overloaded_fn (id))
    id = get_fns (id);
  /* Pick the overload whose first parameter is reference-to-TYPE.  */
  for (; id; id = OVL_NEXT (id))
    {
      tree fndecl = OVL_CURRENT (id);
      if (TREE_CODE (fndecl) == FUNCTION_DECL)
	{
	  tree argtype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (fndecl)));
	  if (same_type_p (TREE_TYPE (argtype), type))
	    break;
	}
    }
  if (id && BASELINK_P (fns))
    {
      if (baselinkp)
	*baselinkp = fns;
      else
	baselink = fns;
    }
  /* Not found directly: search base classes recursively, diagnosing
     ambiguity between bases.  */
  if (id == NULL_TREE && CLASS_TYPE_P (type) && TYPE_BINFO (type))
    {
      vec<tree> ambiguous = vNULL;
      tree binfo = TYPE_BINFO (type), base_binfo, ret = NULL_TREE;
      unsigned int ix;
      if (ambiguousp == NULL)
	ambiguousp = &ambiguous;
      for (ix = 0; BINFO_BASE_ITERATE (binfo, ix, base_binfo); ix++)
	{
	  id = omp_reduction_lookup (loc, orig_id, BINFO_TYPE (base_binfo),
				     baselinkp ? baselinkp : &baselink,
				     ambiguousp);
	  if (id == NULL_TREE)
	    continue;
	  if (!ambiguousp->is_empty ())
	    ambiguousp->safe_push (id);
	  else if (ret != NULL_TREE)
	    {
	      ambiguousp->safe_push (ret);
	      ambiguousp->safe_push (id);
	      ret = NULL_TREE;
	    }
	  else
	    ret = id;
	}
      /* Only the outermost recursion level reports the ambiguity.  */
      if (ambiguousp != &ambiguous)
	return ret;
      if (!ambiguous.is_empty ())
	{
	  const char *str = _("candidates are:");
	  unsigned int idx;
	  tree udr;
	  error_at (loc, "user defined reduction lookup is ambiguous");
	  FOR_EACH_VEC_ELT (ambiguous, idx, udr)
	    {
	      inform (DECL_SOURCE_LOCATION (udr), "%s %#D", str, udr);
	      if (idx == 0)
		str = get_spaces (str);
	    }
	  ambiguous.release ();
	  ret = error_mark_node;
	  baselink = NULL_TREE;
	}
      id = ret;
    }
  if (id && baselink)
    perform_or_defer_access_check (BASELINK_BINFO (baselink),
				   id, id, tf_warning_or_error);
  return id;
}

/* Helper function for cp_parser_omp_declare_reduction_exprs
   and tsubst_omp_udr.
   Remove CLEANUP_STMT for data (omp_priv variable).
   Also append INIT_EXPR for DECL_INITIAL of omp_priv after its
   DECL_EXPR.  */

tree
cp_remove_omp_priv_cleanup_stmt (tree *tp, int *walk_subtrees, void *data)
{
  if (TYPE_P (*tp))
    *walk_subtrees = 0;
  else if (TREE_CODE (*tp) == CLEANUP_STMT && CLEANUP_DECL (*tp) == (tree) data)
    *tp = CLEANUP_BODY (*tp);
  else if (TREE_CODE (*tp) == DECL_EXPR)
    {
      tree decl = DECL_EXPR_DECL (*tp);
      if (!processing_template_decl
	  && decl == (tree) data
	  && DECL_INITIAL (decl)
	  && DECL_INITIAL (decl) != error_mark_node)
	{
	  /* Split the initializer out of the DECL_EXPR into a
	     following INIT_EXPR statement.  */
	  tree list = NULL_TREE;
	  append_to_statement_list_force (*tp, &list);
	  tree init_expr = build2 (INIT_EXPR, void_type_node,
				   decl, DECL_INITIAL (decl));
	  DECL_INITIAL (decl) = NULL_TREE;
	  append_to_statement_list_force (init_expr, &list);
	  *tp = list;
	}
    }
  return NULL_TREE;
}

/* Data passed from cp_check_omp_declare_reduction to
   cp_check_omp_declare_reduction_r.  */

struct cp_check_omp_declare_reduction_data
{
  location_t loc;
  /* The first seven statements of the UDR body (combiner decls/expr,
     then optionally initializer decls/expr).  */
  tree stmts[7];
  /* True while checking the combiner, false for the initializer.  */
  bool combiner_p;
};

/* Helper function for cp_check_omp_declare_reduction, called
   via cp_walk_tree.
*/
/* Walker callback: flag any variable referenced in a UDR combiner that is
   not omp_out/omp_in, or in a UDR initializer that is not
   omp_priv/omp_orig.  Returns the offending decl to stop the walk.  */
static tree cp_check_omp_declare_reduction_r (tree *tp, int *, void *data) { struct cp_check_omp_declare_reduction_data *udr_data = (struct cp_check_omp_declare_reduction_data *) data; if (SSA_VAR_P (*tp) && !DECL_ARTIFICIAL (*tp) && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 0 : 3]) && *tp != DECL_EXPR_DECL (udr_data->stmts[udr_data->combiner_p ? 1 : 4])) { location_t loc = udr_data->loc; if (udr_data->combiner_p) error_at (loc, "%<#pragma omp declare reduction%> combiner refers to " "variable %qD which is not %<omp_out%> nor %<omp_in%>", *tp); else error_at (loc, "%<#pragma omp declare reduction%> initializer refers " "to variable %qD which is not %<omp_priv%> nor " "%<omp_orig%>", *tp); return *tp; } return NULL_TREE; }
/* Diagnose violation of OpenMP #pragma omp declare reduction restrictions. */
void cp_check_omp_declare_reduction (tree udr) { tree type = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (udr))); gcc_assert (TREE_CODE (type) == REFERENCE_TYPE); type = TREE_TYPE (type); int i; location_t loc = DECL_SOURCE_LOCATION (udr); if (type == error_mark_node) return;
  /* Reject UDRs that redeclare one of the predefined reductions
     (+, *, -, &, ^, |, &&, ||, and min/max) on an arithmetic type.  */
  if (ARITHMETIC_TYPE_P (type)) { static enum tree_code predef_codes[] = { PLUS_EXPR, MULT_EXPR, MINUS_EXPR, BIT_AND_EXPR, BIT_XOR_EXPR, BIT_IOR_EXPR, TRUTH_ANDIF_EXPR, TRUTH_ORIF_EXPR }; for (i = 0; i < 8; i++) { tree id = omp_reduction_id (predef_codes[i], NULL_TREE, NULL_TREE); const char *n1 = IDENTIFIER_POINTER (DECL_NAME (udr)); const char *n2 = IDENTIFIER_POINTER (id); if (strncmp (n1, n2, IDENTIFIER_LENGTH (id)) == 0 && (n1[IDENTIFIER_LENGTH (id)] == '~' || n1[IDENTIFIER_LENGTH (id)] == '\0')) break; }
  /* NOTE(review): comparing a type's TREE_CODE against the expression
     code COMPLEX_EXPR looks suspicious -- a type never has that code, so
     the condition is effectively always true; COMPLEX_TYPE may have been
     intended (to permit user-defined min/max on complex types).  Confirm
     against upstream before changing.  */
  if (i == 8 && TREE_CODE (type) != COMPLEX_EXPR) { const char prefix_minmax[] = "omp declare reduction m"; size_t prefix_size = sizeof (prefix_minmax) - 1; const char *n = IDENTIFIER_POINTER (DECL_NAME (udr)); if (strncmp (IDENTIFIER_POINTER (DECL_NAME (udr)), prefix_minmax, prefix_size) == 0 && ((n[prefix_size] == 'i' && n[prefix_size + 1] == 'n') || (n[prefix_size] == 'a' && n[prefix_size + 1] == 'x')) && (n[prefix_size + 2] == '~' || n[prefix_size + 2] == '\0')) i = 0; } if (i < 8) { error_at (loc, "predeclared arithmetic type %qT in " "%<#pragma omp declare reduction%>", type); return; } } else if (TREE_CODE (type) == FUNCTION_TYPE || TREE_CODE (type) == METHOD_TYPE || TREE_CODE (type) == ARRAY_TYPE) { error_at (loc, "function or array type %qT in " "%<#pragma omp declare reduction%>", type); return; } else if (TREE_CODE (type) == REFERENCE_TYPE) { error_at (loc, "reference type %qT in %<#pragma omp declare reduction%>", type); return; } else if (TYPE_QUALS_NO_ADDR_SPACE (type)) { error_at (loc, "const, volatile or __restrict qualified type %qT in " "%<#pragma omp declare reduction%>", type); return; }
  /* Walk the combiner (stmts[2]) and initializer (stmts[5] and the
     DECL_INITIAL of omp_priv) checking that only omp_out/omp_in resp.
     omp_priv/omp_orig are referenced.  TREE_NO_WARNING on the omp_out
     decl suppresses duplicate diagnostics on re-checking.  */
  tree body = DECL_SAVED_TREE (udr); if (body == NULL_TREE || TREE_CODE (body) != STATEMENT_LIST) return; tree_stmt_iterator tsi; struct cp_check_omp_declare_reduction_data data; memset (data.stmts, 0, sizeof data.stmts); for (i = 0, tsi = tsi_start (body); i < 7 && !tsi_end_p (tsi); i++, tsi_next (&tsi)) data.stmts[i] = tsi_stmt (tsi); data.loc = loc; gcc_assert (tsi_end_p (tsi)); if (i >= 3) { gcc_assert (TREE_CODE (data.stmts[0]) == DECL_EXPR && TREE_CODE (data.stmts[1]) == DECL_EXPR); if (TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0]))) return; data.combiner_p = true; if (cp_walk_tree (&data.stmts[2], cp_check_omp_declare_reduction_r, &data, NULL)) TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1; } if (i >= 6) { gcc_assert (TREE_CODE (data.stmts[3]) == DECL_EXPR && TREE_CODE (data.stmts[4]) == DECL_EXPR); data.combiner_p = false; if (cp_walk_tree (&data.stmts[5], cp_check_omp_declare_reduction_r, &data, NULL) || cp_walk_tree (&DECL_INITIAL (DECL_EXPR_DECL (data.stmts[3])), cp_check_omp_declare_reduction_r, &data, NULL)) TREE_NO_WARNING (DECL_EXPR_DECL (data.stmts[0])) = 1; if (i == 7) gcc_assert (TREE_CODE (data.stmts[6]) == DECL_EXPR); } }
/* Helper function of finish_omp_clauses.
Clone STMT as if we were making an inline call. But, remap the OMP_DECL1 VAR_DECL (omp_out resp. omp_orig) to PLACEHOLDER and OMP_DECL2 VAR_DECL (omp_in resp. omp_priv) to DECL. */
static tree clone_omp_udr (tree stmt, tree omp_decl1, tree omp_decl2, tree decl, tree placeholder) { copy_body_data id; hash_map<tree, tree> decl_map; decl_map.put (omp_decl1, placeholder); decl_map.put (omp_decl2, decl);
  /* Set up a minimal inliner context: bodies are copied from the UDR's
     artificial function into the current function, remapping the two
     decls via DECL_MAP and leaving every other decl unchanged.  */
  memset (&id, 0, sizeof (id)); id.src_fn = DECL_CONTEXT (omp_decl1); id.dst_fn = current_function_decl; id.src_cfun = DECL_STRUCT_FUNCTION (id.src_fn); id.decl_map = &decl_map; id.copy_decl = copy_decl_no_change; id.transform_call_graph_edges = CB_CGE_DUPLICATE; id.transform_new_cfg = true; id.transform_return_to_modify = false; id.transform_lang_insert_block = NULL; id.eh_lp_nr = 0; walk_tree (&stmt, copy_tree_body_r, &id, NULL); return stmt; }
/* Helper function of finish_omp_clauses, called via cp_walk_tree. Find OMP_CLAUSE_PLACEHOLDER (passed in DATA) in *TP. */
static tree find_omp_placeholder_r (tree *tp, int *, void *data) { if (*tp == (tree) data) return *tp; return NULL_TREE; }
/* Helper function of finish_omp_clauses. Handle OMP_CLAUSE_REDUCTION C. Return true if there is some error and the clause should be removed.
*/
/* NEED_DEFAULT_CTOR and NEED_DTOR are out-parameters telling the caller
   (finish_omp_clauses) which special members the private copy needs.  */
static bool finish_omp_reduction_clause (tree c, bool *need_default_ctor, bool *need_dtor) { tree t = OMP_CLAUSE_DECL (c); bool predefined = false; tree type = TREE_TYPE (t); if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); if (type == error_mark_node) return true;
  /* Predefined reductions exist only for arithmetic types, and even
     there some operator/type combinations are excluded.  */
  else if (ARITHMETIC_TYPE_P (type)) switch (OMP_CLAUSE_REDUCTION_CODE (c)) { case PLUS_EXPR: case MULT_EXPR: case MINUS_EXPR: predefined = true; break; case MIN_EXPR: case MAX_EXPR: if (TREE_CODE (type) == COMPLEX_TYPE) break; predefined = true; break; case BIT_AND_EXPR: case BIT_IOR_EXPR: case BIT_XOR_EXPR: if (FLOAT_TYPE_P (type) || TREE_CODE (type) == COMPLEX_TYPE) break; predefined = true; break; case TRUTH_ANDIF_EXPR: case TRUTH_ORIF_EXPR: if (FLOAT_TYPE_P (type)) break; predefined = true; break; default: break; } else if (TREE_CODE (type) == ARRAY_TYPE || TYPE_READONLY (type)) { error ("%qE has invalid type for %<reduction%>", t); return true; } else if (!processing_template_decl) { t = require_complete_type (t); if (t == error_mark_node) return true; OMP_CLAUSE_DECL (c) = t; } if (predefined) { OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE; return false; } else if (processing_template_decl) return false;
  /* Not predefined: look up a matching user-defined reduction for TYPE
     (the placeholder slot initially carries the UDR name, if any).  */
  tree id = OMP_CLAUSE_REDUCTION_PLACEHOLDER (c); type = TYPE_MAIN_VARIANT (TREE_TYPE (t)); if (TREE_CODE (type) == REFERENCE_TYPE) type = TREE_TYPE (type); OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = NULL_TREE; if (id == NULL_TREE) id = omp_reduction_id (OMP_CLAUSE_REDUCTION_CODE (c), NULL_TREE, NULL_TREE); id = omp_reduction_lookup (OMP_CLAUSE_LOCATION (c), id, type, NULL, NULL); if (id) { if (id == error_mark_node) return true; id = OVL_CURRENT (id); mark_used (id); tree body = DECL_SAVED_TREE (id); if (!body) return true; if (TREE_CODE (body) == STATEMENT_LIST) { tree_stmt_iterator tsi; tree placeholder = NULL_TREE; int i; tree stmts[7]; tree atype = TREE_VALUE (TYPE_ARG_TYPES (TREE_TYPE (id))); atype = TREE_TYPE (atype); bool need_static_cast = !same_type_p (type, atype); memset (stmts, 0, sizeof stmts); for (i = 0, tsi = tsi_start (body); i < 7 && !tsi_end_p (tsi); i++, tsi_next (&tsi)) stmts[i] = tsi_stmt (tsi); gcc_assert (tsi_end_p (tsi));
      /* Clone the combiner (stmts[2]), remapping omp_out/omp_in onto the
         clause placeholder and decl; when the UDR was found on a base
         class, static_cast both to the UDR's own type first.  */
      if (i >= 3) { gcc_assert (TREE_CODE (stmts[0]) == DECL_EXPR && TREE_CODE (stmts[1]) == DECL_EXPR); placeholder = build_lang_decl (VAR_DECL, NULL_TREE, type); DECL_ARTIFICIAL (placeholder) = 1; DECL_IGNORED_P (placeholder) = 1; OMP_CLAUSE_REDUCTION_PLACEHOLDER (c) = placeholder; if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[0]))) cxx_mark_addressable (placeholder); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[1])) && TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) != REFERENCE_TYPE) cxx_mark_addressable (OMP_CLAUSE_DECL (c)); tree omp_out = placeholder; tree omp_in = convert_from_reference (OMP_CLAUSE_DECL (c)); if (need_static_cast) { tree rtype = build_reference_type (atype); omp_out = build_static_cast (rtype, omp_out, tf_warning_or_error); omp_in = build_static_cast (rtype, omp_in, tf_warning_or_error); if (omp_out == error_mark_node || omp_in == error_mark_node) return true; omp_out = convert_from_reference (omp_out); omp_in = convert_from_reference (omp_in); } OMP_CLAUSE_REDUCTION_MERGE (c) = clone_omp_udr (stmts[2], DECL_EXPR_DECL (stmts[0]), DECL_EXPR_DECL (stmts[1]), omp_in, omp_out); }
      /* Clone the initializer (stmts[5]), remapping omp_orig/omp_priv;
         record whether it references omp_orig at all.  */
      if (i >= 6) { gcc_assert (TREE_CODE (stmts[3]) == DECL_EXPR && TREE_CODE (stmts[4]) == DECL_EXPR); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[3]))) cxx_mark_addressable (OMP_CLAUSE_DECL (c)); if (TREE_ADDRESSABLE (DECL_EXPR_DECL (stmts[4]))) cxx_mark_addressable (placeholder); tree omp_priv = convert_from_reference (OMP_CLAUSE_DECL (c)); tree omp_orig = placeholder; if (need_static_cast) { if (i == 7) { error_at (OMP_CLAUSE_LOCATION (c), "user defined reduction with constructor " "initializer for base class %qT", atype); return true; } tree rtype = build_reference_type (atype); omp_priv = build_static_cast (rtype, omp_priv, tf_warning_or_error); omp_orig = build_static_cast (rtype, omp_orig, tf_warning_or_error); if (omp_priv == error_mark_node || omp_orig == error_mark_node) return true; omp_priv = convert_from_reference (omp_priv); omp_orig = convert_from_reference (omp_orig); } if (i == 6) *need_default_ctor = true; OMP_CLAUSE_REDUCTION_INIT (c) = clone_omp_udr (stmts[5], DECL_EXPR_DECL (stmts[4]), DECL_EXPR_DECL (stmts[3]), omp_priv, omp_orig); if (cp_walk_tree (&OMP_CLAUSE_REDUCTION_INIT (c), find_omp_placeholder_r, placeholder, NULL)) OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c) = 1; }
      /* No explicit initializer: default-construct class types, zero-init
         everything else.  */
      else if (i >= 3) { if (CLASS_TYPE_P (type) && !pod_type_p (type)) *need_default_ctor = true; else { tree init; tree v = convert_from_reference (t); if (AGGREGATE_TYPE_P (TREE_TYPE (v))) init = build_constructor (TREE_TYPE (v), NULL); else init = fold_convert (TREE_TYPE (v), integer_zero_node); OMP_CLAUSE_REDUCTION_INIT (c) = build2 (INIT_EXPR, TREE_TYPE (v), v, init); } } } } if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c)) *need_dtor = true; else { error ("user defined reduction not found for %qD", t); return true; } return false; }
/* For all elements of CLAUSES, validate them vs OpenMP constraints. Remove any elements from the list that are invalid.
*/
/* Two passes are made over the clause chain: the first performs
   per-clause syntactic/type checks and duplicate detection (via the
   DECL_UID bitmaps), the second checks C++ special-member requirements
   (ctors, dtors, copy assignment) and predetermined data sharing.
   Invalid clauses are spliced out of the chain.  */
tree finish_omp_clauses (tree clauses) { bitmap_head generic_head, firstprivate_head, lastprivate_head; bitmap_head aligned_head; tree c, t, *pc; bool branch_seen = false; bool copyprivate_seen = false; bitmap_obstack_initialize (NULL); bitmap_initialize (&generic_head, &bitmap_default_obstack); bitmap_initialize (&firstprivate_head, &bitmap_default_obstack); bitmap_initialize (&lastprivate_head, &bitmap_default_obstack); bitmap_initialize (&aligned_head, &bitmap_default_obstack);
  /* First pass: per-clause validation and duplicate detection.  */
  for (pc = &clauses, c = clauses; c ; c = *pc) { bool remove = false; switch (OMP_CLAUSE_CODE (c)) { case OMP_CLAUSE_SHARED: goto check_dup_generic; case OMP_CLAUSE_PRIVATE: goto check_dup_generic; case OMP_CLAUSE_REDUCTION: goto check_dup_generic; case OMP_CLAUSE_COPYPRIVATE: copyprivate_seen = true; goto check_dup_generic; case OMP_CLAUSE_COPYIN: goto check_dup_generic; case OMP_CLAUSE_LINEAR: t = OMP_CLAUSE_DECL (c); if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t)) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE) { error ("linear clause applied to non-integral non-pointer " "variable with %qT type", TREE_TYPE (t)); remove = true; break; } t = OMP_CLAUSE_LINEAR_STEP (c); if (t == NULL_TREE) t = integer_one_node; if (t == error_mark_node) { remove = true; break; } else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("linear step expression must be integral"); remove = true; break; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { if (TREE_CODE (OMP_CLAUSE_DECL (c)) == PARM_DECL) t = maybe_constant_value (t); t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); if (TREE_CODE (TREE_TYPE (OMP_CLAUSE_DECL (c))) == POINTER_TYPE) { t = pointer_int_sum (OMP_CLAUSE_LOCATION (c), PLUS_EXPR, OMP_CLAUSE_DECL (c), t); t = fold_build2_loc (OMP_CLAUSE_LOCATION (c), MINUS_EXPR, sizetype, t, OMP_CLAUSE_DECL (c)); if (t == error_mark_node) { remove = true; break; } } else t = fold_convert (TREE_TYPE (OMP_CLAUSE_DECL (c)), t); }
OMP_CLAUSE_LINEAR_STEP (c) = t; } goto check_dup_generic; check_dup_generic: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in clause %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_FIRSTPRIVATE: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<firstprivate%>", t); else error ("%qE is not a variable in clause %<firstprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&firstprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_LASTPRIVATE: t = OMP_CLAUSE_DECL (c); if (!VAR_P (t) && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in clause %<lastprivate%>", t); else error ("%qE is not a variable in clause %<lastprivate%>", t); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t)) || bitmap_bit_p (&lastprivate_head, DECL_UID (t))) { error ("%qD appears more than once in data clauses", t); remove = true; } else bitmap_set_bit (&lastprivate_head, DECL_UID (t)); break; case OMP_CLAUSE_IF: t = OMP_CLAUSE_IF_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t);
OMP_CLAUSE_IF_EXPR (c) = t; break; case OMP_CLAUSE_FINAL: t = OMP_CLAUSE_FINAL_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_FINAL_EXPR (c) = t; break; case OMP_CLAUSE_NUM_THREADS: t = OMP_CLAUSE_NUM_THREADS_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("num_threads expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_NUM_THREADS_EXPR (c) = t; } break; case OMP_CLAUSE_SCHEDULE: t = OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && (OMP_CLAUSE_SCHEDULE_KIND (c) != OMP_CLAUSE_SCHEDULE_CILKFOR) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("schedule chunk size expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) { if (OMP_CLAUSE_SCHEDULE_KIND (c) == OMP_CLAUSE_SCHEDULE_CILKFOR) { t = convert_to_integer (long_integer_type_node, t); if (t == error_mark_node) { remove = true; break; } } t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } OMP_CLAUSE_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_SIMDLEN: case OMP_CLAUSE_SAFELEN: t = OMP_CLAUSE_OPERAND (c, 0); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%qs length expression must be integral", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else { t = mark_rvalue_use (t); t = maybe_constant_value (t); if (!processing_template_decl) { if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error ("%qs length expression must be positive constant" " integer expression", omp_clause_code_name[OMP_CLAUSE_CODE (c)]);
remove = true; } } OMP_CLAUSE_OPERAND (c, 0) = t; } break; case OMP_CLAUSE_NUM_TEAMS: t = OMP_CLAUSE_NUM_TEAMS_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<num_teams%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_NUM_TEAMS_EXPR (c) = t; } break; case OMP_CLAUSE_ASYNC: t = OMP_CLAUSE_ASYNC_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<async%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_ASYNC_EXPR (c) = t; } break; case OMP_CLAUSE_VECTOR_LENGTH: t = OMP_CLAUSE_VECTOR_LENGTH_EXPR (c); t = maybe_convert_cond (t); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_VECTOR_LENGTH_EXPR (c) = t; break; case OMP_CLAUSE_WAIT: t = OMP_CLAUSE_WAIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_WAIT_EXPR (c) = t; break; case OMP_CLAUSE_THREAD_LIMIT: t = OMP_CLAUSE_THREAD_LIMIT_EXPR (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<thread_limit%> expression must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_THREAD_LIMIT_EXPR (c) = t; } break; case OMP_CLAUSE_DEVICE: t = OMP_CLAUSE_DEVICE_ID (c); if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<device%> id must be integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DEVICE_ID (c) = t; } break; case OMP_CLAUSE_DIST_SCHEDULE: t = OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c); if (t == NULL) ; else if (t == error_mark_node) remove = true; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<dist_schedule%> chunk size expression must be " "integral"); remove = true; } else { t = mark_rvalue_use (t); if (!processing_template_decl) t = fold_build_cleanup_point_expr (TREE_TYPE (t), t); OMP_CLAUSE_DIST_SCHEDULE_CHUNK_EXPR (c) = t; } break; case OMP_CLAUSE_ALIGNED: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in %<aligned%> clause", t); else error ("%qE is not a variable in %<aligned%> clause", t); remove = true; } else if (!type_dependent_expression_p (t) && TREE_CODE (TREE_TYPE (t)) != POINTER_TYPE && TREE_CODE (TREE_TYPE (t)) != ARRAY_TYPE && (TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE || (!POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (t))) && (TREE_CODE (TREE_TYPE (TREE_TYPE (t))) != ARRAY_TYPE)))) { error_at (OMP_CLAUSE_LOCATION (c), "%qE in %<aligned%> clause is neither a pointer nor " "an array nor a reference to pointer or array", t); remove = true; } else if (bitmap_bit_p (&aligned_head, DECL_UID (t))) { error ("%qD appears more than once in %<aligned%> clauses", t); remove = true; } else bitmap_set_bit (&aligned_head, DECL_UID (t)); t = OMP_CLAUSE_ALIGNED_ALIGNMENT (c); if (t == error_mark_node) remove = true; else if (t == NULL_TREE) break; else if (!type_dependent_expression_p (t) && !INTEGRAL_TYPE_P (TREE_TYPE (t))) { error ("%<aligned%> clause alignment expression must " "be integral"); remove = true; } else { t = mark_rvalue_use (t); t = maybe_constant_value (t); if (!processing_template_decl) { if (TREE_CODE (t) != INTEGER_CST || tree_int_cst_sgn (t) != 1) { error ("%<aligned%> clause alignment expression must be " "positive constant integer expression"); remove = true; } } OMP_CLAUSE_ALIGNED_ALIGNMENT (c) = t; } break; case OMP_CLAUSE_DEPEND: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c)) remove = true; break; } if (t == error_mark_node) remove = true; else if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not a variable in %<depend%> clause", t); else error ("%qE is not a variable in %<depend%> clause", t); remove = true; } else if (!processing_template_decl && !cxx_mark_addressable (t)) remove = true; break; case OMP_CLAUSE_MAP: case OMP_CLAUSE_TO: case OMP_CLAUSE_FROM: case OMP_CLAUSE__CACHE_: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) == TREE_LIST) { if (handle_omp_array_sections (c)) remove = true; else { t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != TREE_LIST && !type_dependent_expression_p (t) && !cp_omp_mappable_type (TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "array section does not have mappable type " "in %qs clause", omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } break; } if (t == error_mark_node) remove = true; else if (TREE_CODE (t) != VAR_DECL && TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER) break; if (DECL_P (t)) error ("%qD is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); else error ("%qE is not a variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (TREE_CODE (t) == VAR_DECL && DECL_THREAD_LOCAL_P (t)) { error ("%qD is threadprivate variable in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (!processing_template_decl && TREE_CODE (TREE_TYPE (t)) != REFERENCE_TYPE &&
!cxx_mark_addressable (t)) remove = true; else if (!(OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER) && !type_dependent_expression_p (t) && !cp_omp_mappable_type ((TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE) ? TREE_TYPE (TREE_TYPE (t)) : TREE_TYPE (t))) { error_at (OMP_CLAUSE_LOCATION (c), "%qD does not have a mappable type in %qs clause", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } else if (bitmap_bit_p (&generic_head, DECL_UID (t))) { if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP) error ("%qD appears more than once in motion clauses", t); else error ("%qD appears more than once in map clauses", t); remove = true; } else bitmap_set_bit (&generic_head, DECL_UID (t)); break; case OMP_CLAUSE_UNIFORM: t = OMP_CLAUSE_DECL (c); if (TREE_CODE (t) != PARM_DECL) { if (processing_template_decl) break; if (DECL_P (t)) error ("%qD is not an argument in %<uniform%> clause", t); else error ("%qE is not an argument in %<uniform%> clause", t); remove = true; break; } goto check_dup_generic; case OMP_CLAUSE_NOWAIT: case OMP_CLAUSE_ORDERED: case OMP_CLAUSE_DEFAULT: case OMP_CLAUSE_UNTIED: case OMP_CLAUSE_COLLAPSE: case OMP_CLAUSE_MERGEABLE: case OMP_CLAUSE_PARALLEL: case OMP_CLAUSE_FOR: case OMP_CLAUSE_SECTIONS: case OMP_CLAUSE_TASKGROUP: case OMP_CLAUSE_PROC_BIND: case OMP_CLAUSE__CILK_FOR_COUNT_: break; case OMP_CLAUSE_INBRANCH: case OMP_CLAUSE_NOTINBRANCH: if (branch_seen) { error ("%<inbranch%> clause is incompatible with " "%<notinbranch%>"); remove = true; } branch_seen = true; break; default: gcc_unreachable (); } if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); }
  /* Second pass: figure out which special member functions the private
     copies need and verify predetermined-sharing rules.  */
  for (pc = &clauses, c = clauses; c ; c = *pc) { enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c); bool remove = false; bool need_complete_non_reference = false; bool need_default_ctor = false; bool need_copy_ctor = false; bool need_copy_assignment = false; bool need_implicitly_determined = false; bool need_dtor = false; tree type,
inner_type; switch (c_kind) { case OMP_CLAUSE_SHARED: need_implicitly_determined = true; break; case OMP_CLAUSE_PRIVATE: need_complete_non_reference = true; need_default_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_FIRSTPRIVATE: need_complete_non_reference = true; need_copy_ctor = true; need_dtor = true; need_implicitly_determined = true; break; case OMP_CLAUSE_LASTPRIVATE: need_complete_non_reference = true; need_copy_assignment = true; need_implicitly_determined = true; break; case OMP_CLAUSE_REDUCTION: need_implicitly_determined = true; break; case OMP_CLAUSE_COPYPRIVATE: need_copy_assignment = true; break; case OMP_CLAUSE_COPYIN: need_copy_assignment = true; break; case OMP_CLAUSE_NOWAIT: if (copyprivate_seen) { error_at (OMP_CLAUSE_LOCATION (c), "%<nowait%> clause must not be used together " "with %<copyprivate%>"); *pc = OMP_CLAUSE_CHAIN (c); continue; } /* FALLTHRU */ default: pc = &OMP_CLAUSE_CHAIN (c); continue; } t = OMP_CLAUSE_DECL (c); if (processing_template_decl && !VAR_P (t) && TREE_CODE (t) != PARM_DECL) { pc = &OMP_CLAUSE_CHAIN (c); continue; } switch (c_kind) { case OMP_CLAUSE_LASTPRIVATE: if (!bitmap_bit_p (&firstprivate_head, DECL_UID (t))) { need_default_ctor = true; need_dtor = true; } break; case OMP_CLAUSE_REDUCTION: if (finish_omp_reduction_clause (c, &need_default_ctor, &need_dtor)) remove = true; else t = OMP_CLAUSE_DECL (c); break; case OMP_CLAUSE_COPYIN: if (!VAR_P (t) || !DECL_THREAD_LOCAL_P (t)) { error ("%qE must be %<threadprivate%> for %<copyin%>", t); remove = true; } break; default: break; } if (need_complete_non_reference || need_copy_assignment) { t = require_complete_type (t); if (t == error_mark_node) remove = true; else if (TREE_CODE (TREE_TYPE (t)) == REFERENCE_TYPE && need_complete_non_reference) { error ("%qE has reference type for %qs", t, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } if (need_implicitly_determined) { const char *share_name = NULL; if (VAR_P (t) && DECL_THREAD_LOCAL_P (t)) share_name = "threadprivate"; else switch (cxx_omp_predetermined_sharing (t)) { case OMP_CLAUSE_DEFAULT_UNSPECIFIED: break; case OMP_CLAUSE_DEFAULT_SHARED: /* const vars may be specified in firstprivate clause. */ if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE && cxx_omp_const_qual_no_mutable (t)) break; share_name = "shared"; break; case OMP_CLAUSE_DEFAULT_PRIVATE: share_name = "private"; break; default: gcc_unreachable (); } if (share_name) { error ("%qE is predetermined %qs for %qs", t, share_name, omp_clause_code_name[OMP_CLAUSE_CODE (c)]); remove = true; } } /* We're interested in the base element, not arrays. */ inner_type = type = TREE_TYPE (t); while (TREE_CODE (inner_type) == ARRAY_TYPE) inner_type = TREE_TYPE (inner_type); if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION && TREE_CODE (inner_type) == REFERENCE_TYPE) inner_type = TREE_TYPE (inner_type); /* Check for special function availability by building a call to one. Save the results, because later we won't be in the right context for making these queries. */ if (CLASS_TYPE_P (inner_type) && COMPLETE_TYPE_P (inner_type) && (need_default_ctor || need_copy_ctor || need_copy_assignment || need_dtor) && !type_dependent_expression_p (t) && cxx_omp_create_clause_info (c, inner_type, need_default_ctor, need_copy_ctor, need_copy_assignment, need_dtor)) remove = true; if (remove) *pc = OMP_CLAUSE_CHAIN (c); else pc = &OMP_CLAUSE_CHAIN (c); } bitmap_obstack_release (NULL); return clauses; }
/* For all variables in the tree_list VARS, mark them as thread local. */
void finish_omp_threadprivate (tree vars) { tree t; /* Mark every variable in VARS to be assigned thread local storage.
*/
  /* Each TREE_LIST node's TREE_PURPOSE is a candidate variable; diagnose
     the ones that cannot legally be threadprivate, mark the rest.  */
  for (t = vars; t; t = TREE_CHAIN (t)) { tree v = TREE_PURPOSE (t); if (error_operand_p (v)) ; else if (!VAR_P (v)) error ("%<threadprivate%> %qD is not file, namespace " "or block scope variable", v); /* If V had already been marked threadprivate, it doesn't matter whether it had been used prior to this point. */ else if (TREE_USED (v) && (DECL_LANG_SPECIFIC (v) == NULL || !CP_DECL_THREADPRIVATE_P (v))) error ("%qE declared %<threadprivate%> after first use", v); else if (! TREE_STATIC (v) && ! DECL_EXTERNAL (v)) error ("automatic variable %qE cannot be %<threadprivate%>", v); else if (! COMPLETE_TYPE_P (complete_type (TREE_TYPE (v)))) error ("%<threadprivate%> %qE has incomplete type", v); else if (TREE_STATIC (v) && TYPE_P (CP_DECL_CONTEXT (v)) && CP_DECL_CONTEXT (v) != current_class_type) error ("%<threadprivate%> %qE directive not " "in %qT definition", v, CP_DECL_CONTEXT (v)); else { /* Allocate a LANG_SPECIFIC structure for V, if needed. */ if (DECL_LANG_SPECIFIC (v) == NULL) { retrofit_lang_decl (v); /* Make sure that DECL_DISCRIMINATOR_P continues to be true after the allocation of the lang_decl structure. */ if (DECL_DISCRIMINATOR_P (v)) DECL_LANG_SPECIFIC (v)->u.base.u2sel = 1; } if (! DECL_THREAD_LOCAL_P (v)) { set_decl_tls_model (v, decl_default_tls_model (v)); /* If rtl has been already set for this var, call make_decl_rtl once again, so that encode_section_info has a chance to look at the new decl flags. */ if (DECL_RTL_SET_P (v)) make_decl_rtl (v); } CP_DECL_THREADPRIVATE_P (v) = 1; } } }
/* Build an OpenMP structured block: opens a new binding level of kind
   sk_omp; the matching finish_omp_structured_block pops it again.  */
tree begin_omp_structured_block (void) { return do_pushlevel (sk_omp); }
tree finish_omp_structured_block (tree block) { return do_poplevel (block); }
/* Generate OACC_DATA, with CLAUSES and BLOCK as its compound statement. LOC is the location of the OACC_DATA.
*/
tree
finish_oacc_data (tree clauses, tree block)
{
  /* Close the structured block first, then wrap it in an OACC_DATA
     statement carrying CLAUSES.  */
  tree body = finish_omp_structured_block (block);
  tree node = make_node (OACC_DATA);
  TREE_TYPE (node) = void_type_node;
  OACC_DATA_BODY (node) = body;
  OACC_DATA_CLAUSES (node) = clauses;
  return add_stmt (node);
}

/* Generate OACC_KERNELS, with CLAUSES and BLOCK as its compound
   statement.  */

tree
finish_oacc_kernels (tree clauses, tree block)
{
  tree body = finish_omp_structured_block (block);
  tree node = make_node (OACC_KERNELS);
  TREE_TYPE (node) = void_type_node;
  OACC_KERNELS_BODY (node) = body;
  OACC_KERNELS_CLAUSES (node) = clauses;
  return add_stmt (node);
}

/* Generate OACC_PARALLEL, with CLAUSES and BLOCK as its compound
   statement.  */

tree
finish_oacc_parallel (tree clauses, tree block)
{
  tree body = finish_omp_structured_block (block);
  tree node = make_node (OACC_PARALLEL);
  TREE_TYPE (node) = void_type_node;
  OACC_PARALLEL_BODY (node) = body;
  OACC_PARALLEL_CLAUSES (node) = clauses;
  return add_stmt (node);
}

/* Similarly, except force the retention of the BLOCK.  */

tree
begin_omp_parallel (void)
{
  keep_next_level (true);
  return begin_omp_structured_block ();
}

/* Wrap the finished structured block BODY in an OMP_PARALLEL statement
   carrying CLAUSES and append it to the current statement list.  */

tree
finish_omp_parallel (tree clauses, tree body)
{
  tree stmt_body = finish_omp_structured_block (body);
  tree node = make_node (OMP_PARALLEL);
  TREE_TYPE (node) = void_type_node;
  OMP_PARALLEL_BODY (node) = stmt_body;
  OMP_PARALLEL_CLAUSES (node) = clauses;
  return add_stmt (node);
}

tree
begin_omp_task (void)
{
  keep_next_level (true);
  return begin_omp_structured_block ();
}

/* As finish_omp_parallel, but build an OMP_TASK statement.  */

tree
finish_omp_task (tree clauses, tree body)
{
  tree stmt_body = finish_omp_structured_block (body);
  tree node = make_node (OMP_TASK);
  TREE_TYPE (node) = void_type_node;
  OMP_TASK_BODY (node) = stmt_body;
  OMP_TASK_CLAUSES (node) = clauses;
  return add_stmt (node);
}

/* Helper function for finish_omp_for. Convert Ith random access iterator into integral iterator. Return FALSE if successful.
*/

/* On success the loop over the class iterator ITER is rewritten as a loop
   over an integral temporary DECL counting from 0 to (end - begin); the
   original iterator is recovered inside the body from DECL and the
   remembered previous value LAST.  DECLV/INITV/CONDV/INCRV slot I is
   overwritten with the rewritten loop controls, and *LASTP receives LAST
   so the caller can privatize it.  Returns TRUE on any diagnosed error.  */

static bool
handle_omp_for_class_iterator (int i, location_t locus, tree declv, tree initv,
                               tree condv, tree incrv, tree *body,
                               tree *pre_body, tree clauses, tree *lastp)
{
  tree diff, iter_init, iter_incr = NULL, last;
  tree incr_var = NULL, orig_pre_body, orig_body, c;
  tree decl = TREE_VEC_ELT (declv, i);
  tree init = TREE_VEC_ELT (initv, i);
  tree cond = TREE_VEC_ELT (condv, i);
  tree incr = TREE_VEC_ELT (incrv, i);
  tree iter = decl;
  location_t elocus = locus;

  if (init && EXPR_HAS_LOCATION (init))
    elocus = EXPR_LOCATION (init);

  /* Validate the condition and canonicalize it so the iterator is on the
     left-hand side; anything else is an invalid controlling predicate.  */
  switch (TREE_CODE (cond))
    {
    case GT_EXPR:
    case GE_EXPR:
    case LT_EXPR:
    case LE_EXPR:
    case NE_EXPR:
      if (TREE_OPERAND (cond, 1) == iter)
        cond = build2 (swap_tree_comparison (TREE_CODE (cond)),
                       TREE_TYPE (cond), iter, TREE_OPERAND (cond, 0));
      if (TREE_OPERAND (cond, 0) != iter)
        cond = error_mark_node;
      else
        {
          /* Check that the comparison against the iterator is actually
             well-formed; the result is only probed for errors.  */
          tree tem = build_x_binary_op (EXPR_LOCATION (cond),
                                        TREE_CODE (cond),
                                        iter, ERROR_MARK,
                                        TREE_OPERAND (cond, 1), ERROR_MARK,
                                        NULL, tf_warning_or_error);
          if (error_operand_p (tem))
            return true;
        }
      break;
    default:
      cond = error_mark_node;
      break;
    }
  if (cond == error_mark_node)
    {
      error_at (elocus, "invalid controlling predicate");
      return true;
    }

  /* The trip count (end - iter) must be an integral random access
     iterator difference.  */
  diff = build_x_binary_op (elocus, MINUS_EXPR, TREE_OPERAND (cond, 1),
                            ERROR_MARK, iter, ERROR_MARK, NULL,
                            tf_warning_or_error);
  if (error_operand_p (diff))
    return true;
  if (TREE_CODE (TREE_TYPE (diff)) != INTEGER_TYPE)
    {
      error_at (elocus,
                "difference between %qE and %qD does not have integer type",
                TREE_OPERAND (cond, 1), iter);
      return true;
    }

  /* Validate the increment and compute ITER_INCR (an expression that
     advances ITER, when one can be formed up front) and INCR (the integral
     step folded into the rewritten loop).  */
  switch (TREE_CODE (incr))
    {
    case PREINCREMENT_EXPR:
    case PREDECREMENT_EXPR:
    case POSTINCREMENT_EXPR:
    case POSTDECREMENT_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
        {
          incr = error_mark_node;
          break;
        }
      iter_incr = build_x_unary_op (EXPR_LOCATION (incr), TREE_CODE (incr),
                                    iter, tf_warning_or_error);
      if (error_operand_p (iter_incr))
        return true;
      else if (TREE_CODE (incr) == PREINCREMENT_EXPR
               || TREE_CODE (incr) == POSTINCREMENT_EXPR)
        incr = integer_one_node;
      else
        incr = integer_minus_one_node;
      break;
    case MODIFY_EXPR:
      if (TREE_OPERAND (incr, 0) != iter)
        incr = error_mark_node;
      else if (TREE_CODE (TREE_OPERAND (incr, 1)) == PLUS_EXPR
               || TREE_CODE (TREE_OPERAND (incr, 1)) == MINUS_EXPR)
        {
          tree rhs = TREE_OPERAND (incr, 1);
          if (TREE_OPERAND (rhs, 0) == iter)
            {
              /* iter = iter +/- step.  */
              if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 1)))
                  != INTEGER_TYPE)
                incr = error_mark_node;
              else
                {
                  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
                                                   iter, TREE_CODE (rhs),
                                                   TREE_OPERAND (rhs, 1),
                                                   tf_warning_or_error);
                  if (error_operand_p (iter_incr))
                    return true;
                  incr = TREE_OPERAND (rhs, 1);
                  incr = cp_convert (TREE_TYPE (diff), incr,
                                     tf_warning_or_error);
                  if (TREE_CODE (rhs) == MINUS_EXPR)
                    {
                      incr = build1 (NEGATE_EXPR, TREE_TYPE (diff), incr);
                      incr = fold_if_not_in_template (incr);
                    }
                  /* If the step is not a compile-time constant, drop
                     ITER_INCR; it will be rebuilt from a temporary.  */
                  if (TREE_CODE (incr) != INTEGER_CST
                      && (TREE_CODE (incr) != NOP_EXPR
                          || (TREE_CODE (TREE_OPERAND (incr, 0))
                              != INTEGER_CST)))
                    iter_incr = NULL;
                }
            }
          else if (TREE_OPERAND (rhs, 1) == iter)
            {
              /* iter = step + iter; only PLUS is commutative enough.  */
              if (TREE_CODE (TREE_TYPE (TREE_OPERAND (rhs, 0)))
                  != INTEGER_TYPE
                  || TREE_CODE (rhs) != PLUS_EXPR)
                incr = error_mark_node;
              else
                {
                  iter_incr = build_x_binary_op (EXPR_LOCATION (rhs),
                                                 PLUS_EXPR,
                                                 TREE_OPERAND (rhs, 0),
                                                 ERROR_MARK, iter,
                                                 ERROR_MARK, NULL,
                                                 tf_warning_or_error);
                  if (error_operand_p (iter_incr))
                    return true;
                  iter_incr = build_x_modify_expr (EXPR_LOCATION (rhs),
                                                   iter, NOP_EXPR,
                                                   iter_incr,
                                                   tf_warning_or_error);
                  if (error_operand_p (iter_incr))
                    return true;
                  incr = TREE_OPERAND (rhs, 0);
                  iter_incr = NULL;
                }
            }
          else
            incr = error_mark_node;
        }
      else
        incr = error_mark_node;
      break;
    default:
      incr = error_mark_node;
      break;
    }

  if (incr == error_mark_node)
    {
      error_at (elocus, "invalid increment expression");
      return true;
    }

  incr = cp_convert (TREE_TYPE (diff), incr, tf_warning_or_error);

  /* Look for a lastprivate clause naming the iterator; if present the
     final increment must be replayed in OMP_CLAUSE_LASTPRIVATE_STMT.  */
  for (c = clauses; c ; c = OMP_CLAUSE_CHAIN (c))
    if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
        && OMP_CLAUSE_DECL (c) == iter)
      break;

  /* DECL is the new integral loop counter, LAST remembers its value on
     the previous iteration so ITER can be advanced by the delta.  */
  decl = create_temporary_var (TREE_TYPE (diff));
  pushdecl (decl);
  add_decl_expr (decl);
  last = create_temporary_var (TREE_TYPE (diff));
  pushdecl (last);
  add_decl_expr (last);
  if (c && iter_incr == NULL)
    {
      /* Non-constant step with lastprivate: capture the step once.  */
      incr_var = create_temporary_var (TREE_TYPE (diff));
      pushdecl (incr_var);
      add_decl_expr (incr_var);
    }
  gcc_assert (stmts_are_full_exprs_p ());

  orig_pre_body = *pre_body;
  *pre_body = push_stmt_list ();
  if (orig_pre_body)
    add_stmt (orig_pre_body);

  if (init != NULL)
    finish_expr_stmt (build_x_modify_expr (elocus,
                                           iter, NOP_EXPR, init,
                                           tf_warning_or_error));
  init = build_int_cst (TREE_TYPE (diff), 0);
  if (c && iter_incr == NULL)
    {
      finish_expr_stmt (build_x_modify_expr (elocus,
                                             incr_var, NOP_EXPR,
                                             incr, tf_warning_or_error));
      incr = incr_var;
      iter_incr = build_x_modify_expr (elocus,
                                       iter, PLUS_EXPR, incr,
                                       tf_warning_or_error);
    }
  finish_expr_stmt (build_x_modify_expr (elocus,
                                         last, NOP_EXPR, init,
                                         tf_warning_or_error));
  *pre_body = pop_stmt_list (*pre_body);

  /* Rewritten controls: DECL <cond-op> (end - begin); DECL += INCR.  */
  cond = cp_build_binary_op (elocus,
                             TREE_CODE (cond), decl, diff,
                             tf_warning_or_error);
  incr = build_modify_expr (elocus, decl, NULL_TREE, PLUS_EXPR,
                            elocus, incr, NULL_TREE);

  /* Prepend to the body: iter += decl - last; last = decl;  */
  orig_body = *body;
  *body = push_stmt_list ();
  iter_init = build2 (MINUS_EXPR, TREE_TYPE (diff), decl, last);
  iter_init = build_x_modify_expr (elocus,
                                   iter, PLUS_EXPR, iter_init,
                                   tf_warning_or_error);
  iter_init = build1 (NOP_EXPR, void_type_node, iter_init);
  finish_expr_stmt (iter_init);
  finish_expr_stmt (build_x_modify_expr (elocus,
                                         last, NOP_EXPR, decl,
                                         tf_warning_or_error));
  add_stmt (orig_body);
  *body = pop_stmt_list (*body);

  if (c)
    {
      OMP_CLAUSE_LASTPRIVATE_STMT (c) = push_stmt_list ();
      finish_expr_stmt (iter_incr);
      OMP_CLAUSE_LASTPRIVATE_STMT (c)
        = pop_stmt_list (OMP_CLAUSE_LASTPRIVATE_STMT (c));
    }

  TREE_VEC_ELT (declv, i) = decl;
  TREE_VEC_ELT (initv, i) = init;
  TREE_VEC_ELT (condv, i) = cond;
  TREE_VEC_ELT (incrv, i) = incr;
  *lastp = last;

  return false;
}

/* Build and validate an OMP_FOR statement.  CLAUSES, BODY, COND, INCR
   are directly for their associated operands in the statement.
DECL and INIT are a combo; if DECL is NULL then INIT ought to be a MODIFY_EXPR, and the DECL should be extracted. PRE_BODY are optional statements that need to go before the loop into its sk_omp scope. */ tree finish_omp_for (location_t locus, enum tree_code code, tree declv, tree initv, tree condv, tree incrv, tree body, tree pre_body, tree clauses) { tree omp_for = NULL, orig_incr = NULL; tree decl = NULL, init, cond, incr, orig_decl = NULL_TREE, block = NULL_TREE; tree last = NULL_TREE; location_t elocus; int i; gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (initv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (condv)); gcc_assert (TREE_VEC_LENGTH (declv) == TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); elocus = locus; if (decl == NULL) { if (init != NULL) switch (TREE_CODE (init)) { case MODIFY_EXPR: decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 1); break; case MODOP_EXPR: if (TREE_CODE (TREE_OPERAND (init, 1)) == NOP_EXPR) { decl = TREE_OPERAND (init, 0); init = TREE_OPERAND (init, 2); } break; default: break; } if (decl == NULL) { error_at (locus, "expected iteration declaration or initialization"); return NULL; } } if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (cond == NULL) { error_at (elocus, "missing controlling predicate"); return NULL; } if (incr == NULL) { error_at (elocus, "missing increment expression"); return NULL; } TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; } if (dependent_omp_for_p (declv, initv, condv, incrv)) { tree stmt; stmt = make_node (code); for (i = 0; i < TREE_VEC_LENGTH (declv); i++) { /* This is really just a place-holder. We'll be decomposing this again and going through the cp_build_modify_expr path below when we instantiate the thing. 
*/ TREE_VEC_ELT (initv, i) = build2 (MODIFY_EXPR, void_type_node, TREE_VEC_ELT (declv, i), TREE_VEC_ELT (initv, i)); } TREE_TYPE (stmt) = void_type_node; OMP_FOR_INIT (stmt) = initv; OMP_FOR_COND (stmt) = condv; OMP_FOR_INCR (stmt) = incrv; OMP_FOR_BODY (stmt) = body; OMP_FOR_PRE_BODY (stmt) = pre_body; OMP_FOR_CLAUSES (stmt) = clauses; SET_EXPR_LOCATION (stmt, locus); return add_stmt (stmt); } if (processing_template_decl) orig_incr = make_tree_vec (TREE_VEC_LENGTH (incrv)); for (i = 0; i < TREE_VEC_LENGTH (declv); ) { decl = TREE_VEC_ELT (declv, i); init = TREE_VEC_ELT (initv, i); cond = TREE_VEC_ELT (condv, i); incr = TREE_VEC_ELT (incrv, i); if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; elocus = locus; if (init && EXPR_HAS_LOCATION (init)) elocus = EXPR_LOCATION (init); if (!DECL_P (decl)) { error_at (elocus, "expected iteration declaration or initialization"); return NULL; } if (incr && TREE_CODE (incr) == MODOP_EXPR) { if (orig_incr) TREE_VEC_ELT (orig_incr, i) = incr; incr = cp_build_modify_expr (TREE_OPERAND (incr, 0), TREE_CODE (TREE_OPERAND (incr, 1)), TREE_OPERAND (incr, 2), tf_warning_or_error); } if (CLASS_TYPE_P (TREE_TYPE (decl))) { if (code == OMP_SIMD) { error_at (elocus, "%<#pragma omp simd%> used with class " "iteration variable %qE", decl); return NULL; } if (code == CILK_FOR && i == 0) orig_decl = decl; if (handle_omp_for_class_iterator (i, locus, declv, initv, condv, incrv, &body, &pre_body, clauses, &last)) return NULL; continue; } if (!INTEGRAL_TYPE_P (TREE_TYPE (decl)) && !TYPE_PTR_P (TREE_TYPE (decl))) { error_at (elocus, "invalid type for iteration variable %qE", decl); return NULL; } if (!processing_template_decl) { init = fold_build_cleanup_point_expr (TREE_TYPE (init), init); init = cp_build_modify_expr (decl, NOP_EXPR, init, tf_warning_or_error); } else init = build2 (MODIFY_EXPR, void_type_node, decl, init); if (cond && TREE_SIDE_EFFECTS (cond) && COMPARISON_CLASS_P (cond) && !processing_template_decl) { tree t = TREE_OPERAND 
(cond, 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (cond, 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (cond, 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (decl == error_mark_node || init == error_mark_node) return NULL; TREE_VEC_ELT (declv, i) = decl; TREE_VEC_ELT (initv, i) = init; TREE_VEC_ELT (condv, i) = cond; TREE_VEC_ELT (incrv, i) = incr; i++; } if (IS_EMPTY_STMT (pre_body)) pre_body = NULL; if (code == CILK_FOR && !processing_template_decl) block = push_stmt_list (); omp_for = c_finish_omp_for (locus, code, declv, initv, condv, incrv, body, pre_body); if (omp_for == NULL) { if (block) pop_stmt_list (block); return NULL; } for (i = 0; i < TREE_VEC_LENGTH (OMP_FOR_INCR (omp_for)); i++) { decl = TREE_OPERAND (TREE_VEC_ELT (OMP_FOR_INIT (omp_for), i), 0); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i); if (TREE_CODE (incr) != MODIFY_EXPR) continue; if (TREE_SIDE_EFFECTS (TREE_OPERAND (incr, 1)) && BINARY_CLASS_P (TREE_OPERAND (incr, 1)) && !processing_template_decl) { tree t = TREE_OPERAND (TREE_OPERAND (incr, 1), 0); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 0) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_SIDE_EFFECTS (t) && t != decl && (TREE_CODE (t) != NOP_EXPR || TREE_OPERAND (t, 0) != decl)) TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = fold_build_cleanup_point_expr (TREE_TYPE (t), t); } if (orig_incr) TREE_VEC_ELT (OMP_FOR_INCR (omp_for), i) = TREE_VEC_ELT (orig_incr, i); } OMP_FOR_CLAUSES (omp_for) = clauses; if (block) { tree omp_par = make_node (OMP_PARALLEL); TREE_TYPE (omp_par) = void_type_node; OMP_PARALLEL_CLAUSES (omp_par) = NULL_TREE; tree bind = 
build3 (BIND_EXPR, void_type_node, NULL, NULL, NULL); TREE_SIDE_EFFECTS (bind) = 1; BIND_EXPR_BODY (bind) = pop_stmt_list (block); OMP_PARALLEL_BODY (omp_par) = bind; if (OMP_FOR_PRE_BODY (omp_for)) { add_stmt (OMP_FOR_PRE_BODY (omp_for)); OMP_FOR_PRE_BODY (omp_for) = NULL_TREE; } init = TREE_VEC_ELT (OMP_FOR_INIT (omp_for), 0); decl = TREE_OPERAND (init, 0); cond = TREE_VEC_ELT (OMP_FOR_COND (omp_for), 0); incr = TREE_VEC_ELT (OMP_FOR_INCR (omp_for), 0); tree t = TREE_OPERAND (cond, 1), c, clauses, *pc; clauses = OMP_FOR_CLAUSES (omp_for); OMP_FOR_CLAUSES (omp_for) = NULL_TREE; for (pc = &clauses; *pc; ) if (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_SCHEDULE) { gcc_assert (OMP_FOR_CLAUSES (omp_for) == NULL_TREE); OMP_FOR_CLAUSES (omp_for) = *pc; *pc = OMP_CLAUSE_CHAIN (*pc); OMP_CLAUSE_CHAIN (OMP_FOR_CLAUSES (omp_for)) = NULL_TREE; } else { gcc_assert (OMP_CLAUSE_CODE (*pc) == OMP_CLAUSE_FIRSTPRIVATE); pc = &OMP_CLAUSE_CHAIN (*pc); } if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (cond, 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (cond, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (TREE_CODE (incr) == MODIFY_EXPR) { t = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (TREE_OPERAND (incr, 1), 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (TREE_OPERAND (incr, 1), 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } } t = TREE_OPERAND (init, 1); if (TREE_CODE (t) != INTEGER_CST) { TREE_OPERAND (init, 1) = get_temp_regvar (TREE_TYPE (t), t); c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = TREE_OPERAND (init, 1); OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (orig_decl && orig_decl != decl) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = orig_decl; 
OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (last) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = last; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } c = build_omp_clause (input_location, OMP_CLAUSE_PRIVATE); OMP_CLAUSE_DECL (c) = decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; c = build_omp_clause (input_location, OMP_CLAUSE__CILK_FOR_COUNT_); OMP_CLAUSE_OPERAND (c, 0) = cilk_for_number_of_iterations (omp_for); OMP_CLAUSE_CHAIN (c) = clauses; OMP_PARALLEL_CLAUSES (omp_par) = finish_omp_clauses (c); add_stmt (omp_par); return omp_par; } else if (code == CILK_FOR && processing_template_decl) { tree c, clauses = OMP_FOR_CLAUSES (omp_for); if (orig_decl && orig_decl != decl) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = orig_decl; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } if (last) { c = build_omp_clause (input_location, OMP_CLAUSE_FIRSTPRIVATE); OMP_CLAUSE_DECL (c) = last; OMP_CLAUSE_CHAIN (c) = clauses; clauses = c; } OMP_FOR_CLAUSES (omp_for) = clauses; } return omp_for; } void finish_omp_atomic (enum tree_code code, enum tree_code opcode, tree lhs, tree rhs, tree v, tree lhs1, tree rhs1, bool seq_cst) { tree orig_lhs; tree orig_rhs; tree orig_v; tree orig_lhs1; tree orig_rhs1; bool dependent_p; tree stmt; orig_lhs = lhs; orig_rhs = rhs; orig_v = v; orig_lhs1 = lhs1; orig_rhs1 = rhs1; dependent_p = false; stmt = NULL_TREE; /* Even in a template, we can detect invalid uses of the atomic pragma if neither LHS nor RHS is type-dependent. 
*/ if (processing_template_decl) { dependent_p = (type_dependent_expression_p (lhs) || (rhs && type_dependent_expression_p (rhs)) || (v && type_dependent_expression_p (v)) || (lhs1 && type_dependent_expression_p (lhs1)) || (rhs1 && type_dependent_expression_p (rhs1))); if (!dependent_p) { lhs = build_non_dependent_expr (lhs); if (rhs) rhs = build_non_dependent_expr (rhs); if (v) v = build_non_dependent_expr (v); if (lhs1) lhs1 = build_non_dependent_expr (lhs1); if (rhs1) rhs1 = build_non_dependent_expr (rhs1); } } if (!dependent_p) { bool swapped = false; if (rhs1 && cp_tree_equal (lhs, rhs)) { tree tem = rhs; rhs = rhs1; rhs1 = tem; swapped = !commutative_tree_code (opcode); } if (rhs1 && !cp_tree_equal (lhs, rhs1)) { if (code == OMP_ATOMIC) error ("%<#pragma omp atomic update%> uses two different " "expressions for memory"); else error ("%<#pragma omp atomic capture%> uses two different " "expressions for memory"); return; } if (lhs1 && !cp_tree_equal (lhs, lhs1)) { if (code == OMP_ATOMIC) error ("%<#pragma omp atomic update%> uses two different " "expressions for memory"); else error ("%<#pragma omp atomic capture%> uses two different " "expressions for memory"); return; } stmt = c_finish_omp_atomic (input_location, code, opcode, lhs, rhs, v, lhs1, rhs1, swapped, seq_cst); if (stmt == error_mark_node) return; } if (processing_template_decl) { if (code == OMP_ATOMIC_READ) { stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs), OMP_ATOMIC_READ, orig_lhs); OMP_ATOMIC_SEQ_CST (stmt) = seq_cst; stmt = build2 (MODIFY_EXPR, void_type_node, orig_v, stmt); } else { if (opcode == NOP_EXPR) stmt = build2 (MODIFY_EXPR, void_type_node, orig_lhs, orig_rhs); else stmt = build2 (opcode, void_type_node, orig_lhs, orig_rhs); if (orig_rhs1) stmt = build_min_nt_loc (EXPR_LOCATION (orig_rhs1), COMPOUND_EXPR, orig_rhs1, stmt); if (code != OMP_ATOMIC) { stmt = build_min_nt_loc (EXPR_LOCATION (orig_lhs1), code, orig_lhs1, stmt); OMP_ATOMIC_SEQ_CST (stmt) = seq_cst; stmt = build2 
(MODIFY_EXPR, void_type_node, orig_v, stmt); } } stmt = build2 (OMP_ATOMIC, void_type_node, integer_zero_node, stmt); OMP_ATOMIC_SEQ_CST (stmt) = seq_cst; } finish_expr_stmt (stmt); } void finish_omp_barrier (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_BARRIER); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_flush (void) { tree fn = builtin_decl_explicit (BUILT_IN_SYNC_SYNCHRONIZE); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_taskwait (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKWAIT); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_taskyield (void) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_TASKYIELD); vec<tree, va_gc> *vec = make_tree_vector (); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_cancel (tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCEL); int mask = 0; if (find_omp_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (find_omp_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (find_omp_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (find_omp_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error ("%<#pragma omp cancel must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses"); return; } vec<tree, va_gc> *vec = make_tree_vector (); tree ifc = find_omp_clause (clauses, OMP_CLAUSE_IF); if (ifc != NULL_TREE) { tree type = TREE_TYPE (OMP_CLAUSE_IF_EXPR (ifc)); ifc = fold_build2_loc (OMP_CLAUSE_LOCATION (ifc), NE_EXPR, 
boolean_type_node, OMP_CLAUSE_IF_EXPR (ifc), build_zero_cst (type)); } else ifc = boolean_true_node; vec->quick_push (build_int_cst (integer_type_node, mask)); vec->quick_push (ifc); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } void finish_omp_cancellation_point (tree clauses) { tree fn = builtin_decl_explicit (BUILT_IN_GOMP_CANCELLATION_POINT); int mask = 0; if (find_omp_clause (clauses, OMP_CLAUSE_PARALLEL)) mask = 1; else if (find_omp_clause (clauses, OMP_CLAUSE_FOR)) mask = 2; else if (find_omp_clause (clauses, OMP_CLAUSE_SECTIONS)) mask = 4; else if (find_omp_clause (clauses, OMP_CLAUSE_TASKGROUP)) mask = 8; else { error ("%<#pragma omp cancellation point must specify one of " "%<parallel%>, %<for%>, %<sections%> or %<taskgroup%> clauses"); return; } vec<tree, va_gc> *vec = make_tree_vector_single (build_int_cst (integer_type_node, mask)); tree stmt = finish_call_expr (fn, &vec, false, false, tf_warning_or_error); release_tree_vector (vec); finish_expr_stmt (stmt); } /* Begin a __transaction_atomic or __transaction_relaxed statement. If PCOMPOUND is non-null, this is for a function-transaction-block, and we should create an extra compound stmt. */ tree begin_transaction_stmt (location_t loc, tree *pcompound, int flags) { tree r; if (pcompound) *pcompound = begin_compound_stmt (0); r = build_stmt (loc, TRANSACTION_EXPR, NULL_TREE); /* Only add the statement to the function if support enabled. */ if (flag_tm) add_stmt (r); else error_at (loc, ((flags & TM_STMT_ATTR_RELAXED) != 0 ? G_("%<__transaction_relaxed%> without " "transactional memory support enabled") : G_("%<__transaction_atomic%> without " "transactional memory support enabled"))); TRANSACTION_EXPR_BODY (r) = push_stmt_list (); TREE_SIDE_EFFECTS (r) = 1; return r; } /* End a __transaction_atomic or __transaction_relaxed statement. 
If COMPOUND_STMT is non-null, this is for a function-transaction-block,
   and we should end the compound.  If NOEX is non-NULL, we wrap the body in
   a MUST_NOT_THROW_EXPR with NOEX as condition.  */

void
finish_transaction_stmt (tree stmt, tree compound_stmt, int flags, tree noex)
{
  TRANSACTION_EXPR_BODY (stmt) = pop_stmt_list (TRANSACTION_EXPR_BODY (stmt));
  TRANSACTION_EXPR_OUTER (stmt) = (flags & TM_STMT_ATTR_OUTER) != 0;
  TRANSACTION_EXPR_RELAXED (stmt) = (flags & TM_STMT_ATTR_RELAXED) != 0;
  TRANSACTION_EXPR_IS_STMT (stmt) = 1;

  /* noexcept specifications are not allowed for function transactions.  */
  gcc_assert (!(noex && compound_stmt));
  if (noex)
    {
      tree body = build_must_not_throw_expr (TRANSACTION_EXPR_BODY (stmt),
                                             noex);
      /* This may not be true when the STATEMENT_LIST is empty.  */
      if (EXPR_P (body))
        SET_EXPR_LOCATION (body, EXPR_LOCATION (TRANSACTION_EXPR_BODY (stmt)));
      TREE_SIDE_EFFECTS (body) = 1;
      TRANSACTION_EXPR_BODY (stmt) = body;
    }

  if (compound_stmt)
    finish_compound_stmt (compound_stmt);
}

/* Build a __transaction_atomic or __transaction_relaxed expression.  If
   NOEX is non-NULL, we wrap the body in a MUST_NOT_THROW_EXPR with NOEX as
   condition.  */

tree
build_transaction_expr (location_t loc, tree expr, int flags, tree noex)
{
  tree ret;
  if (noex)
    {
      expr = build_must_not_throw_expr (expr, noex);
      if (EXPR_P (expr))
        SET_EXPR_LOCATION (expr, loc);
      TREE_SIDE_EFFECTS (expr) = 1;
    }
  ret = build1 (TRANSACTION_EXPR, TREE_TYPE (expr), expr);
  if (flags & TM_STMT_ATTR_RELAXED)
    TRANSACTION_EXPR_RELAXED (ret) = 1;
  TREE_SIDE_EFFECTS (ret) = 1;
  SET_EXPR_LOCATION (ret, loc);
  return ret;
}

/* One-time initialization hook for this file; currently nothing to do.  */

void
init_cp_semantics (void)
{
}

/* Build a STATIC_ASSERT for a static assertion with the condition
   CONDITION and the message text MESSAGE.  LOCATION is the location
   of the static assertion in the source code.  When MEMBER_P, this
   static assertion is a member of a class.
*/

void
finish_static_assert (tree condition, tree message, location_t location,
                      bool member_p)
{
  /* Nothing to do for erroneous operands; errors were reported earlier.  */
  if (message == NULL_TREE
      || message == error_mark_node
      || condition == NULL_TREE
      || condition == error_mark_node)
    return;

  if (check_for_bare_parameter_packs (condition))
    condition = error_mark_node;

  if (type_dependent_expression_p (condition)
      || value_dependent_expression_p (condition))
    {
      /* We're in a template; build a STATIC_ASSERT and put it in
         the right place.  */
      tree assertion;

      assertion = make_node (STATIC_ASSERT);
      STATIC_ASSERT_CONDITION (assertion) = condition;
      STATIC_ASSERT_MESSAGE (assertion) = message;
      STATIC_ASSERT_SOURCE_LOCATION (assertion) = location;

      if (member_p)
        maybe_add_class_template_decl_list (current_class_type,
                                            assertion,
                                            /*friend_p=*/0);
      else
        add_stmt (assertion);

      return;
    }

  /* Fold the expression and convert it to a boolean value.  */
  condition = instantiate_non_dependent_expr (condition);
  condition = cp_convert (boolean_type_node, condition, tf_warning_or_error);
  condition = maybe_constant_value (condition);

  if (TREE_CODE (condition) == INTEGER_CST && !integer_zerop (condition))
    /* Do nothing; the condition is satisfied.  */
    ;
  else
    {
      location_t saved_loc = input_location;

      input_location = location;
      if (TREE_CODE (condition) == INTEGER_CST
          && integer_zerop (condition))
        /* Report the error.  */
        error ("static assertion failed: %s", TREE_STRING_POINTER (message));
      else if (condition && condition != error_mark_node)
        {
          error ("non-constant condition for static assertion");
          /* Explain why it is not constant.  */
          if (require_potential_rvalue_constant_expression (condition))
            cxx_constant_value (condition);
        }
      input_location = saved_loc;
    }
}

/* Implements the C++0x decltype keyword.  Returns the type of EXPR,
   suitable for use as a type-specifier.

   ID_EXPRESSION_OR_MEMBER_ACCESS_P is true when EXPR was parsed as an
   id-expression or a class member access, FALSE when it was parsed as
   a full expression.
*/

tree
finish_decltype_type (tree expr, bool id_expression_or_member_access_p,
                      tsubst_flags_t complain)
{
  tree type = NULL_TREE;

  if (!expr || error_operand_p (expr))
    return error_mark_node;

  /* decltype takes an expression, never a type (a BIT_NOT_EXPR of a type
     is a parser artifact for ~T destructor names).  */
  if (TYPE_P (expr)
      || TREE_CODE (expr) == TYPE_DECL
      || (TREE_CODE (expr) == BIT_NOT_EXPR
          && TYPE_P (TREE_OPERAND (expr, 0))))
    {
      if (complain & tf_error)
        error ("argument to decltype must be an expression");
      return error_mark_node;
    }

  /* Depending on the resolution of DR 1172, we may later need to distinguish
     instantiation-dependent but not type-dependent expressions so that, say,
     A<decltype(sizeof(T))>::U doesn't require 'typename'.  */
  if (instantiation_dependent_expression_p (expr))
    {
      /* Defer: represent the whole thing as a DECLTYPE_TYPE to be resolved
         at instantiation time.  */
      type = cxx_make_type (DECLTYPE_TYPE);
      DECLTYPE_TYPE_EXPR (type) = expr;
      DECLTYPE_TYPE_ID_EXPR_OR_MEMBER_ACCESS_P (type)
        = id_expression_or_member_access_p;
      SET_TYPE_STRUCTURAL_EQUALITY (type);

      return type;
    }

  /* The type denoted by decltype(e) is defined as follows:  */

  expr = resolve_nondeduced_context (expr);

  if (invalid_nonstatic_memfn_p (expr, complain))
    return error_mark_node;

  if (type_unknown_p (expr))
    {
      if (complain & tf_error)
        error ("decltype cannot resolve address of overloaded function");
      return error_mark_node;
    }

  /* To get the size of a static data member declared as an array of
     unknown bound, we need to instantiate it.  */
  if (VAR_P (expr)
      && VAR_HAD_UNKNOWN_BOUND (expr)
      && DECL_TEMPLATE_INSTANTIATION (expr))
    instantiate_decl (expr, /*defer_ok*/true, /*expl_inst_mem*/false);

  if (id_expression_or_member_access_p)
    {
      /* If e is an id-expression or a class member access (5.2.5
         [expr.ref]), decltype(e) is defined as the type of the entity
         named by e.  If there is no such entity, or e names a set of
         overloaded functions, the program is ill-formed.  */
      if (identifier_p (expr))
        expr = lookup_name (expr);

      if (INDIRECT_REF_P (expr))
        /* This can happen when the expression is, e.g., "a.b".  Just
           look at the underlying operand.  */
        expr = TREE_OPERAND (expr, 0);

      if (TREE_CODE (expr) == OFFSET_REF
          || TREE_CODE (expr) == MEMBER_REF
          || TREE_CODE (expr) == SCOPE_REF)
        /* We're only interested in the field itself.  If it is a
           BASELINK, we will need to see through it in the next
           step.  */
        expr = TREE_OPERAND (expr, 1);

      if (BASELINK_P (expr))
        /* See through BASELINK nodes to the underlying function.  */
        expr = BASELINK_FUNCTIONS (expr);

      switch (TREE_CODE (expr))
        {
        case FIELD_DECL:
          if (DECL_BIT_FIELD_TYPE (expr))
            {
              type = DECL_BIT_FIELD_TYPE (expr);
              break;
            }
          /* Fall through for fields that aren't bitfields.  */

        case FUNCTION_DECL:
        case VAR_DECL:
        case CONST_DECL:
        case PARM_DECL:
        case RESULT_DECL:
        case TEMPLATE_PARM_INDEX:
          expr = mark_type_use (expr);
          type = TREE_TYPE (expr);
          break;

        case ERROR_MARK:
          type = error_mark_node;
          break;

        case COMPONENT_REF:
        case COMPOUND_EXPR:
          mark_type_use (expr);
          type = is_bitfield_expr_with_lowered_type (expr);
          if (!type)
            type = TREE_TYPE (TREE_OPERAND (expr, 1));
          break;

        case BIT_FIELD_REF:
          gcc_unreachable ();

        case INTEGER_CST:
        case PTRMEM_CST:
          /* We can get here when the id-expression refers to an
             enumerator or non-type template parameter.  */
          type = TREE_TYPE (expr);
          break;

        default:
          /* Handle instantiated template non-type arguments.  */
          type = TREE_TYPE (expr);
          break;
        }
    }
  else
    {
      /* Within a lambda-expression:

         Every occurrence of decltype((x)) where x is a possibly
         parenthesized id-expression that names an entity of
         automatic storage duration is treated as if x were
         transformed into an access to a corresponding data member
         of the closure type that would have been declared if x
         were a use of the denoted entity.  */
      if (outer_automatic_var_p (expr)
          && current_function_decl
          && LAMBDA_FUNCTION_P (current_function_decl))
        type = capture_decltype (expr);
      else if (error_operand_p (expr))
        type = error_mark_node;
      else if (expr == current_class_ptr)
        /* If the expression is just "this", we want the
           cv-unqualified pointer for the "this" type.  */
        type = TYPE_MAIN_VARIANT (TREE_TYPE (expr));
      else
        {
          /* Otherwise, where T is the type of e, if e is an lvalue,
             decltype(e) is defined as T&; if an xvalue, T&&; otherwise,
             T.  */
          cp_lvalue_kind clk = lvalue_kind (expr);
          type = unlowered_expr_type (expr);
          gcc_assert (TREE_CODE (type) != REFERENCE_TYPE);

          /* For vector types, pick a non-opaque variant.  */
          if (TREE_CODE (type) == VECTOR_TYPE)
            type = strip_typedefs (type);

          if (clk != clk_none && !(clk & clk_class))
            type = cp_build_reference_type (type, (clk & clk_rvalueref));
        }
    }

  return type;
}

/* Called from trait_expr_value to evaluate either __has_nothrow_assign or
   __has_nothrow_copy, depending on assign_p.  */

static bool
classtype_has_nothrow_assign_or_copy_p (tree type, bool assign_p)
{
  tree fns;

  if (assign_p)
    {
      int ix;
      ix = lookup_fnfields_1 (type, ansi_assopname (NOP_EXPR));
      if (ix < 0)
        return false;
      fns = (*CLASSTYPE_METHOD_VEC (type))[ix];
    }
  else if (TYPE_HAS_COPY_CTOR (type))
    {
      /* If construction of the copy constructor was postponed, create
         it now.  */
      if (CLASSTYPE_LAZY_COPY_CTOR (type))
        lazily_declare_fn (sfk_copy_constructor, type);
      if (CLASSTYPE_LAZY_MOVE_CTOR (type))
        lazily_declare_fn (sfk_move_constructor, type);
      fns = CLASSTYPE_CONSTRUCTORS (type);
    }
  else
    return false;

  /* Every copy assignment operator (resp. copy constructor) in the
     overload set must be non-throwing.  */
  for (; fns; fns = OVL_NEXT (fns))
    {
      tree fn = OVL_CURRENT (fns);

      if (assign_p)
        {
          if (copy_fn_p (fn) == 0)
            continue;
        }
      else if (copy_fn_p (fn) <= 0)
        continue;

      maybe_instantiate_noexcept (fn);
      if (!TYPE_NOTHROW_P (TREE_TYPE (fn)))
        return false;
    }

  return true;
}

/* Actually evaluates the trait.
*/

static bool
trait_expr_value (cp_trait_kind kind, tree type1, tree type2)
{
  enum tree_code type_code1;
  tree t;

  type_code1 = TREE_CODE (type1);

  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
              && (trait_expr_value (CPTK_HAS_TRIVIAL_ASSIGN, type1, type2)
                  || (CLASS_TYPE_P (type1)
                      && classtype_has_nothrow_assign_or_copy_p (type1,
                                                                 true))));

    case CPTK_HAS_TRIVIAL_ASSIGN:
      /* ??? The standard seems to be missing the "or array of such a class
         type" wording for this trait.  */
      type1 = strip_array_types (type1);
      return (!CP_TYPE_CONST_P (type1) && type_code1 != REFERENCE_TYPE
              && (trivial_type_p (type1)
                    || (CLASS_TYPE_P (type1)
                        && TYPE_HAS_TRIVIAL_COPY_ASSIGN (type1))));

    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_CONSTRUCTOR, type1, type2)
              || (CLASS_TYPE_P (type1)
                  && (t = locate_ctor (type1))
                  && (maybe_instantiate_noexcept (t),
                      TYPE_NOTHROW_P (TREE_TYPE (t)))));

    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1)
              || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_DFLT (type1)));

    case CPTK_HAS_NOTHROW_COPY:
      type1 = strip_array_types (type1);
      return (trait_expr_value (CPTK_HAS_TRIVIAL_COPY, type1, type2)
              || (CLASS_TYPE_P (type1)
                  && classtype_has_nothrow_assign_or_copy_p (type1, false)));

    case CPTK_HAS_TRIVIAL_COPY:
      /* ??? The standard seems to be missing the "or array of such a class
         type" wording for this trait.  */
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
              || (CLASS_TYPE_P (type1) && TYPE_HAS_TRIVIAL_COPY_CTOR (type1)));

    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
      type1 = strip_array_types (type1);
      return (trivial_type_p (type1) || type_code1 == REFERENCE_TYPE
              || (CLASS_TYPE_P (type1)
                  && TYPE_HAS_TRIVIAL_DESTRUCTOR (type1)));

    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
      return type_has_virtual_destructor (type1);

    case CPTK_IS_ABSTRACT:
      return (ABSTRACT_CLASS_TYPE_P (type1));

    case CPTK_IS_BASE_OF:
      return (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
              && (same_type_ignoring_top_level_qualifiers_p (type1, type2)
                  || DERIVED_FROM_P (type1, type2)));

    case CPTK_IS_CLASS:
      return (NON_UNION_CLASS_TYPE_P (type1));

    case CPTK_IS_EMPTY:
      return (NON_UNION_CLASS_TYPE_P (type1) && CLASSTYPE_EMPTY_P (type1));

    case CPTK_IS_ENUM:
      return (type_code1 == ENUMERAL_TYPE);

    case CPTK_IS_FINAL:
      return (CLASS_TYPE_P (type1) && CLASSTYPE_FINAL (type1));

    case CPTK_IS_LITERAL_TYPE:
      return (literal_type_p (type1));

    case CPTK_IS_POD:
      return (pod_type_p (type1));

    case CPTK_IS_POLYMORPHIC:
      return (CLASS_TYPE_P (type1) && TYPE_POLYMORPHIC_P (type1));

    case CPTK_IS_STD_LAYOUT:
      return (std_layout_type_p (type1));

    case CPTK_IS_TRIVIAL:
      return (trivial_type_p (type1));

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
      return is_trivially_xible (MODIFY_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      return is_trivially_xible (INIT_EXPR, type1, type2);

    case CPTK_IS_TRIVIALLY_COPYABLE:
      return (trivially_copyable_p (type1));

    case CPTK_IS_UNION:
      return (type_code1 == UNION_TYPE);

    default:
      gcc_unreachable ();
      return false;
    }
}

/* If TYPE is an array of unknown bound, or (possibly cv-qualified)
   void, or a complete type, returns true, otherwise false.  */

static bool
check_trait_type (tree type)
{
  if (type == NULL_TREE)
    return true;

  /* A TREE_LIST holds the two types of a binary trait; check both.  */
  if (TREE_CODE (type) == TREE_LIST)
    return (check_trait_type (TREE_VALUE (type))
            && check_trait_type (TREE_CHAIN (type)));

  if (TREE_CODE (type) == ARRAY_TYPE && !TYPE_DOMAIN (type)
      && COMPLETE_TYPE_P (TREE_TYPE (type)))
    return true;

  if (VOID_TYPE_P (type))
    return true;

  return !!complete_type_or_else (strip_array_types (type), NULL_TREE);
}

/* Process a trait expression.  */

tree
finish_trait_expr (cp_trait_kind kind, tree type1, tree type2)
{
  if (type1 == error_mark_node
      || type2 == error_mark_node)
    return error_mark_node;

  if (processing_template_decl)
    {
      /* Defer evaluation until instantiation.  */
      tree trait_expr = make_node (TRAIT_EXPR);
      TREE_TYPE (trait_expr) = boolean_type_node;
      TRAIT_EXPR_TYPE1 (trait_expr) = type1;
      TRAIT_EXPR_TYPE2 (trait_expr) = type2;
      TRAIT_EXPR_KIND (trait_expr) = kind;
      return trait_expr;
    }

  /* Require complete (enough) operand types where the trait needs them.  */
  switch (kind)
    {
    case CPTK_HAS_NOTHROW_ASSIGN:
    case CPTK_HAS_TRIVIAL_ASSIGN:
    case CPTK_HAS_NOTHROW_CONSTRUCTOR:
    case CPTK_HAS_TRIVIAL_CONSTRUCTOR:
    case CPTK_HAS_NOTHROW_COPY:
    case CPTK_HAS_TRIVIAL_COPY:
    case CPTK_HAS_TRIVIAL_DESTRUCTOR:
    case CPTK_HAS_VIRTUAL_DESTRUCTOR:
    case CPTK_IS_ABSTRACT:
    case CPTK_IS_EMPTY:
    case CPTK_IS_FINAL:
    case CPTK_IS_LITERAL_TYPE:
    case CPTK_IS_POD:
    case CPTK_IS_POLYMORPHIC:
    case CPTK_IS_STD_LAYOUT:
    case CPTK_IS_TRIVIAL:
    case CPTK_IS_TRIVIALLY_COPYABLE:
      if (!check_trait_type (type1))
        return error_mark_node;
      break;

    case CPTK_IS_TRIVIALLY_ASSIGNABLE:
    case CPTK_IS_TRIVIALLY_CONSTRUCTIBLE:
      if (!check_trait_type (type1)
          || !check_trait_type (type2))
        return error_mark_node;
      break;

    case CPTK_IS_BASE_OF:
      if (NON_UNION_CLASS_TYPE_P (type1) && NON_UNION_CLASS_TYPE_P (type2)
          && !same_type_ignoring_top_level_qualifiers_p (type1, type2)
          && !complete_type_or_else (type2, NULL_TREE))
        /* We already issued an error.  */
        return error_mark_node;
      break;

    case CPTK_IS_CLASS:
    case CPTK_IS_ENUM:
    case CPTK_IS_UNION:
      break;

    default:
      gcc_unreachable ();
    }

  return (trait_expr_value (kind, type1, type2)
          ? boolean_true_node : boolean_false_node);
}

/* Do-nothing variants of functions to handle pragma FLOAT_CONST_DECIMAL64,
   which is ignored for C++.  */

void
set_float_const_decimal64 (void)
{
}

void
clear_float_const_decimal64 (void)
{
}

bool
float_const_decimal64_p (void)
{
  return 0;
}

/* Return true if T designates the implied `this' parameter.  */

bool
is_this_parameter (tree t)
{
  if (!DECL_P (t) || DECL_NAME (t) != this_identifier)
    return false;
  gcc_assert (TREE_CODE (t) == PARM_DECL || is_capture_proxy (t));
  return true;
}

/* Insert the deduced return type for an auto function.  */

void
apply_deduced_return_type (tree fco, tree return_type)
{
  tree result;

  if (return_type == error_mark_node)
    return;

  if (LAMBDA_FUNCTION_P (fco))
    {
      tree lambda = CLASSTYPE_LAMBDA_EXPR (current_class_type);
      LAMBDA_EXPR_RETURN_TYPE (lambda) = return_type;
    }

  /* Conversion operators are mangled by their (now-deduced) type.  */
  if (DECL_CONV_FN_P (fco))
    DECL_NAME (fco) = mangle_conv_op_name_for_type (return_type);

  TREE_TYPE (fco) = change_return_type (return_type, TREE_TYPE (fco));

  result = DECL_RESULT (fco);
  if (result == NULL_TREE)
    return;
  if (TREE_TYPE (result) == return_type)
    return;

  /* We already have a DECL_RESULT from start_preparsed_function.
     Now we need to redo the work it and allocate_struct_function
     did to reflect the new type.  */
  gcc_assert (current_function_decl == fco);
  result = build_decl (input_location, RESULT_DECL, NULL_TREE,
                       TYPE_MAIN_VARIANT (return_type));
  DECL_ARTIFICIAL (result) = 1;
  DECL_IGNORED_P (result) = 1;
  cp_apply_type_quals_to_decl (cp_type_quals (return_type),
                               result);

  DECL_RESULT (fco) = result;

  if (!processing_template_decl)
    {
      if (!VOID_TYPE_P (TREE_TYPE (result)))
        complete_type_or_else (TREE_TYPE (result), NULL_TREE);
      bool aggr = aggregate_value_p (result, fco);
#ifdef PCC_STATIC_STRUCT_RETURN
      cfun->returns_pcc_struct = aggr;
#endif
      cfun->returns_struct = aggr;
    }

}

/* DECL is a local variable or parameter from the surrounding scope of a
   lambda-expression.
   Returns the decltype for a use of the capture field for
   DECL even if it hasn't been captured yet.  */

static tree
capture_decltype (tree decl)
{
  tree lam = CLASSTYPE_LAMBDA_EXPR (DECL_CONTEXT (current_function_decl));
  /* FIXME do lookup instead of list walk? */
  tree cap = value_member (decl, LAMBDA_EXPR_CAPTURE_LIST (lam));
  tree type;

  if (cap)
    /* Already captured: use the type of the capture field directly.  */
    type = TREE_TYPE (TREE_PURPOSE (cap));
  else
    /* Not captured yet: compute the type it would have from the
       lambda's default capture mode.  */
    switch (LAMBDA_EXPR_DEFAULT_CAPTURE_MODE (lam))
      {
      case CPLD_NONE:
	/* No default capture: using DECL here is an error.  */
	error ("%qD is not captured", decl);
	return error_mark_node;

      case CPLD_COPY:
	type = TREE_TYPE (decl);
	/* Capture-by-copy of a reference captures the referent, except
	   for references to functions.  */
	if (TREE_CODE (type) == REFERENCE_TYPE
	    && TREE_CODE (TREE_TYPE (type)) != FUNCTION_TYPE)
	  type = TREE_TYPE (type);
	break;

      case CPLD_REFERENCE:
	type = TREE_TYPE (decl);
	if (TREE_CODE (type) != REFERENCE_TYPE)
	  type = build_reference_type (TREE_TYPE (decl));
	break;

      default:
	gcc_unreachable ();
      }

  if (TREE_CODE (type) != REFERENCE_TYPE)
    {
      /* Copy capture in a non-mutable lambda yields a const member;
	 a use of the field is an lvalue, hence the reference type.  */
      if (!LAMBDA_EXPR_MUTABLE_P (lam))
	type = cp_build_qualified_type (type, (cp_type_quals (type)
					       |TYPE_QUAL_CONST));
      type = build_reference_type (type);
    }
  return type;
}

#include "gt-cp-semantics.h"
GB_unop__identity_int8_fc64.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__identity_int8_fc64)
// op(A') function:  GB (_unop_tran__identity_int8_fc64)

// C type:   int8_t
// A type:   GxB_FC64_t
// cast:     int8_t cij = GB_cast_to_int8_t (creal (aij))
// unaryop:  cij = aij

#define GB_ATYPE \
    GxB_FC64_t

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    GxB_FC64_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting: drop the imaginary part, then saturating-cast the real part to int8
#define GB_CAST(z, aij) \
    int8_t z = GB_cast_to_int8_t (creal (aij)) ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA)           \
{                                   \
    /* aij = Ax [pA] */             \
    GxB_FC64_t aij = Ax [pA] ;      \
    /* Cx [pC] = op (cast (aij)) */ \
    int8_t z = GB_cast_to_int8_t (creal (aij)) ; \
    Cx [pC] = z ;                   \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_IDENTITY || GxB_NO_INT8 || GxB_NO_FC64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_apply__identity_int8_fc64)
(
    int8_t *Cx,                 // Cx and Ax may be aliased
    const GxB_FC64_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // A is sparse, hypersparse, or full: every entry 0..anz-1 is present
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            GxB_FC64_t aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            // skip positions where no entry is present in the bitmap
            if (!Ab [p]) continue ;
            GxB_FC64_t aij = Ax [p] ;
            int8_t z = GB_cast_to_int8_t (creal (aij)) ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

GrB_Info GB (_unop_tran__identity_int8_fc64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // the transpose kernel body is shared by all unary ops via this include
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
hw.c
#include <sys/mman.h> #include <openacc.h> #include <stdlib.h> #include <assert.h> #include <stdio.h> #include <math.h> #if defined(_WIN32) || defined(_WIN64) #include <sys/timeb.h> #define gettime(a) _ftime(a) #define usec(t1,t2) ((((t2).time-(t1).time)*1000+((t2).millitm-(t1).millitm))*100) typedef struct _timeb timestruct; #else #include <sys/time.h> #define gettime(a) gettimeofday(a,NULL) #define usec(t1,t2) (((t2).tv_sec-(t1).tv_sec)*1000000+((t2).tv_usec-(t1).tv_usec)) typedef struct timeval timestruct; #endif #define acc_num_device_types 10 char* acc_device_type_text(acc_device_t device_type) { switch(device_type) { case acc_device_none: return "acc_device_none"; case acc_device_default: return "acc_device_default"; case acc_device_host: return "acc_device_host"; case acc_device_not_host: return "acc_device_not_host"; case acc_device_nvidia: return "acc_device_nvidia"; case acc_device_radeon: return "acc_device_radeon"; case acc_device_xeonphi: return "acc_device_xeonphi"; case acc_device_pgi_opencl: return "acc_device_pgi_opencl"; case acc_device_nvidia_opencl: return "acc_device_nvidia_opencl"; case acc_device_opencl: return "acc_device_opencl"; default: return "unknown"; } } void acc_pprint_available_devices(void) { acc_device_t device_types[acc_num_device_types] = { acc_device_none, acc_device_default, acc_device_host, acc_device_not_host, acc_device_nvidia, acc_device_radeon, acc_device_xeonphi, acc_device_pgi_opencl, acc_device_nvidia_opencl, acc_device_opencl }; for(int i=0; i<acc_num_device_types; ++i) { acc_device_t device_type = device_types[i]; int num_devices = acc_get_num_devices(device_type); printf("%d x %s\n", num_devices, acc_device_type_text(device_type)); } } int main( int argc, char* argv[]) { int nelem = 100000; int iterations = 50000; int offload = 1; //int offload = 0; acc_pprint_available_devices(); if (offload) { acc_init(acc_device_nvidia); acc_set_device_type(acc_device_nvidia); } double* restrict a = (double*)mmap(0, 
nelem*sizeof(double), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); double* restrict b = (double*)mmap(0, nelem*sizeof(double), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); double* restrict c = (double*)mmap(0, nelem*sizeof(double), PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANONYMOUS, -1, 0); #pragma omp parallel for for(int i=0; i<nelem; ++i) { a[i] = i; b[i] = i; } timestruct timer_started, timer_stopped; if (offload) { acc_copyin(a, nelem*sizeof(double)); acc_copyin(b, nelem*sizeof(double)); acc_copyin(c, nelem*sizeof(double)); } gettime( &timer_started ); for(int iteration=0; iteration<iterations; ++iteration) { #pragma acc parallel loop present(a, b, c) if(offload) for(int i=0; i<nelem; ++i) { c[i] = a[i] + b[i]; } } if (offload) { acc_copyout(c, nelem*sizeof(double)); acc_shutdown(acc_device_nvidia); } gettime( &timer_stopped ); long long elapsed = usec(timer_started, timer_stopped); printf("HEj %f elapsed %ld\n", c[nelem/2], elapsed); return 0; }
Forza.h
#ifndef Forza_h__ #define Forza_h__ struct PatternData { uint32_t Count; uint32_t Size; uint32_t Length[16]; uint32_t Skip[16]; __m128i Value[16]; }; void GeneratePattern(const char* Signature, const char* Mask, PatternData* Out) { auto l = strlen(Mask); Out->Count = 0; for (auto i = 0; i < l; i++) { if (Mask[i] == '?') continue; auto ml = 0, sl = 0; for (auto j = i; j < l; j++) { if (Mask[j] == '?' || sl >= 16) break; sl++; } for (auto j = i + sl; j < l; j++) { if (Mask[j] != '?') break; ml++; } auto c = Out->Count; Out->Length[c] = sl; Out->Skip[c] = sl + ml; Out->Value[c] = _mm_loadu_si128((const __m128i*)((uint8_t*)Signature + i)); Out->Count++; i += sl - 1; } Out->Size = l; } __forceinline bool Matches(const uint8_t* Data, PatternData* Patterns) { auto k = Data + Patterns->Skip[0]; for (auto i = 1; i < Patterns->Count; i++) { auto l = Patterns->Length[i]; if (_mm_cmpestri(Patterns->Value[i], l, _mm_loadu_si128((const __m128i*)k), l, _SIDD_CMP_EQUAL_EACH | _SIDD_MASKED_NEGATIVE_POLARITY) != l) break; if (i + 1 == Patterns->Count) return true; k += Patterns->Skip[i]; } return false; } uint8_t* FindEx(const uint8_t* Data, const uint32_t Length, const char* Signature, const char* Mask) { PatternData d; GeneratePattern(Signature, Mask, &d); auto out = static_cast<uint8_t*>(nullptr); auto end = Data + Length - d.Size; //C3010: 'break' : jump out of OpenMP structured block not allowed #pragma omp parallel for for (intptr_t i = Length - 32; i >= 0; i -= 32) { #pragma omp flush (out) if (out == nullptr) { auto p = Data + i; auto b = _mm256_loadu_si256((const __m256i*)p); if (_mm256_test_all_zeros(b, b) == 1) continue; auto f = _mm_cmpestri(d.Value[0], d.Length[0], _mm256_extractf128_si256(b, 0), 16, _SIDD_CMP_EQUAL_ORDERED); if (f == 16) { f += _mm_cmpestri(d.Value[0], d.Length[0], _mm256_extractf128_si256(b, 1), 16, _SIDD_CMP_EQUAL_ORDERED); if (f == 32) continue; } PossibleMatch: p += f; if (p + d.Size > end) { for (auto j = 0; j < d.Size && j + i + f < Length; j++) 
{ if (Mask[j] == 'x' && (uint8_t)Signature[j] != p[j]) break; if (j + 1 == d.Size) out = (uint8_t*)p; } continue; } if (Matches(p, &d)) out = (uint8_t*)p; #pragma omp flush (out) if (out == nullptr) { p++; f = _mm_cmpestri(d.Value[0], d.Length[0], _mm_loadu_si128((const __m128i*)p), 16, _SIDD_CMP_EQUAL_ORDERED); if (f < 16) goto PossibleMatch; } } } return out; } void FindLargestArray(const char* Signature, const char* Mask, int Out[2]) { uint32_t t1 = 0; uint32_t t2 = strlen(Signature); uint32_t len = strlen(Mask); for (auto j = t2; j < len; j++) { if (Mask[j] != 'x') continue; auto count = strlen(&Signature[j]); if (count > t2) { t1 = j; t2 = count; } j += (count - 1); } Out[0] = t1; Out[1] = t2; } uint8_t* Find(const uint8_t* Data, const uint32_t Length, const char* Signature, const char* Mask) { int d[2] = { 0 }; FindLargestArray(Signature, Mask, d); const uint8_t len = static_cast<uint8_t>(strlen(Mask)); const uint8_t mbeg = static_cast<uint8_t>(d[0]); const uint8_t mlen = static_cast<uint8_t>(d[1]); const uint8_t mfirst = static_cast<uint8_t>(Signature[mbeg]); uint8_t wildcard[UCHAR_MAX + 1] = { 0 }; for (auto i = mbeg; i < mbeg + mlen; i++) wildcard[(uint8_t)Signature[i]] = 1; for (int i = Length - len; i >= 0; i--) { auto c = Data[i]; auto w = wildcard[c]; auto k = 0; while (w == 0 && i > mlen) { i -= mlen; w = wildcard[Data[i]]; k = 1; } if (k == 1) { i++; continue; } if (c != mfirst) continue; if (i - mbeg < 0 || i - mbeg + len > Length) return nullptr; for (auto j = 0; j < len - 1; j++) { if (j == mbeg || Mask[j] != 'x') continue; if (Data[i - mbeg + j] != (uint8_t)Signature[j]) break; if (j + 1 == len - 1) return (uint8_t*)(Data + i - mbeg); } } return nullptr; } struct ForzaSIMD : public BenchBase { virtual void init(Tests test) override { switch (test) { case Tests::First: Pattern = "\x45\x43\x45\x55\x33\x9a\xfa\x00\x00\x00\x00\x45\x68\x21"; Mask = "xxxxxxx????xxx"; break; case Tests::Second: Pattern = 
"\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\x00\x00\x00\x00\x45\x68\x21"; Mask = "xxxxxxxxxxx????xxx"; break; default: break; } CPUSupport = Supported(); } virtual LPVOID runOne(PBYTE baseAddress, DWORD size) override { if (CPUSupport) return FindEx((const uint8_t*)baseAddress, size, Pattern, Mask); if (!Init) { std::cout << "Your CPU does not support SIMD instructions, replacing with Boyer-Moore variant." << std::endl; Init = true; } return Find((const uint8_t*)baseAddress, size, Pattern, Mask); } virtual const char* name() const override { return "Forza (SIMD With OpenMP)"; } virtual bool BackwardsSearch() const override { return true; } bool Supported() { int id[4] = { 0 }; __cpuid(id, 1); bool sse42 = (id[3] & 0x04000000) != 0; bool avx = (id[2] & 0x18000000) != 0; return (sse42 && avx); } bool Init = false; bool CPUSupport; char* Pattern; char* Mask; }; struct Forza : public BenchBase { virtual void init(Tests test) override { switch (test) { case Tests::First: Pattern = "\x45\x43\x45\x55\x33\x9a\xfa\x00\x00\x00\x00\x45\x68\x21"; Mask = "xxxxxxx????xxx"; break; case Tests::Second: Pattern = "\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xaa\xbb\xaa\x00\x00\x00\x00\x45\x68\x21"; Mask = "xxxxxxxxxxx????xxx"; break; default: break; } } virtual LPVOID runOne(PBYTE baseAddress, DWORD size) override { return Find((const uint8_t*)baseAddress, size, Pattern, Mask); } virtual const char* name() const override { return "Forza (Boyer-Moore Variant)"; } virtual bool BackwardsSearch() const override { return true; } char* Pattern; char* Mask; }; REGISTER(Forza); REGISTER(ForzaSIMD); #endif // Forza_h__
p4.c
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define NRA 620                 /* number of rows in matrix A */
#define NCA 150                 /* number of columns in matrix A */
#define NCB 70                  /* number of columns in matrix B */

/*
 * Multiply A (NRA x NCA) by B (NCA x NCB) into C with the i and k loops
 * unrolled by two (valid because NRA and NCA are both even), the i loop
 * parallelized with OpenMP, then print the result matrix and the timing.
 */
int main (int argc, char *argv[])
{
    int i, j, k;
    double a[NRA][NCA],         /* matrix A to be multiplied */
           b[NCA][NCB],         /* matrix B to be multiplied */
           c[NRA][NCB];         /* result matrix C */

    /*** Initialize matrices ***/
    for (i=0; i<NRA; i++)
        for (j=0; j<NCA; j++)
            a[i][j]= i+j;
    for (i=0; i<NCA; i++)
        for (j=0; j<NCB; j++)
            b[i][j]= i*j;
    for (i=0; i<NRA; i++)
        for (j=0; j<NCB; j++)
            c[i][j]= 0;

    omp_set_num_threads(4);
    double begin = omp_get_wtime();

    /* BUG FIX: j and k are declared at function scope, so without a
       private clause they were SHARED across threads — a data race that
       silently corrupts c[][].  i is private automatically as the
       parallelized loop index; j and k must be made private explicitly. */
    #pragma omp parallel for schedule(static,1) private(j,k)
    for (i=0; i<NRA; i+=2)
    {
        for (k=0; k<NCA; k+=2)
        {
            for (j=0; j<NCB; j++)
            {
                /* 2x2 unrolled inner update of rows i,i+1 with cols k,k+1 */
                c[i][j] += a[i][k] * b[k][j];
                c[i+1][j] += a[i+1][k] * b[k][j];
                c[i][j] += a[i][k+1] * b[k+1][j];
                c[i+1][j] += a[i+1][k+1] * b[k+1][j];
            }
        }
    }
    double time_spent = (double)(omp_get_wtime() - begin);

    /*** Print results ***/
    printf("******************************************************\n");
    printf("Result Matrix:\n");
    for (i=0; i<NRA; i++)
    {
        for (j=0; j<NCB; j++)
            printf("%6.2f ", c[i][j]);
        printf("\n");
    }
    printf("******************************************************\n");
    printf ("Done.\n");
    printf ("Time: %f\n", time_spent);
    return 0;
}
PIS2TACMRSolver.h
// PIS2TA CMR Solver // Created by haoming on 2018/1/3. // Description: // #ifndef FMLBASE_PIS2TACMRSOLVER_H #define FMLBASE_PIS2TACMRSOLVER_H #include <math.h> #include <fmlbase/SolverBase.h> #include <fmlbase/PIS2TASQRTLassoSolver.h> #include <fmlbase/utils.h> #include <limits> // std::numeric_limits #include <Eigen/Dense> using Eigen::VectorXd; using Eigen::MatrixXd; namespace fmlbase{ class PIS2TACMRSolver : public PIS2TASQRTLassoSolver { public: explicit PIS2TACMRSolver(const utils::FmlParam &param); void reinitialize() override; // return the value of loss function inline double loss_value(VectorXd *theta_t) override { double objval = 0; double sqrt_ntrain_sample_inv = 1. / sqrt(1. * ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { objval += ((response_vec->segment(i * ntrain_sample, ntrain_sample)) - (*design_mat) * (theta_t->segment(i * nfeature, nfeature))).norm() * sqrt_ntrain_sample_inv; } return objval; } inline double loss_value() override { double objval =0; double sqrt_ntrain_sample_inv = 1. / sqrt(1. * ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { objval += ((response_vec->segment(i * ntrain_sample, ntrain_sample)) - (*design_mat) * (theta->segment(i * nfeature, nfeature))).norm() * sqrt_ntrain_sample_inv; } return objval; } // return the value of objective function inline double obj_value(VectorXd *theta_t) override { double objval =0; VectorXd regvec = VectorXd::Zero(nfeature); double sqrt_ntrain_sample_inv = 1. / sqrt(1. 
* ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { objval += ((response_vec->segment(i * ntrain_sample, ntrain_sample)) - (*design_mat) * (theta_t->segment(i * nfeature, nfeature))).norm() * sqrt_ntrain_sample_inv; regvec += theta_t->segment(i * nfeature, nfeature).array().pow(2).matrix(); } objval += lambda*regvec.cwiseSqrt().sum(); return objval; } inline double obj_value() override { double objval =0; VectorXd regvec = VectorXd::Zero(nfeature); double sqrt_ntrain_sample_inv = 1. / sqrt(1. * ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { objval += ((response_vec->segment(i * ntrain_sample, ntrain_sample)) - (*design_mat) * (theta->segment(i * nfeature, nfeature))).norm() * sqrt_ntrain_sample_inv; regvec += theta->segment(i * nfeature, nfeature).array().pow(2).matrix(); } objval += lambda*regvec.cwiseSqrt().sum(); return objval; } // return the gradient of total objective function inline void loss_grad(VectorXd &grad, VectorXd *theta_t) override { grad.setZero(); double sqrt_ntrain_sample = sqrt(1. * ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { auto residue = ((*design_mat) * (theta_t->segment(i * nfeature, nfeature)) - (response_vec->segment(i * ntrain_sample, ntrain_sample))); grad.segment(i * nfeature, nfeature) = (*design_mat).transpose() * residue; grad.segment(i * nfeature, nfeature) /= sqrt_ntrain_sample * residue.norm(); } } inline void loss_grad(VectorXd &grad) override { grad.setZero(); double sqrt_ntrain_sample = sqrt(1. 
* ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { auto residue = ((*design_mat) * (theta->segment(i * nfeature, nfeature)) - (response_vec->segment(i * ntrain_sample, ntrain_sample))); grad.segment(i * nfeature, nfeature) = (*design_mat).transpose() * residue; grad.segment(i * nfeature, nfeature) /= sqrt_ntrain_sample * residue.norm(); } } inline double loss_a_grad(VectorXd &grad) override { double objval = 0; grad.setZero(); double sqrt_ntrain_sample = sqrt(1. * ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { auto residue = ((*design_mat) * (theta->segment(i * nfeature, nfeature)) - (response_vec->segment(i * ntrain_sample, ntrain_sample))); grad.segment(i * nfeature, nfeature) = (*design_mat).transpose() * residue; grad.segment(i * nfeature, nfeature) /= sqrt_ntrain_sample * residue.norm(); objval += residue.norm() / sqrt_ntrain_sample; } return objval; } // return the gradient of total objective function with sub-gradient taking 0 at 0. inline void obj_grad(VectorXd &grad, VectorXd *theta_t) override { grad.setZero(); double sqrt_ntrain_sample = sqrt(1. 
* ntrain_sample); VectorXd regvec = VectorXd::Zero(nfeature); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { auto residue = ((*design_mat) * (theta_t->segment(i * nfeature, nfeature)) - (response_vec->segment(i * ntrain_sample, ntrain_sample))); grad.segment(i * nfeature, nfeature) = (*design_mat).transpose() * residue; grad.segment(i * nfeature, nfeature) /= sqrt_ntrain_sample * residue.norm(); regvec += theta_t->segment(i * nfeature, nfeature).array().pow(2).matrix(); } regvec = regvec.cwiseSqrt(); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { grad.segment(i * nfeature, nfeature) += lambda*((theta_t->segment(i * nfeature, nfeature).array() / (regvec.array() + 0.0000000001)) * regvec.cwiseSign().cwiseAbs().array()).matrix(); } } inline void obj_grad(VectorXd &grad) override { grad.setZero(); double sqrt_ntrain_sample = sqrt(1. * ntrain_sample); VectorXd regvec = VectorXd::Zero(nfeature); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { auto residue = ((*design_mat) * (theta->segment(i * nfeature, nfeature)) - (response_vec->segment(i * ntrain_sample, ntrain_sample))); grad.segment(i * nfeature, nfeature) = (*design_mat).transpose() * residue; grad.segment(i * nfeature, nfeature) /= sqrt_ntrain_sample * residue.norm(); regvec += theta->segment(i * nfeature, nfeature).array().pow(2).matrix(); } regvec = regvec.cwiseSqrt(); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { grad.segment(i * nfeature, nfeature) += lambda*((theta->segment(i * nfeature, nfeature).array() / (regvec.array() + 0.0000000001)) * regvec.cwiseSign().cwiseAbs().array()).matrix(); } } inline double hessian_norm() override { double hessian_norm = 1; double sqrt_ntrain_sample_inv = 1. / sqrt(1. 
* ntrain_sample); //#pragma omp parallel for for (int i = 0; i < nresponse; ++i) { auto subtheta = (theta->segment(i * nfeature, nfeature)); auto residue = ((response_vec->segment(i * ntrain_sample, ntrain_sample)) - (*design_mat) * subtheta); double residue_norm = residue.norm(); auto temp_vec = (*design_mat).transpose() * residue; auto hessianMat = 1. / (residue_norm * sqrt(1. * ntrain_sample)) * ((design_mat->transpose() * (*design_mat)) - (temp_vec * temp_vec.transpose()) / pow(residue_norm, 2)); hessian_norm *= hessianMat.norm(); } return hessian_norm; } // predict virtual VectorXd predict(int lambdaIdx, int responseIdx); // for training data VectorXd predict(int responseIdx) override { predict(-1,responseIdx); } // for training data VectorXd predict() override { throw std::runtime_error("Can not call predict without parameter in CMR\n"); } // for training data virtual VectorXd predict(const MatrixXd &newX, int lambdaIdx, int responseIdx); VectorXd predict(const MatrixXd &newX, int responseIdx) override { predict(newX, -1, responseIdx); }; VectorXd predict(const MatrixXd &newX) override { throw std::runtime_error("Can not call predict without without specified responseIdx in CMR\n"); } // residue norm / sqrt(n) double eval(int lambdaIdx) override; // for training data double eval() override{ return eval(-1); } // for training data double eval(const MatrixXd &newX, const MatrixXd &targetY, int lambdaIdx); double eval(const MatrixXd &newX, const MatrixXd &targetY){ return eval(newX, targetY, -1); } }; } // namespace fmlbase #endif //FMLBASE_PIS2TACMRSOLVER_H
data.c
#include "data.h"
#include "utils.h"
#include "image.h"
#include "opencl.h"

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Serializes rand() calls so concurrent data-loader threads draw
   random paths one at a time. */
pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;

/* Rename identifiers that are reserved words when this C source is
   pulled into a C++ translation unit. */
#define class temp
#define new new_temp

/* Read one path per line from FILENAME and return them as a list.
   CR/LF characters are stripped from the file name and from each path.
   Exits via file_error() if the file cannot be opened. */
list *get_paths(char *filename)
{
    if (filename) filename[strcspn(filename, "\n\r")] = 0;
    char *pos;
    /* NOTE(review): strcspn above already truncated at the first \n or \r,
       so these two strchr passes are redundant — kept as-is. */
    if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0';
    if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0';
    char *path;
    FILE *file = fopen(filename, "r");
    if(!file) file_error(filename);
    list *lines = make_list();
    while((path=fgetl(file))){
        if (path) path[strcspn(path, "\n\r")] = 0;
        list_insert(lines, path);
    }
    fclose(file);
    return lines;
}

/*
char **get_random_paths_indexes(char **paths, int n, int m, int *indexes)
{
    char **random_paths = calloc(n, sizeof(char*));
    int i;
    pthread_mutex_lock(&mutex);
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        indexes[i] = index;
        random_paths[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}
*/

/* Pick n paths uniformly at random (with replacement) from the m-entry
   array PATHS.  The returned array is freshly allocated and owned by the
   caller; its elements alias PATHS and must not be freed individually.
   The mutex guards the shared rand() state. */
char **get_random_paths(char **paths, int n, int m)
{
    pthread_mutex_lock(&mutex);
    char **random_paths = (char**)calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        int index = rand()%m;
        random_paths[i] = paths[index];
        //if(i == 0) printf("%s\n", paths[index]);
    }
    pthread_mutex_unlock(&mutex);
    return random_paths;
}

/* Return a new array of n paths with every occurrence of FIND replaced
   by REPLACE in each path.  Each returned string is freshly allocated;
   the caller owns the array and its elements. */
char **find_replace_paths(char **paths, int n, char *find, char *replace)
{
    char **replace_paths = (char**)calloc(n, sizeof(char*));
    int i;
    for(i = 0; i < n; ++i){
        /* assumes each rewritten path fits in 4096 bytes — TODO confirm */
        char replaced[4096];
        find_replace(paths[i], find, replace, replaced);
        replace_paths[i] = copy_string(replaced);
    }
    return replace_paths;
}

/* Load n images at w x h (3 channels), convert each to grayscale, and
   pack the pixel data into a matrix with one image per row.  Each row
   takes ownership of the grayscale image's data buffer. */
matrix load_image_paths_gray(char **paths, int n, int w, int h)
{
    int i;
    matrix X;
    X.rows = n;
    X.vals = (float**)calloc(X.rows, sizeof(float*));
    X.cols = 0;

    for(i = 0; i < n; ++i){
        image im = load_image(paths[i], w, h, 3);

        image gray = grayscale_image(im);
        free_image(im);
        im = gray;

        X.vals[i] = im.data;
        X.cols = im.h*im.w*im.c;
    }
    return X;
}
matrix load_image_paths(char **paths, int n, int w, int h) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], w, h); X.vals[i] = im.data; X.cols = im.h*im.w*im.c; } return X; } matrix load_image_augment_paths(char **paths, int n, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center) { int i; matrix X; X.rows = n; X.vals = (float**)calloc(X.rows, sizeof(float*)); X.cols = 0; for(i = 0; i < n; ++i){ image im = load_image_color(paths[i], 0, 0); image crop; if(center){ crop = center_crop_image(im, size, size); } else { crop = random_augment_image(im, angle, aspect, min, max, size, size); } int flip = rand()%2; if (flip) flip_image(crop); random_distort_image(crop, hue, saturation, exposure); /* show_image(im, "orig"); show_image(crop, "crop"); cvWaitKey(0); */ //grayscale_image_3c(crop); free_image(im); X.vals[i] = crop.data; X.cols = crop.h*crop.w*crop.c; } return X; } box_label *read_boxes(char *filename, int *n) { if (filename) filename[strcspn(filename, "\n\r")] = 0; char *pos; if ((pos=strchr(filename, '\r')) != NULL) *pos = '\0'; if ((pos=strchr(filename, '\n')) != NULL) *pos = '\0'; FILE *file = fopen(filename, "r"); if(!file) file_error(filename); float x, y, h, w; int id; int count = 0; int size = 64; box_label *boxes = (box_label*)calloc(size, sizeof(box_label)); while(fscanf(file, "%d %f %f %f %f", &id, &x, &y, &w, &h) == 5){ if(count == size) { size = size * 2; boxes = (box_label*)realloc(boxes, size*sizeof(box_label)); } boxes[count].id = id; boxes[count].x = x; boxes[count].y = y; boxes[count].h = h; boxes[count].w = w; boxes[count].left = x - w/2; boxes[count].right = x + w/2; boxes[count].top = y - h/2; boxes[count].bottom = y + h/2; ++count; } fclose(file); *n = count; return boxes; } void randomize_boxes(box_label *b, int n) { int i; for(i = 0; i < n; ++i){ box_label swap = b[i]; int index = 
rand()%n; b[i] = b[index]; b[index] = swap; } } void correct_boxes(box_label *boxes, int n, float dx, float dy, float sx, float sy, int flip) { int i; for(i = 0; i < n; ++i){ if(boxes[i].x == 0 && boxes[i].y == 0) { boxes[i].x = 999999; boxes[i].y = 999999; boxes[i].w = 999999; boxes[i].h = 999999; continue; } boxes[i].left = boxes[i].left * sx - dx; boxes[i].right = boxes[i].right * sx - dx; boxes[i].top = boxes[i].top * sy - dy; boxes[i].bottom = boxes[i].bottom* sy - dy; if(flip){ float swap = boxes[i].left; boxes[i].left = 1. - boxes[i].right; boxes[i].right = 1. - swap; } boxes[i].left = constrain(0, 1, boxes[i].left); boxes[i].right = constrain(0, 1, boxes[i].right); boxes[i].top = constrain(0, 1, boxes[i].top); boxes[i].bottom = constrain(0, 1, boxes[i].bottom); boxes[i].x = (boxes[i].left+boxes[i].right)/2; boxes[i].y = (boxes[i].top+boxes[i].bottom)/2; boxes[i].w = (boxes[i].right - boxes[i].left); boxes[i].h = (boxes[i].bottom - boxes[i].top); boxes[i].w = constrain(0, 1, boxes[i].w); boxes[i].h = constrain(0, 1, boxes[i].h); } } void fill_truth_swag(char *path, float *truth, int classes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count && i < 90; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .0 || h < .0) continue; int index = (4+classes) * i; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; if (id < classes) truth[index+id] = 1; } free(boxes); } void fill_truth_region(char *path, float 
*truth, int classes, int num_boxes, int flip, float dx, float dy, float sx, float sy) { char labelpath[4096]; find_replace(path, "images", "labels", labelpath); find_replace(labelpath, "JPEGImages", "labels", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); int count = 0; box_label *boxes = read_boxes(labelpath, &count); randomize_boxes(boxes, count); correct_boxes(boxes, count, dx, dy, sx, sy, flip); float x,y,w,h; int id; int i; for (i = 0; i < count; ++i) { x = boxes[i].x; y = boxes[i].y; w = boxes[i].w; h = boxes[i].h; id = boxes[i].id; if (w < .005 || h < .005) continue; int col = (int)(x*num_boxes); int row = (int)(y*num_boxes); x = x*num_boxes - col; y = y*num_boxes - row; int index = (col+row*num_boxes)*(5+classes); if (truth[index]) continue; truth[index++] = 1; if (id < classes) truth[index+id] = 1; index += classes; truth[index++] = x; truth[index++] = y; truth[index++] = w; truth[index++] = h; } free(boxes); } void load_rle(image im, int *rle, int n) { int count = 0; int curr = 0; int i,j; for(i = 0; i < n; ++i){ for(j = 0; j < rle[i]; ++j){ im.data[count++] = curr; } curr = 1 - curr; } for(; count < im.h*im.w*im.c; ++count){ im.data[count] = curr; } } void or_image(image src, image dest, int c) { int i; for(i = 0; i < src.w*src.h; ++i){ if(src.data[i]) dest.data[dest.w*dest.h*c + i] = 1; } } void exclusive_image(image src) { int k, j, i; int s = src.w*src.h; for(k = 0; k < src.c-1; ++k){ for(i = 0; i < s; ++i){ if (src.data[k*s + i]){ for(j = k+1; j < src.c; ++j){ src.data[j*s + i] = 0; } } } } } box bound_image(image im) { int x,y; int minx = im.w; int miny = im.h; int maxx = 0; int maxy = 0; for(y = 0; y < im.h; ++y){ for(x = 0; x < im.w; ++x){ if(im.data[y*im.w + x]){ minx = (x < minx) ? x : minx; miny = (y < miny) ? y : miny; maxx = (x > maxx) ? x : maxx; maxy = (y > maxy) ? 
y : maxy; } } } box b = {minx, miny, maxx-minx + 1, maxy-miny + 1}; //printf("%f %f %f %f\n", b.x, b.y, b.w, b.h); return b; } void fill_truth_iseg(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; int i = 0; int j; image part = make_image(w, h, 1); while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, aug.h, aug.dx, aug.dy, aug.aspect); if(flip) flip_image(sized); image mask = resize_image(sized, mw, mh); truth[i*(mw*mh+1)] = id; for(j = 0; j < mw*mh; ++j){ truth[i*(mw*mh + 1) + 1 + j] = mask.data[j]; } ++i; free_image(mask); free_image(sized); free(rle); } if(i < num_boxes) truth[i*(mw*mh+1)] = -1; fclose(file); free_image(part); } void fill_truth_mask(char *path, int num_boxes, float *truth, int classes, int w, int h, augment_args aug, int flip, int mw, int mh) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); if(!file) file_error(labelpath); char buff[32788]; int id; int i = 0; image part = make_image(w, h, 1); while((fscanf(file, "%d %s", &id, buff) == 2) && i < num_boxes){ int n = 0; int *rle = read_intlist(buff, &n, 0); load_rle(part, rle, n); image sized = rotate_crop_image(part, aug.rad, aug.scale, aug.w, 
aug.h, aug.dx, aug.dy, aug.aspect);
        if(flip) flip_image(sized);
        box b = bound_image(sized);         // bounding box of the augmented mask
        if(b.w > 0){
            image crop = crop_image(sized, b.x, b.y, b.w, b.h);
            image mask = resize_image(crop, mw, mh);
            // Box is stored center-relative, normalized by the augmented image size.
            truth[i*(4 + mw*mh + 1) + 0] = (b.x + b.w/2.)/sized.w;
            truth[i*(4 + mw*mh + 1) + 1] = (b.y + b.h/2.)/sized.h;
            truth[i*(4 + mw*mh + 1) + 2] = b.w/sized.w;
            truth[i*(4 + mw*mh + 1) + 3] = b.h/sized.h;
            int j;
            for(j = 0; j < mw*mh; ++j){
                truth[i*(4 + mw*mh + 1) + 4 + j] = mask.data[j];
            }
            truth[i*(4 + mw*mh + 1) + 4 + mw*mh] = id;
            free_image(crop);
            free_image(mask);
            ++i;
        }
        free_image(sized);
        free(rle);
    }
    fclose(file);
    free_image(part);
}

/* Load up to num_boxes ground-truth boxes for a detection image into `truth`
   as flat [x, y, w, h, id] records. dx/dy/sx/sy describe the crop/scale the
   image underwent so boxes can be corrected to match; degenerate boxes
   (w or h < .001 after correction) are dropped. */
void fill_truth_detection(char *path, int num_boxes, float *truth, int classes, int flip, float dx, float dy, float sx, float sy)
{
    char labelpath[4096];
    find_replace(path, "images", "labels", labelpath);
    find_replace(labelpath, "JPEGImages", "labels", labelpath);
    find_replace(labelpath, "raw", "labels", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".png", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    int count = 0;
    box_label *boxes = read_boxes(labelpath, &count);
    randomize_boxes(boxes, count);
    correct_boxes(boxes, count, dx, dy, sx, sy, flip);
    if(count > num_boxes) count = num_boxes;
    float x,y,w,h;
    int id;
    int i;
    int sub = 0;    // number of boxes skipped so far (keeps truth densely packed)
    for (i = 0; i < count; ++i) {
        x = boxes[i].x;
        y = boxes[i].y;
        w = boxes[i].w;
        h = boxes[i].h;
        id = boxes[i].id;

        if ((w < .001 || h < .001)) {
            ++sub;
            continue;
        }

        truth[(i-sub)*5+0] = x;
        truth[(i-sub)*5+1] = y;
        truth[(i-sub)*5+2] = w;
        truth[(i-sub)*5+3] = h;
        truth[(i-sub)*5+4] = id;
    }
    free(boxes);
}

#define NUMCHARS 37

/* Print the argmax character of each NUMCHARS-wide prediction slice. */
void print_letters(float *pred, int n)
{
    int i;
    for(i = 0; i < n; ++i){
        int index = max_index(pred+i*NUMCHARS, NUMCHARS);
        printf("%c", int_to_alphanum(index));
    }
    printf("\n");
}

/* One-hot encode the captcha answer embedded in the file name (characters
   after the last '/' up to the first '.'); unused positions get the
   "blank" class NUMCHARS-1. (continues on the next source line) */
void fill_truth_captcha(char *path, int n, float *truth)
{
    char *begin = strrchr(path, '/');
    ++begin;
    int i;
for(i = 0; i < strlen(begin) && i < n && begin[i] != '.'; ++i){
        int index = alphanum_to_int(begin[i]);
        if(index > 35) printf("Bad %c\n", begin[i]);
        truth[i*NUMCHARS+index] = 1;
    }
    for(;i < n; ++i){
        truth[i*NUMCHARS + NUMCHARS-1] = 1;   // pad remaining slots with the blank class
    }
}

/* Load captcha images and their one-hot answers (k characters each). */
data load_data_captcha(char **paths, int n, int m, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = make_matrix(n, k*NUMCHARS);
    int i;
    for(i = 0; i < n; ++i){
        fill_truth_captcha(paths[i], k, d.y.vals[i]);
    }
    if(m) free(paths);
    return d;
}

/* Autoencoder-style captcha data: target y aliases the input X. */
data load_data_captcha_encode(char **paths, int n, int m, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.X.cols = 17100;   // hard-coded encoded width; NOTE(review): magic number, origin unclear from here
    d.y = d.X;          // y shares X's row pointers (shallow alias)
    if(m) free(paths);
    return d;
}

/* One-hot classification truth: set truth[i]=1 for every label whose name
   occurs as a substring of the image path; warns unless exactly one matched
   (or k==1 with zero matches). */
void fill_truth(char *path, char **labels, int k, float *truth)
{
    int i;
    memset(truth, 0, k*sizeof(float));
    int count = 0;
    for(i = 0; i < k; ++i){
        if(strstr(path, labels[i])){
            truth[i] = 1;
            ++count;
            //printf("%s %s %d\n", path, labels[i], i);
        }
    }
    if(count != 1 && (k != 1 || count != 0)) printf("Too many or too few labels: %d, %s\n", count, path);
}

/* Propagate truth up a label hierarchy: mark all ancestors of set labels,
   and set every member of a fully-unset sibling group to SECRET_NUM
   (a "don't care" marker used elsewhere in this codebase). */
void fill_hierarchy(float *truth, int k, tree *hierarchy)
{
    int j;
    for(j = 0; j < k; ++j){
        if(truth[j]){
            int parent = hierarchy->parent[j];
            while(parent >= 0){
                truth[parent] = 1;
                parent = hierarchy->parent[parent];
            }
        }
    }
    int i;
    int count = 0;
    for(j = 0; j < hierarchy->groups; ++j){
        //printf("%d\n", count);
        int mask = 1;
        for(i = 0; i < hierarchy->group_size[j]; ++i){
            if(truth[count + i]){
                mask = 0;
                break;
            }
        }
        if (mask) {
            for(i = 0; i < hierarchy->group_size[j]; ++i){
                truth[count + i] = SECRET_NUM;
            }
        }
        count += hierarchy->group_size[j];
    }
}

/* Read k regression targets per image from a sibling ".txt" label file.
   (continues on the next source line) */
matrix load_regression_labels_paths(char **paths, int n, int k)
{
    matrix y = make_matrix(n, k);
    int i,j;
    for(i = 0; i < n; ++i){
        char labelpath[4096];
        find_replace(paths[i], "images", "labels", labelpath);
        find_replace(labelpath, "JPEGImages", "labels", labelpath);
find_replace(labelpath, ".BMP", ".txt", labelpath); find_replace(labelpath, ".JPEG", ".txt", labelpath); find_replace(labelpath, ".JPG", ".txt", labelpath); find_replace(labelpath, ".JPeG", ".txt", labelpath); find_replace(labelpath, ".Jpeg", ".txt", labelpath); find_replace(labelpath, ".PNG", ".txt", labelpath); find_replace(labelpath, ".TIF", ".txt", labelpath); find_replace(labelpath, ".bmp", ".txt", labelpath); find_replace(labelpath, ".jpeg", ".txt", labelpath); find_replace(labelpath, ".jpg", ".txt", labelpath); find_replace(labelpath, ".png", ".txt", labelpath); find_replace(labelpath, ".tif", ".txt", labelpath); FILE *file = fopen(labelpath, "r"); for(j = 0; j < k; ++j){ fscanf(file, "%f", &(y.vals[i][j])); } fclose(file); } return y; } matrix load_labels_paths(char **paths, int n, char **labels, int k, tree *hierarchy) { matrix y = make_matrix(n, k); int i; for(i = 0; i < n && labels; ++i){ fill_truth(paths[i], labels, k, y.vals[i]); if(hierarchy){ fill_hierarchy(y.vals[i], k, hierarchy); } } return y; } matrix load_tags_paths(char **paths, int n, int k) { matrix y = make_matrix(n, k); int i; //int count = 0; for(i = 0; i < n; ++i){ char label[4096]; find_replace(paths[i], "images", "labels", label); find_replace(label, ".jpg", ".txt", label); FILE *file = fopen(label, "r"); if (!file) continue; //++count; int tag; while(fscanf(file, "%d", &tag) == 1){ if(tag < k){ y.vals[i][tag] = 1; } } fclose(file); } //printf("%d/%d\n", count, n); return y; } char **get_labels(char *filename) { list *plist = get_paths(filename); char **labels = (char **)list_to_array(plist); free_list(plist); return labels; } void free_data(data d) { if(!d.shallow){ free_matrix(d.X); free_matrix(d.y); }else{ free(d.X.vals); free(d.y.vals); } } image get_segmentation_image(char *path, int w, int h, int classes) { char labelpath[4096]; find_replace(path, "images", "mask", labelpath); find_replace(labelpath, "JPEGImages", "mask", labelpath); find_replace(labelpath, ".jpg", ".txt", 
labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes);
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);   // merge this object into its class channel
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

/* Variant of get_segmentation_image with an extra trailing "background"
   channel (index `classes`) that starts all-1 and is cleared wherever any
   object mask covers a pixel. */
image get_segmentation_image2(char *path, int w, int h, int classes)
{
    char labelpath[4096];
    find_replace(path, "images", "mask", labelpath);
    find_replace(labelpath, "JPEGImages", "mask", labelpath);
    find_replace(labelpath, ".jpg", ".txt", labelpath);
    find_replace(labelpath, ".JPG", ".txt", labelpath);
    find_replace(labelpath, ".JPEG", ".txt", labelpath);
    image mask = make_image(w, h, classes+1);
    int i;
    for(i = 0; i < w*h; ++i){
        mask.data[w*h*classes + i] = 1;     // background channel defaults to 1
    }
    FILE *file = fopen(labelpath, "r");
    if(!file) file_error(labelpath);
    char buff[32788];
    int id;
    image part = make_image(w, h, 1);
    while(fscanf(file, "%d %s", &id, buff) == 2){
        int n = 0;
        int *rle = read_intlist(buff, &n, 0);
        load_rle(part, rle, n);
        or_image(part, mask, id);
        for(i = 0; i < w*h; ++i){
            if(part.data[i]) mask.data[w*h*classes + i] = 0;
        }
        free(rle);
    }
    //exclusive_image(mask);
    fclose(file);
    free_image(part);
    return mask;
}

/* Semantic-segmentation batch loader: each example is an augmented color
   image plus a matching (div-downscaled) class-mask target.
   (continues on the next source line) */
data load_data_seg(int n, char **paths, int m, int w, int h, int classes, int min, int max, float angle, float aspect, float hue, float saturation, float exposure, int div)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y.rows = n;
    d.y.cols = h*w*classes/div/div;
    d.y.vals = (float**)calloc(d.X.rows, sizeof(float*));

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle,
aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;   // d takes ownership of sized's pixel buffer

        // The mask gets the *same* geometric augmentation, scaled down by div.
        image mask = get_segmentation_image(random_paths[i], orig.w, orig.h, classes);
        //image mask = make_image(orig.w, orig.h, classes+1);
        image sized_m = rotate_crop_image(mask, a.rad, a.scale/div, a.w/div, a.h/div, a.dx/div, a.dy/div, a.aspect);
        if(flip) flip_image(sized_m);
        d.y.vals[i] = sized_m.data;

        free_image(orig);
        free_image(mask);

        /*
           image rgb = mask_to_rgb(sized_m, classes);
           show_image(rgb, "part");
           show_image(sized, "orig");
           cvWaitKey(0);
           free_image(rgb);
         */
    }
    free(random_paths);
    return d;
}

/* Instance-segmentation batch loader: augmented color images paired with
   fill_truth_iseg-format [id, mask] records at (w/div) x (h/div). */
data load_data_iseg(int n, char **paths, int m, int w, int h, int classes, int boxes, int div, int min, int max, float angle, float aspect, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (((w/div)*(h/div))+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        fill_truth_iseg(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, w/div, h/div);

        free_image(orig);

        /*
           image rgb = mask_to_rgb(sized_m, classes);
           show_image(rgb, "part");
           show_image(sized, "orig");
           cvWaitKey(0);
           free_image(rgb);
         */
    }
    free(random_paths);
    return d;
}

/* Mask-RCNN-style loader: images plus fill_truth_mask-format records with
   fixed 14x14 masks. (continues on the next source line) */
data load_data_mask(int n, char **paths, int m, int w, int h, int classes, int boxes, int coords, int min, int max, float angle, float aspect, float hue,
float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, (coords+1)*boxes);

    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        augment_args a = random_augment_args(orig, angle, aspect, min, max, w, h);
        image sized = rotate_crop_image(orig, a.rad, a.scale, a.w, a.h, a.dx, a.dy, a.aspect);
        int flip = rand()%2;
        if(flip) flip_image(sized);
        random_distort_image(sized, hue, saturation, exposure);
        d.X.vals[i] = sized.data;
        //show_image(sized, "image");

        fill_truth_mask(random_paths[i], boxes, d.y.vals[i], classes, orig.w, orig.h, a, flip, 14, 14);

        free_image(orig);

        /*
           image rgb = mask_to_rgb(sized_m, classes);
           show_image(rgb, "part");
           show_image(sized, "orig");
           cvWaitKey(0);
           free_image(rgb);
         */
    }
    free(random_paths);
    return d;
}

/* Region (YOLOv1/v2-style) detection loader: random jittered crop + resize
   to w x h, size*size*(5+classes) truth grid. (continues on the next line) */
data load_data_region(int n, char **paths, int m, int w, int h, int size, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = size*size*(5+classes);
    d.y = make_matrix(n, k);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);

        int oh = orig.h;
        int ow = orig.w;

        // Random crop amounts on each side, up to +/- jitter of the dimension.
        int dw = (ow*jitter);
        int dh = (oh*jitter);

        int pleft  = rand_uniform(-dw, dw);
        int pright = rand_uniform(-dw, dw);
        int ptop   = rand_uniform(-dh, dh);
        int pbot   = rand_uniform(-dh, dh);

        int swidth =  ow - pleft - pright;
        int sheight = oh - ptop - pbot;

        float sx = (float)swidth  / ow;
        float sy = (float)sheight / oh;

        int flip = rand()%2;
        image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

        float dx = ((float)pleft/ow)/sx;
        float dy = ((float)ptop /oh)/sy;

        image sized = resize_image(cropped, w, h);
        if(flip) flip_image(sized);
        random_distort_image(sized, hue,
saturation, exposure); d.X.vals[i] = sized.data; fill_truth_region(random_paths[i], d.y.vals[i], classes, size, flip, dx, dy, 1./sx, 1./sy); free_image(orig); free_image(cropped); } free(random_paths); return d; } data load_data_compare(int n, char **paths, int m, int classes, int w, int h) { if(m) paths = get_random_paths(paths, 2*n, m); int i,j; data d = {0}; d.shallow = 0; d.X.rows = n; d.X.vals = (float**)calloc(d.X.rows, sizeof(float*)); d.X.cols = h*w*6; int k = 2*(classes); d.y = make_matrix(n, k); for(i = 0; i < n; ++i){ image im1 = load_image_color(paths[i*2], w, h); image im2 = load_image_color(paths[i*2+1], w, h); d.X.vals[i] = (float*)calloc(d.X.cols, sizeof(float)); memcpy(d.X.vals[i], im1.data, h*w*3*sizeof(float)); memcpy(d.X.vals[i] + h*w*3, im2.data, h*w*3*sizeof(float)); int id; float iou; char imlabel1[4096]; char imlabel2[4096]; find_replace(paths[i*2], "imgs", "labels", imlabel1); find_replace(imlabel1, "jpg", "txt", imlabel1); FILE *fp1 = fopen(imlabel1, "r"); while(fscanf(fp1, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id] < iou) d.y.vals[i][2*id] = iou; } find_replace(paths[i*2+1], "imgs", "labels", imlabel2); find_replace(imlabel2, "jpg", "txt", imlabel2); FILE *fp2 = fopen(imlabel2, "r"); while(fscanf(fp2, "%d %f", &id, &iou) == 2){ if (d.y.vals[i][2*id + 1] < iou) d.y.vals[i][2*id + 1] = iou; } for (j = 0; j < classes; ++j){ if (d.y.vals[i][2*j] > .5 && d.y.vals[i][2*j+1] < .5){ d.y.vals[i][2*j] = 1; d.y.vals[i][2*j+1] = 0; } else if (d.y.vals[i][2*j] < .5 && d.y.vals[i][2*j+1] > .5){ d.y.vals[i][2*j] = 0; d.y.vals[i][2*j+1] = 1; } else { d.y.vals[i][2*j] = SECRET_NUM; d.y.vals[i][2*j+1] = SECRET_NUM; } } fclose(fp1); fclose(fp2); free_image(im1); free_image(im2); } if(m) free(paths); return d; } data load_data_swag(char **paths, int n, int classes, float jitter) { int index = rand()%n; char *random_path = paths[index]; image orig = load_image_color(random_path, 0, 0); int h = orig.h; int w = orig.w; data d = {0}; d.shallow = 0; d.w = 
w;
    d.h = h;

    d.X.rows = 1;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    int k = (4+classes)*90;     // up to 90 boxes of [x,y,w,h,classes]
    d.y = make_matrix(1, k);

    int dw = w*jitter;
    int dh = h*jitter;

    int pleft  = rand_uniform(-dw, dw);
    int pright = rand_uniform(-dw, dw);
    int ptop   = rand_uniform(-dh, dh);
    int pbot   = rand_uniform(-dh, dh);

    int swidth =  w - pleft - pright;
    int sheight = h - ptop - pbot;

    float sx = (float)swidth  / w;
    float sy = (float)sheight / h;

    int flip = rand()%2;
    image cropped = crop_image(orig, pleft, ptop, swidth, sheight);

    float dx = ((float)pleft/w)/sx;
    float dy = ((float)ptop /h)/sy;

    image sized = resize_image(cropped, w, h);
    if(flip) flip_image(sized);
    d.X.vals[0] = sized.data;

    fill_truth_swag(random_path, d.y.vals[0], classes, flip, dx, dy, 1./sx, 1./sy);

    free_image(orig);
    free_image(cropped);

    return d;
}

/* YOLO detection batch loader: each image is placed (random aspect/offset)
   onto a gray w x h canvas, color-distorted, optionally flipped, with boxes
   corrected accordingly (5 floats per box). */
data load_data_detection(int n, char **paths, int m, int w, int h, int boxes, int classes, float jitter, float hue, float saturation, float exposure)
{
    char **random_paths = get_random_paths(paths, n, m);
    int i;
    data d = {0};
    d.shallow = 0;

    d.X.rows = n;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));
    d.X.cols = h*w*3;

    d.y = make_matrix(n, 5*boxes);
    for(i = 0; i < n; ++i){
        image orig = load_image_color(random_paths[i], 0, 0);
        image sized = make_image(w, h, orig.c);
        fill_image(sized, .5);      // gray letterbox background

        float dw = jitter * orig.w;
        float dh = jitter * orig.h;

        // Perturb the aspect ratio by up to +/- jitter in each dimension.
        float new_ar = (orig.w + rand_uniform(-dw, dw)) / (orig.h + rand_uniform(-dh, dh));
        //float scale = rand_uniform(.25, 2);
        float scale = 1;

        float nw, nh;

        if(new_ar < 1){
            nh = scale * h;
            nw = nh * new_ar;
        } else {
            nw = scale * w;
            nh = nw / new_ar;
        }

        float dx = rand_uniform(0, w - nw);
        float dy = rand_uniform(0, h - nh);

        place_image(orig, nw, nh, dx, dy, sized);

        random_distort_image(sized, hue, saturation, exposure);

        int flip = rand()%2;
        if(flip) flip_image(sized);
        d.X.vals[i] = sized.data;

        fill_truth_detection(random_paths[i], boxes, d.y.vals[i], classes, flip, -dx/w, -dy/h, nw/w, nh/h);

        free_image(orig);
    }
free(random_paths);
    return d;
}

/* pthread entry point: dispatch one load request described by `ptr` (a
   heap-allocated load_args, freed here) to the matching loader and store
   the result through a.d / a.im / a.resized. */
void *load_thread(void *ptr)
{
    //printf("Loading data: %d\n", rand());
    load_args a = *(struct load_args*)ptr;
    // Zero augmentation parameters mean "no change", not "black out".
    if(a.exposure == 0) a.exposure = 1;
    if(a.saturation == 0) a.saturation = 1;
    if(a.aspect == 0) a.aspect = 1;

    if (a.type == OLD_CLASSIFICATION_DATA){
        *a.d = load_data_old(a.paths, a.n, a.m, a.labels, a.classes, a.w, a.h);
    } else if (a.type == REGRESSION_DATA){
        *a.d = load_data_regression(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == CLASSIFICATION_DATA){
        *a.d = load_data_augment(a.paths, a.n, a.m, a.labels, a.classes, a.hierarchy, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.center);
    } else if (a.type == SUPER_DATA){
        *a.d = load_data_super(a.paths, a.n, a.m, a.w, a.h, a.scale);
    } else if (a.type == WRITING_DATA){
        *a.d = load_data_writing(a.paths, a.n, a.m, a.w, a.h, a.out_w, a.out_h);
    } else if (a.type == ISEG_DATA){
        *a.d = load_data_iseg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.scale, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == INSTANCE_DATA){
        *a.d = load_data_mask(a.n, a.paths, a.m, a.w, a.h, a.classes, a.num_boxes, a.coords, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    } else if (a.type == SEGMENTATION_DATA){
        *a.d = load_data_seg(a.n, a.paths, a.m, a.w, a.h, a.classes, a.min, a.max, a.angle, a.aspect, a.hue, a.saturation, a.exposure, a.scale);
    } else if (a.type == REGION_DATA){
        *a.d = load_data_region(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == DETECTION_DATA){
        *a.d = load_data_detection(a.n, a.paths, a.m, a.w, a.h, a.num_boxes, a.classes, a.jitter, a.hue, a.saturation, a.exposure);
    } else if (a.type == SWAG_DATA){
        *a.d = load_data_swag(a.paths, a.n, a.classes, a.jitter);
    } else if (a.type == COMPARE_DATA){
        *a.d = load_data_compare(a.n, a.paths, a.m, a.classes,
a.w, a.h);
    } else if (a.type == IMAGE_DATA){
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = resize_image(*(a.im), a.w, a.h);
    } else if (a.type == LETTERBOX_DATA){
        *(a.im) = load_image_color(a.path, 0, 0);
        *(a.resized) = letterbox_image(*(a.im), a.w, a.h);
    } else if (a.type == TAG_DATA){
        *a.d = load_data_tag(a.paths, a.n, a.m, a.classes, a.min, a.max, a.size, a.angle, a.aspect, a.hue, a.saturation, a.exposure);
    }
    free(ptr);
    return 0;
}

/* Spawn a single loader thread for one load_args request; aborts on
   pthread_create failure. Caller joins the returned thread. */
pthread_t load_data_in_thread(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_thread, ptr)) error("Thread creation failed");
    return thread;
}

/* Fan out one load request across args.threads worker threads (splitting
   args.n evenly), join them, and concatenate the per-thread results into
   *args.d. Frees `ptr` and all intermediate buffers. */
void *load_threads(void *ptr)
{
    int i;
    load_args args = *(load_args *)ptr;
    if (args.threads == 0) args.threads = 1;
    data *out = args.d;
    int total = args.n;
    free(ptr);
    data *buffers = (data*)calloc(args.threads, sizeof(data));
    pthread_t *threads = (pthread_t*)calloc(args.threads, sizeof(pthread_t));
    for(i = 0; i < args.threads; ++i){
        args.d = buffers + i;
        // Integer split that covers `total` exactly across all threads.
        args.n = (i+1) * total/args.threads - i * total/args.threads;
        threads[i] = load_data_in_thread(args);
    }
    for(i = 0; i < args.threads; ++i){
        pthread_join(threads[i], 0);
    }
    *out = concat_datas(buffers, args.threads);
    out->shallow = 0;
    for(i = 0; i < args.threads; ++i){
        buffers[i].shallow = 1;     // rows were handed to *out; free only pointers
        free_data(buffers[i]);
    }
    free(buffers);
    free(threads);
    return 0;
}

/* Synchronous variant: run the load request on the calling thread. */
void load_data_blocking(load_args args)
{
    struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;
    load_thread(ptr);
}

/* Kick off an asynchronous multi-threaded load; caller joins the thread. */
pthread_t load_data(load_args args)
{
    pthread_t thread;
    struct load_args *ptr = (load_args*)calloc(1, sizeof(struct load_args));
    *ptr = args;
    if(pthread_create(&thread, 0, load_threads, ptr)) error("Thread creation failed");
    return thread;
}

/* Handwriting-style loader: X is the color image, y the grayscale
   "-label.png" companion. (continues on the next source line) */
data load_data_writing(char **paths, int n, int m, int w, int h, int out_w, int out_h)
{
    if(m) paths = get_random_paths(paths, n, m);
    char **replace_paths = find_replace_paths(paths, n, ".png",
"-label.png");
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_image_paths_gray(replace_paths, n, out_w, out_h);
    if(m) free(paths);
    int i;
    for(i = 0; i < n; ++i) free(replace_paths[i]);
    free(replace_paths);
    return d;
}

/* Plain classification loader: images at w x h plus substring-matched
   one-hot labels (no augmentation, no hierarchy). */
data load_data_old(char **paths, int n, int m, char **labels, int k, int w, int h)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_paths(paths, n, w, h);
    d.y = load_labels_paths(paths, n, labels, k, 0);
    if(m) free(paths);
    return d;
}

/*
   data load_data_study(char **paths, int n, int m, char **labels, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
   {
   data d = {0};
   d.indexes = calloc(n, sizeof(int));
   if(m) paths = get_random_paths_indexes(paths, n, m, d.indexes);
   d.shallow = 0;
   d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure);
   d.y = load_labels_paths(paths, n, labels, k);
   if(m) free(paths);
   return d;
   }
 */

/* Super-resolution loader: y is a random (w*scale) x (h*scale) crop,
   X is that crop downscaled to w x h. */
data load_data_super(char **paths, int n, int m, int w, int h, int scale)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;

    int i;
    d.X.rows = n;
    d.X.vals = (float**)calloc(n, sizeof(float*));
    d.X.cols = w*h*3;

    d.y.rows = n;
    d.y.vals = (float**)calloc(n, sizeof(float*));
    d.y.cols = w*scale * h*scale * 3;

    for(i = 0; i < n; ++i){
        image im = load_image_color(paths[i], 0, 0);
        image crop = random_crop_image(im, w*scale, h*scale);
        int flip = rand()%2;
        if (flip) flip_image(crop);
        image resize = resize_image(crop, w, h);
        d.X.vals[i] = resize.data;
        d.y.vals[i] = crop.data;
        free_image(im);
    }

    if(m) free(paths);
    return d;
}

/* Regression loader: augmented images with k float targets per image.
   (continues on the next source line) */
data load_data_regression(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
d.y = load_regression_labels_paths(paths, n, k);
    if(m) free(paths);
    return d;
}

/* Shallow view over a bank of data structs: row i of the result points to
   row i of orig[inds[i]]. Assumes all orig[] entries share dimensions. */
data select_data(data *orig, int *inds)
{
    data d = {0};
    d.shallow = 1;
    d.w = orig[0].w;
    d.h = orig[0].h;

    d.X.rows = orig[0].X.rows;
    d.y.rows = orig[0].X.rows;

    d.X.cols = orig[0].X.cols;
    d.y.cols = orig[0].y.cols;

    d.X.vals = (float**)calloc(orig[0].X.rows, sizeof(float *));
    d.y.vals = (float**)calloc(orig[0].y.rows, sizeof(float *));
    int i;
    for(i = 0; i < d.X.rows; ++i){
        d.X.vals[i] = orig[inds[i]].X.vals[i];
        d.y.vals[i] = orig[inds[i]].y.vals[i];
    }
    return d;
}

/* Cut every image of `orig` into a divs x divs grid of overlapping tiles
   (each tile scaled by `size`), returning divs*divs new data structs.
   NOTE(review): the inner "omp parallel for" nests inside the outer
   parallel region; with nested parallelism disabled (the usual default)
   it runs single-threaded per outer thread — confirm this is intentional. */
data *tile_data(data orig, int divs, int size)
{
    data *ds = (data*)calloc(divs*divs, sizeof(data));
    int i, j;
#pragma omp parallel for
    for(i = 0; i < divs*divs; ++i){
        data d;
        d.shallow = 0;
        d.w = orig.w/divs * size;
        d.h = orig.h/divs * size;
        d.X.rows = orig.X.rows;
        d.X.cols = d.w*d.h*3;
        d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));

        d.y = copy_matrix(orig.y);
#pragma omp parallel for
        for(j = 0; j < orig.X.rows; ++j){
            // Tile origin, centered so oversized tiles overlap neighbors.
            int x = (i%divs) * orig.w / divs - (d.w - orig.w/divs)/2;
            int y = (i/divs) * orig.h / divs - (d.h - orig.h/divs)/2;
            image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[j]);
            d.X.vals[j] = crop_image(im, x, y, d.w, d.h).data;
        }
        ds[i] = d;
    }
    return ds;
}

/* Resize every image in `orig` to w x h; labels are deep-copied unchanged. */
data resize_data(data orig, int w, int h)
{
    data d = {0};
    d.shallow = 0;
    d.w = w;
    d.h = h;
    int i;
    d.X.rows = orig.X.rows;
    d.X.cols = w*h*3;
    d.X.vals = (float**)calloc(d.X.rows, sizeof(float*));

    d.y = copy_matrix(orig.y);
#pragma omp parallel for
    for(i = 0; i < orig.X.rows; ++i){
        image im = float_to_image(orig.w, orig.h, 3, orig.X.vals[i]);
        d.X.vals[i] = resize_image(im, w, h).data;
    }
    return d;
}

/* Augmented classification loader (optionally hierarchy-aware).
   (continues on the next source line) */
data load_data_augment(char **paths, int n, int m, char **labels, int k, tree *hierarchy, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure, int center)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.shallow = 0;
    d.w=size;
    d.h=size;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect,
hue, saturation, exposure, center);
    d.y = load_labels_paths(paths, n, labels, k, hierarchy);
    if(m) free(paths);
    return d;
}

/* Multi-label tag loader: augmented images with load_tags_paths targets. */
data load_data_tag(char **paths, int n, int m, int k, int min, int max, int size, float angle, float aspect, float hue, float saturation, float exposure)
{
    if(m) paths = get_random_paths(paths, n, m);
    data d = {0};
    d.w = size;
    d.h = size;
    d.shallow = 0;
    d.X = load_image_augment_paths(paths, n, min, max, size, angle, aspect, hue, saturation, exposure, 0);
    d.y = load_tags_paths(paths, n, k);
    if(m) free(paths);
    return d;
}

/* Stack two matrices row-wise; the result shares the row pointers of both
   inputs (shallow — do not double-free). */
matrix concat_matrix(matrix m1, matrix m2)
{
    int i, count = 0;
    matrix m;
    m.cols = m1.cols;
    m.rows = m1.rows+m2.rows;
    m.vals = (float**)calloc(m1.rows + m2.rows, sizeof(float*));
    for(i = 0; i < m1.rows; ++i){
        m.vals[count++] = m1.vals[i];
    }
    for(i = 0; i < m2.rows; ++i){
        m.vals[count++] = m2.vals[i];
    }
    return m;
}

/* Shallow row-wise concatenation of two data structs. */
data concat_data(data d1, data d2)
{
    data d = {0};
    d.shallow = 1;
    d.X = concat_matrix(d1.X, d2.X);
    d.y = concat_matrix(d1.y, d2.y);
    d.w = d1.w;
    d.h = d1.h;
    return d;
}

/* Fold n data structs into one, freeing the intermediate shallow wrappers. */
data concat_datas(data *d, int n)
{
    int i;
    data out = {0};
    for(i = 0; i < n; ++i){
        data new = concat_data(d[i], out);
        free_data(out);
        out = new;
    }
    return out;
}

/* Load a CSV, pop column `target` and one-hot encode it (k classes) as y. */
data load_categorical_data_csv(char *filename, int target, int k)
{
    data d = {0};
    d.shallow = 0;
    matrix X = csv_to_matrix(filename);
    float *truth_1d = pop_column(&X, target);
    float **truth = one_hot_encode(truth_1d, X.rows, k);
    matrix y;
    y.rows = X.rows;
    y.cols = k;
    y.vals = truth;
    d.X = X;
    d.y = y;
    free(truth_1d);
    return d;
}

/* Load one CIFAR-10 binary batch file (10000 records of 1 label byte +
   3072 pixel bytes); pixels are scaled to [0,1].
   (continues on the next source line) */
data load_cifar10_data(char *filename)
{
    data d = {0};
    d.shallow = 0;
    long i,j;
    matrix X = make_matrix(10000, 3072);
    matrix y = make_matrix(10000, 10);
    d.X = X;
    d.y = y;

    FILE *fp = fopen(filename, "rb");
    if(!fp) file_error(filename);
    for(i = 0; i < 10000; ++i){
        unsigned char bytes[3073];
        fread(bytes, 1, 3073, fp);  // NOTE(review): return value unchecked — a truncated file leaves `bytes` stale
        int class = bytes[0];
        y.vals[i][class] = 1;
        for(j = 0; j < X.cols; ++j){
            X.vals[i][j] = (double)bytes[j+1];
        }
    }
    scale_data_rows(d, 1./255);
//normalize_data_rows(d); fclose(fp); return d; } void get_random_batch(data d, int n, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = rand()%d.X.rows; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void get_next_batch(data d, int n, int offset, float *X, float *y) { int j; for(j = 0; j < n; ++j){ int index = offset + j; memcpy(X+j*d.X.cols, d.X.vals[index], d.X.cols*sizeof(float)); if(y) memcpy(y+j*d.y.cols, d.y.vals[index], d.y.cols*sizeof(float)); } } void smooth_data(data d) { int i, j; float scale = 1. / d.y.cols; float eps = .1; for(i = 0; i < d.y.rows; ++i){ for(j = 0; j < d.y.cols; ++j){ d.y.vals[i][j] = eps * scale + (1-eps) * d.y.vals[i][j]; } } } data load_all_cifar10() { data d = {0}; d.shallow = 0; int i,j,b; matrix X = make_matrix(50000, 3072); matrix y = make_matrix(50000, 10); d.X = X; d.y = y; for(b = 0; b < 5; ++b){ char buff[256]; sprintf(buff, "data/cifar/cifar-10-batches-bin/data_batch_%d.bin", b+1); FILE *fp = fopen(buff, "rb"); if(!fp) file_error(buff); for(i = 0; i < 10000; ++i){ unsigned char bytes[3073]; fread(bytes, 1, 3073, fp); int class = bytes[0]; y.vals[i+b*10000][class] = 1; for(j = 0; j < X.cols; ++j){ X.vals[i+b*10000][j] = (double)bytes[j+1]; } } fclose(fp); } //normalize_data_rows(d); scale_data_rows(d, 1./255); smooth_data(d); return d; } data load_go(char *filename) { FILE *fp = fopen(filename, "rb"); matrix X = make_matrix(3363059, 361); matrix y = make_matrix(3363059, 361); int row, col; if(!fp) file_error(filename); char *label; int count = 0; while((label = fgetl(fp))){ int i; if(count == X.rows){ X = resize_matrix(X, count*2); y = resize_matrix(y, count*2); } sscanf(label, "%d %d", &row, &col); char *board = fgetl(fp); int index = row*19 + col; y.vals[count][index] = 1; for(i = 0; i < 19*19; ++i){ float val = 0; if(board[i] == '1') val = 1; else if(board[i] == '2') val = -1; X.vals[count][i] = val; } ++count; free(label); 
free(board);
    }
    X = resize_matrix(X, count);    // shrink to the number of examples actually read
    y = resize_matrix(y, count);

    data d = {0};
    d.shallow = 0;
    d.X = X;
    d.y = y;
    fclose(fp);
    return d;
}

/* In-place Fisher-Yates-style shuffle of paired (X,y) rows.
   NOTE(review): uses rand()%i (excludes i itself), so the shuffle is
   slightly biased relative to the textbook algorithm. */
void randomize_data(data d)
{
    int i;
    for(i = d.X.rows-1; i > 0; --i){
        int index = rand()%i;
        float *swap = d.X.vals[index];
        d.X.vals[index] = d.X.vals[i];
        d.X.vals[i] = swap;

        swap = d.y.vals[index];
        d.y.vals[index] = d.y.vals[i];
        d.y.vals[i] = swap;
    }
}

/* Multiply every input row by s. */
void scale_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        scale_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Add s to every input element. */
void translate_data_rows(data d, float s)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        translate_array(d.X.vals[i], d.X.cols, s);
    }
}

/* Deep copy of X and y; note boxes/num_boxes are copied by reference. */
data copy_data(data d)
{
    data c = {0};
    c.w = d.w;
    c.h = d.h;
    c.shallow = 0;
    c.num_boxes = d.num_boxes;
    c.boxes = d.boxes;
    c.X = copy_matrix(d.X);
    c.y = copy_matrix(d.y);
    return c;
}

/* Normalize each input row in place (see normalize_array). */
void normalize_data_rows(data d)
{
    int i;
    for(i = 0; i < d.X.rows; ++i){
        normalize_array(d.X.vals[i], d.X.cols);
    }
}

/* Shallow view of partition `part` of `total` equal-ish slices of d. */
data get_data_part(data d, int part, int total)
{
    data p = {0};
    p.shallow = 1;
    p.X.rows = d.X.rows * (part + 1) / total - d.X.rows * part / total;
    p.y.rows = d.y.rows * (part + 1) / total - d.y.rows * part / total;
    p.X.cols = d.X.cols;
    p.y.cols = d.y.cols;
    p.X.vals = d.X.vals + d.X.rows * part / total;
    p.y.vals = d.y.vals + d.y.rows * part / total;
    return p;
}

/* Shallow sample (with replacement) of `num` random rows of d. */
data get_random_data(data d, int num)
{
    data r = {0};
    r.shallow = 1;

    r.X.rows = num;
    r.y.rows = num;

    r.X.cols = d.X.cols;
    r.y.cols = d.y.cols;

    r.X.vals = (float**)calloc(num, sizeof(float *));
    r.y.vals = (float**)calloc(num, sizeof(float *));

    int i;
    for(i = 0; i < num; ++i){
        int index = rand()%d.X.rows;
        r.X.vals[i] = d.X.vals[index];
        r.y.vals[i] = d.y.vals[index];
    }
    return r;
}

/* Split d into {train, test}: slice `part` of `total` becomes the test set,
   the rest the train set. Both results are shallow views.
   (continues on the next source line) */
data *split_data(data d, int part, int total)
{
    data *split = (data*)calloc(2, sizeof(data));
    int i;
    int start = part*d.X.rows/total;
    int end = (part+1)*d.X.rows/total;
    data train;
    data test;
    train.shallow = test.shallow = 1;
    test.X.rows = test.y.rows = end-start;
    train.X.rows = train.y.rows = d.X.rows -
(end-start);
    train.X.cols = test.X.cols = d.X.cols;
    train.y.cols = test.y.cols = d.y.cols;

    train.X.vals = (float**)calloc(train.X.rows, sizeof(float*));
    test.X.vals = (float**)calloc(test.X.rows, sizeof(float*));
    train.y.vals = (float**)calloc(train.y.rows, sizeof(float*));
    test.y.vals = (float**)calloc(test.y.rows, sizeof(float*));

    // Rows before the test slice go to train...
    for(i = 0; i < start; ++i){
        train.X.vals[i] = d.X.vals[i];
        train.y.vals[i] = d.y.vals[i];
    }
    // ...the slice itself to test...
    for(i = start; i < end; ++i){
        test.X.vals[i-start] = d.X.vals[i];
        test.y.vals[i-start] = d.y.vals[i];
    }
    // ...and the remainder back to train.
    for(i = end; i < d.X.rows; ++i){
        train.X.vals[i-(end-start)] = d.X.vals[i];
        train.y.vals[i-(end-start)] = d.y.vals[i];
    }
    split[0] = train;
    split[1] = test;
    return split;
}

#undef class
#undef new
/* ==== runpf.h — start of the next concatenated file ==== */
/*
 * runpf.cuh
 *
 *  Created on: 16/10/2015
 *      Author: Igor M. Araújo
 */

#ifndef RUNPF_CUH_
#define RUNPF_CUH_

#include <float.h>
#include <cuComplex.h>
#include <util/reduce.h>

// Power-flow algorithm selectors.
const int NR = 0;       // Newton-Raphson
const int FDXB = 1;     // fast-decoupled, XB variant
const int FDBX = 2;     // fast-decoupled, BX variant
// Linear-solver backend selectors.
const int MKL_DSS = 0;
const int Eigen_SimplicialLLT = 1;
const int Eigen_SimplicialLDLT = 2;
const int Eigen_ConjugateGradient = 3;
const int Eigen_BiCGSTAB = 4;
const int Eigen_SparseLU = 5;
const int Eigen_SparseQR = 6;
const int cuSolver = 6;     // NOTE(review): same value as Eigen_SparseQR — probably intended to be 7; confirm before selecting cuSolver.

// Device-side (GPU __constant__) problem dimensions / configuration.
__constant__ int D_ALG;
__constant__ int D_NBUS;
__constant__ int D_NPV;
__constant__ int D_NPQ;
__constant__ int D_NBRANCH;
__constant__ int D_THREADS;

// Host-side mirrors of the problem dimensions / configuration.
int H_ALG = NR;
int H_NBUS;
int H_NBRANCH;
int H_NPV;
int H_NPQ;
int H_THREADS;
int H_NTESTS;
int H_LinearSolver = MKL_DSS;

#define MAX_IT_NR 10 // maximum number of iterations (Newton-Raphson)
#define MAX_IT_FD 30 // maximum number of iterations (fast-decoupled)
#define EPS 1e-8     // acceptable error for the stopping condition

#include <util/complexUtils.h>

cudaStream_t *stream = 0;

// Host- and device-side network data.
Bus *buses;
Branch *branches;
unsigned int *pv;
unsigned int *pq;
Bus *device_buses;
Branch *device_branches;
unsigned int *device_pv;
unsigned int *device_pq;

cuDoubleComplex *V;     // complex bus voltages

// Admittance and incidence matrices in CSR/COO form (Ybus, Yt, Yf, Ysh, Cf, Ct).
int nnzYbus = 0;
cuDoubleComplex *csrValYbus;
int *csrRowPtrYbus;
int *csrColIndYbus;

int nnzYt = 0;
cuDoubleComplex *csrValYt;
int *csrRowPtrYt;
int *csrColIndYt;

int nnzYf = 0;
cuDoubleComplex *csrValYf;
int *csrRowPtrYf;
int *csrColIndYf;

int nnzYsh = 0;
cuDoubleComplex *csrValYsh;
int *csrRowPtrYsh;
int *csrColIndYsh;

int nnzCf = 0;
cuDoubleComplex *csrValCf;
int *csrRowPtrCf;
int *csrColIndCf;

int nnzCt = 0;
cuDoubleComplex *csrValCt;
int *csrRowPtrCt;
int *csrColIndCt;

int nnzCfcoo = 0;
cuDoubleComplex *cooValCf;
int *cooRowCf;
int *cooColCf;

int nnzCtcoo = 0;
cuDoubleComplex *cooValCt;
int *cooRowCt;
int *cooColCt;

// cuSPARSE scratch buffer and permutation workspace.
size_t pBufferSizeInBytes = 0;
void *pBuffer;
int nPermutation;
int *permutation;

// Sparse products Cf'*Yf, Ct'*Yt and their sum.
int nnzCfYf = 0;
cuDoubleComplex *csrValCfYf;
int *csrRowPtrCfYf;
int *csrColIndCfYf;

int nnzCtYt = 0;
cuDoubleComplex
*csrValCtYt; int *csrRowPtrCtYt; int *csrColIndCtYt; int nnzCfYfCtYt = 0; cuDoubleComplex *csrValCfYfCtYt; int *csrRowPtrCfYfCtYt; int *csrColIndCfYfCtYt; cusparseHandle_t sparseHandle; cusparseMatDescr_t descrCf; cusparseMatDescr_t descrYf; cusparseMatDescr_t descrCfYf; cusparseMatDescr_t descrCt; cusparseMatDescr_t descrYt; cusparseMatDescr_t descrCtYt; cusparseMatDescr_t descrCfYfCtYt; cusparseMatDescr_t descrYbus; cusparseMatDescr_t descrYsh; double *F; double *dx; cuDoubleComplex *diagIbus; int nnzJ = 0; double *csrValJ; int *csrRowPtrJ; int *csrColIndJ; int *d_cooRowJ = 0; int *cooRowJ = 0; int *h_csrRowPtrJ; int *h_csrColIndJ; bool *converged_test; double *vLoss; Bus *tmpBuses; Branch *tmpBranches; double *csrBpVal; int *csrBpCol; int *csrBpRow; double *cooBpVal; int *cooBpCol; int *cooBpRow; double *csrBppVal; int *csrBppCol; int *csrBppRow; double *cooBppVal; int *cooBppCol; int *cooBppRow; int tmpNnzYbus = 0; cuDoubleComplex *tmpCsrValYbus; int *tmpCsrRowPtrYbus; int *tmpCsrColIndYbus; int tmpNnzYt = 0; cuDoubleComplex *tmpCsrValYt; int *tmpCsrRowPtrYt; int *tmpCsrColIndYt; int tmpNnzYf = 0; cuDoubleComplex *tmpCsrValYf; int *tmpCsrRowPtrYf; int *tmpCsrColIndYf; double *P; double *Q; pso::Particula::Estrutura *d_estrutura; double *d_enxame = 0; double* dReduceLoss; double* dtReduceLoss; double* hReduceLoss; int reduceBlocks; int reduceThreads; int reduceThreadsBlocks; #include <powerflow/makeYbus.h> #include <powerflow/makeB.h> #include <powerflow/newtonpf.h> #include <powerflow/fdpf.h> __host__ void mkl_computeVoltage( Bus *buses, cuDoubleComplex *V, vector<pso::Particula::Estrutura> &estrutura, pso::Particula &particula) { #pragma omp parallel for for (int id = 0; id < H_NBUS; id++) { Bus l_bus = buses[id]; double Vbus = (l_bus.indiceEstrutura != -1 && estrutura[l_bus.indiceEstrutura].tipo == pso::Particula::Estrutura::AVR) ? 
particula[l_bus.indiceEstrutura] : l_bus.V ; V[id] = cuCmul(make_cuDoubleComplex(Vbus, 0), cuCexp(make_cuDoubleComplex(0, l_bus.O))); if (l_bus.type == l_bus.PV || l_bus.type == l_bus.SLACK) { V[id] = cuCmul(make_cuDoubleComplex(Vbus / cuCabs(V[id]), 0.0), V[id]); } } } __host__ double mkl_computeLoss( Branch *branches, cuDoubleComplex *V, int nnzYf, int* csrRowPtrYf, int* csrColIndYf, cuDoubleComplex* csrValYf, int nnzYt, int* csrRowPtrYt, int* csrColIndYt, cuDoubleComplex* csrValYt) { double sumLoss = 0.0; #pragma omp parallel for reduction(+:sumLoss) for ( int id = 0;id < H_NBRANCH; id++) { cuDoubleComplex sum = make_cuDoubleComplex(0, 0); cuDoubleComplex sum2 = make_cuDoubleComplex(0, 0); for(int k = csrRowPtrYf[id] - BASE_INDEX, endFor = csrRowPtrYf[id + 1] - BASE_INDEX; k < endFor; k++){ sum = cuCadd(sum, cuCmul(csrValYf[k], V[csrColIndYf[k] - BASE_INDEX])); } for(int k = csrRowPtrYt[id] - BASE_INDEX, endFor = csrRowPtrYt[id + 1] - BASE_INDEX; k < endFor; k++){ sum2 = cuCadd(sum2, cuCmul(csrValYt[k], V[csrColIndYt[k] - BASE_INDEX])); } Branch l_branch = branches[id]; cuDoubleComplex l_loss; l_loss = cuCadd(cuCmul(cuConj(sum), V[l_branch.from]), cuCmul(cuConj(sum2), V[l_branch.to])); sumLoss += cuCreal(l_loss); } return sumLoss; } __host__ double mkl_runpf(vector<pso::Particula::Estrutura> &estrutura, pso::Particula &particula) { double start; start =GetTimer(); mkl_computeVoltage( buses, V, estrutura, particula); timeTable[TIME_COMPUTEVOLTAGE] += GetTimer() - start; #ifdef DEBUG printf("V = \n"); for(int i = 0; i < H_NBUS; i++) { printf("\t[%d] -> %.4e %c %.4ei\n",i , V[i].x, ((V[i].y < 0) ? '-' : '+'), ((V[i].y < 0) ? 
-V[i].y : V[i].y)); } #endif start =GetTimer(); mkl_makeYbus(estrutura, particula, buses, branches); timeTable[TIME_MAKEYBUS] += GetTimer() - start; #ifdef DEBUG printf("Yf = \n"); printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n",H_NBRANCH, H_NBUS,nnzYf, nnzYf * 100.0f / (H_NBRANCH * H_NBUS)); for(int j = 0; j < H_NBUS; j++){ for(int i = 0; i < H_NBRANCH; i++){ for(int k = csrRowPtrYf[i] - BASE_INDEX; k < csrRowPtrYf[i + 1] - BASE_INDEX; k++){ if(j == csrColIndYf[k] - BASE_INDEX){ cuDoubleComplex value = csrValYf[k]; printf("\t(%d, %d)\t->\t%.4e%c%.4ei\n", i+1, j+1, value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? -value.y : value.y)); break; } } } } printf("Yt = \n"); printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n",H_NBRANCH, H_NBUS,nnzYt, nnzYt * 100.0f / (H_NBRANCH * H_NBUS)); for(int j = 0; j < H_NBUS; j++){ for(int i = 0; i < H_NBRANCH; i++){ for(int k = csrRowPtrYt[i] - BASE_INDEX; k < csrRowPtrYt[i + 1] - BASE_INDEX; k++){ if(j == csrColIndYt[k] - BASE_INDEX){ cuDoubleComplex value = csrValYt[k]; printf("\t(%d, %d)\t->\t%.4e%c%.4ei\n", i+1, j+1, value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? -value.y : value.y)); break; } } } } printf("Ybus = \n"); printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n",H_NBUS, H_NBUS,nnzYbus, nnzYbus * 100.0f / (H_NBUS * H_NBUS)); for(int j = 0; j < H_NBUS; j++){ for(int i = 0; i < H_NBUS; i++){ for(int k = csrRowPtrYbus[i] - BASE_INDEX; k < csrRowPtrYbus[i + 1] - BASE_INDEX; k++){ if(j == csrColIndYbus[k] - BASE_INDEX){ cuDoubleComplex value = csrValYbus[k]; printf("\t(%d, %d)\t->\t%.4e%c%.4ei\n", i+1, j+1, value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? 
-value.y : value.y)); break; } } } } #endif bool converged = false; switch (H_ALG) { case NR: start =GetTimer(); converged = mkl_newtonpf(); timeTable[TIME_NEWTONPF] += GetTimer() - start; break; case FDXB: case FDBX: mkl_makeB(estrutura, particula); converged = mkl_fdpf(); break; } double loss = 0; if (converged) { start =GetTimer(); loss = mkl_computeLoss( branches, V, nnzYf, csrRowPtrYf, csrColIndYf, csrValYf, nnzYt, csrRowPtrYt, csrColIndYt, csrValYt); timeTable[TIME_COMPUTELOSS] += GetTimer() - start; } else { loss = DBL_MAX; } MKL_free(csrColIndYbus); MKL_free(csrValYbus); return loss; } __host__ void mkl_init(Topology& topology, int nTest, vector<pso::Particula::Estrutura> estrutura, int algPF) { H_NBUS = topology.buses.size(); H_NBRANCH = topology.branches.size(); H_NPV = topology.idPVbuses.size(); H_NPQ = topology.idPQbuses.size(); H_ALG = algPF; H_NTESTS = nTest; buses = thrust::raw_pointer_cast(topology.buses.data()); branches = thrust::raw_pointer_cast(topology.branches.data()); pv = thrust::raw_pointer_cast(topology.idPVbuses.data()); pq = thrust::raw_pointer_cast(topology.idPQbuses.data()); V = (cuDoubleComplex*) MKL_malloc(H_NBUS * sizeof(cuDoubleComplex), 64); nnzYf = 2 * H_NBRANCH; csrValYf = (cuDoubleComplex*) MKL_malloc(nnzYf * sizeof(cuDoubleComplex), 64); csrColIndYf = (int*) MKL_malloc(nnzYf * sizeof(int), 64); csrRowPtrYf = (int*) MKL_malloc((H_NBRANCH + 1) * sizeof(int), 64); nnzYt = 2 * H_NBRANCH; csrValYt = (cuDoubleComplex*) MKL_malloc(nnzYt * sizeof(cuDoubleComplex), 64); csrColIndYt = (int*) MKL_malloc(nnzYt * sizeof(int), 64); csrRowPtrYt = (int*) MKL_malloc((H_NBRANCH + 1) * sizeof(int), 64); nnzYsh = H_NBUS; csrValYsh = (cuDoubleComplex*) MKL_malloc(nnzYsh * sizeof(cuDoubleComplex), 64); csrColIndYsh = (int*) MKL_malloc(nnzYsh * sizeof(int), 64); csrRowPtrYsh = (int*) MKL_malloc((H_NBUS + 1) * sizeof(int), 64); nnzCf = H_NBRANCH; csrValCf = (cuDoubleComplex*) MKL_malloc(nnzCf * sizeof(cuDoubleComplex), 64); csrColIndCf = (int*) 
MKL_malloc(nnzCf * sizeof(int), 64); csrRowPtrCf = (int*) MKL_malloc((H_NBUS + 1) * sizeof(int), 64); nnzCt = H_NBRANCH; csrValCt = (cuDoubleComplex*) MKL_malloc(nnzCt * sizeof(cuDoubleComplex), 64); csrColIndCt = (int*) MKL_malloc(nnzCt * sizeof(int), 64); csrRowPtrCt = (int*) malloc((H_NBUS + 1) * sizeof(int)); nnzCfcoo = H_NBRANCH; cooValCf = (cuDoubleComplex*) MKL_malloc(nnzCfcoo * sizeof(cuDoubleComplex), 64); cooColCf = (int*) MKL_malloc(nnzCfcoo * sizeof(int), 64); cooRowCf = (int*) MKL_malloc(nnzCfcoo * sizeof(int), 64); nnzCtcoo = H_NBRANCH; cooValCt = (cuDoubleComplex*) MKL_malloc(nnzCtcoo * sizeof(cuDoubleComplex), 64); cooColCt = (int*) MKL_malloc(nnzCtcoo * sizeof(int), 64); cooRowCt = (int*) MKL_malloc(nnzCtcoo * sizeof(int), 64); csrRowPtrYbus = (int*) MKL_malloc((H_NBUS + 1) * sizeof(int), 64); csrRowPtrCfYf = (int*) MKL_malloc((H_NBUS + 1) * sizeof(int), 64); csrRowPtrCtYt = (int*) MKL_malloc((H_NBUS + 1) * sizeof(int), 64); csrRowPtrCfYfCtYt = (int*) MKL_malloc((H_NBUS + 1) * sizeof(int), 64); int length = H_NPV + 2 * H_NPQ; switch(H_ALG){ case NR: F = (double*)MKL_malloc(length * sizeof(double), 64); dx = (double*)MKL_malloc(length * sizeof(double), 64); diagIbus = (cuDoubleComplex*) MKL_malloc( H_NBUS * sizeof(cuDoubleComplex), 64); nnzJ = 0; csrRowPtrJ = (int*) MKL_malloc((length + 1) * sizeof(int), 64); break; case FDBX: case FDXB: csrBpRow = (int*) MKL_malloc((H_NPV + H_NPQ + 2) * sizeof(int), 64); csrBppRow = (int*) MKL_malloc( (H_NPQ + 2) * sizeof(int), 64); tmpBuses = (Bus*) MKL_malloc( H_NBUS * sizeof(Bus), 64); tmpBranches = (Branch*) MKL_malloc( H_NBRANCH * sizeof(Branch), 64); tmpNnzYf = 2 * H_NBRANCH; tmpCsrValYf = (cuDoubleComplex*) MKL_malloc( nnzYf * sizeof(cuDoubleComplex), 64); tmpCsrColIndYf = (int*) MKL_malloc( nnzYf * sizeof( int ), 64); tmpCsrRowPtrYf = (int*) MKL_malloc( (H_NBRANCH + 1) * sizeof( int ), 64); tmpNnzYt = 2 * H_NBRANCH; tmpCsrValYt = (cuDoubleComplex*) MKL_malloc( nnzYt * sizeof(cuDoubleComplex), 64); 
tmpCsrColIndYt = (int*) MKL_malloc( nnzYt * sizeof( int ), 64); tmpCsrRowPtrYt = (int*) MKL_malloc( (H_NBRANCH + 1) * sizeof( int ), 64); tmpCsrRowPtrYbus = (int*) MKL_malloc( (H_NBUS + 1) * sizeof( int ), 64); P = (double*) MKL_malloc( (H_NPV + H_NPQ) * sizeof( double ), 64); Q = (double*) MKL_malloc( H_NPQ * sizeof( double ), 64); break; } } __host__ void mkl_clean(){ MKL_free(V); MKL_free(csrColIndYf ); MKL_free(csrColIndYt ); MKL_free(csrColIndYsh ); MKL_free(csrColIndCt ); MKL_free(csrColIndCf ); MKL_free(cooColCt ); MKL_free(cooColCf ); MKL_free(csrRowPtrYbus ); MKL_free(csrRowPtrYf ); MKL_free(csrRowPtrYt ); MKL_free(csrRowPtrYsh ); free(csrRowPtrCt ); MKL_free(csrRowPtrCf ); MKL_free(cooRowCt ); MKL_free(cooRowCf ); MKL_free(csrRowPtrCfYf ); MKL_free(csrRowPtrCtYt ); MKL_free(csrRowPtrCfYfCtYt ); MKL_free(csrValYf ); MKL_free(csrValYt ); MKL_free(csrValYsh ); MKL_free(csrValCf ); MKL_free(csrValCt ); MKL_free(cooValCf ); MKL_free(cooValCt ); MKL_free(pBuffer); free(converged_test); switch(H_ALG){ case NR: MKL_free(F); MKL_free(dx); MKL_free(csrValJ); MKL_free(csrRowPtrJ); MKL_free(csrColIndJ); MKL_free(diagIbus); free(cooRowJ); free(h_csrColIndJ); free(h_csrRowPtrJ); break; case FDBX: case FDXB: MKL_free(cooBpRow); MKL_free(cooBpCol); MKL_free(cooBpVal); MKL_free(csrBpVal); MKL_free(csrBpCol); MKL_free(csrBpRow); MKL_free(cooBppRow); MKL_free(cooBppCol); MKL_free(cooBppVal); MKL_free(csrBppVal); MKL_free(csrBppCol); MKL_free(csrBppRow); MKL_free(tmpBuses); MKL_free(tmpBranches); MKL_free(tmpCsrColIndYbus ); MKL_free(tmpCsrColIndYf ); MKL_free(tmpCsrColIndYt ); MKL_free(tmpCsrRowPtrYbus ); MKL_free(tmpCsrRowPtrYf ); MKL_free(tmpCsrRowPtrYt ); MKL_free(tmpCsrValYbus ); MKL_free(tmpCsrValYf ); MKL_free(tmpCsrValYt ); MKL_free(P ); MKL_free(Q ); break; } } __global__ void hybrid_computeVoltage( Bus *buses, cuDoubleComplex *V, int i, pso::Particula::Estrutura *d_estrutura, double *d_enxame) { int id = ID(); if (id < D_NBUS) { Bus l_bus = buses[id]; double Vbus = 
(l_bus.indiceEstrutura != -1 && d_estrutura[l_bus.indiceEstrutura].tipo == pso::Particula::Estrutura::AVR) ? d_enxame[l_bus.indiceEstrutura] : l_bus.V ; V[id] = cuCmul(make_cuDoubleComplex(Vbus, 0), cuCexp(make_cuDoubleComplex(0, l_bus.O))); if (l_bus.type == l_bus.PV || l_bus.type == l_bus.SLACK) { V[id] = cuCmul(make_cuDoubleComplex(Vbus / cuCabs(V[id]), 0.0), V[id]); } } } __global__ void hybrid_computeLoss( int nTest, Branch *branches, cuDoubleComplex *V, int nnzYf, int* csrRowPtrYf, int* csrColIndYf, cuDoubleComplex* csrValYf, int nnzYt, int* csrRowPtrYt, int* csrColIndYt, cuDoubleComplex* csrValYt, double *vLoss) { int id = ID(); if (id < D_NBRANCH) { cuDoubleComplex sum = make_cuDoubleComplex(0, 0); cuDoubleComplex sum2 = make_cuDoubleComplex(0, 0); for(int k = csrRowPtrYf[id], endFor = csrRowPtrYf[id + 1]; k < endFor; k++){ sum = cuCadd(sum, cuCmul(csrValYf[k], V[csrColIndYf[k]])); } for(int k = csrRowPtrYt[id], endFor = csrRowPtrYt[id + 1]; k < endFor; k++){ sum2 = cuCadd(sum2, cuCmul(csrValYt[k], V[csrColIndYt[k]])); } Branch l_branch = branches[id]; cuDoubleComplex l_loss; l_loss = cuCadd(cuCmul(cuConj(sum), V[l_branch.from]), cuCmul(cuConj(sum2), V[l_branch.to])); vLoss[id] = cuCreal(l_loss); } } __host__ void reduceLoss(double* d_idata, double* d_odata, int threads, int blocks, int nElements, cudaStream_t* stream){ int smemSize = sizeof(double) * threads * 2; int dimGrid = blocks; int dimBlock = threads; switch (threads) { case 1024: reduce<1024><<< dimGrid, dimBlock, smemSize, *stream>>>(d_idata, d_odata, nElements); break; case 512: reduce<512><<< dimGrid, dimBlock, smemSize, *stream>>>(d_idata, d_odata, nElements); break; case 256: reduce<256><<< dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 128: reduce<128><<< dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 64: reduce< 64><<< dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 32: reduce< 32><<< 
dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 16: reduce< 16><<< dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 8: reduce< 8><<< dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 4: reduce< 4><<< dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 2: reduce< 2><<< dimGrid, dimBlock, smemSize, *stream >>>(d_idata, d_odata, nElements); break; case 1: reduce< 1><<< dimGrid, threads, smemSize, *stream >>>(d_idata, d_odata, nElements); break; } } __host__ void hybrid_runpf(vector<pso::Particula::Estrutura> &estrutura, vector<pso::Particula> &enxame) { double start; start = GetTimer(); if(d_enxame == 0){ checkCudaErrors(cudaMalloc((void**) &d_enxame, sizeof(double) * enxame.size() * estrutura.size())); } for(int i = 0; i < enxame.size(); i++){ checkCudaErrors(cudaMemcpy(d_enxame + i * estrutura.size(),enxame[i].X.data(), sizeof(double) * estrutura.size(), cudaMemcpyHostToDevice)); } timeTable[TIME_INIT_STRUCT_PSO] += GetTimer() - start; start = GetTimer(); for (int i = 0; i < H_NTESTS; i++) { //checkCudaErrors(cudaStreamCreate(&stream[i])); hybrid_computeVoltage<<<BLOCKS(H_NBUS, H_THREADS), H_THREADS, 0, stream[i]>>>( device_buses, V + H_NBUS * i, i, d_estrutura, d_enxame + estrutura.size() * i); } #ifdef DEBUG checkCudaErrors(cudaDeviceSynchronize()); for (int t = 0; t < H_NTESTS; t++) { cuDoubleComplex *h_V = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * H_NBUS); cudaMemcpy(h_V, V + H_NBUS * t, sizeof(cuDoubleComplex) * H_NBUS, cudaMemcpyDeviceToHost); printf("V[%d] = \n", t); for(int i = 0; i < H_NBUS; i++) { printf("\t[%d] -> %.4e %c %.4ei\n",i , h_V[i].x, ((h_V[i].y < 0) ? '-' : '+'), ((h_V[i].y < 0) ? 
-h_V[i].y : h_V[i].y)); } free(h_V); } #endif for (int i = 0; i < H_NTESTS; i++) { hybrid_makeYbus( i, estrutura.size(), device_buses, device_branches); } checkCudaErrors(cudaDeviceSynchronize()); timeTable[TIME_COMPUTEVOLTAGE] += GetTimer() - start; timeTable[TIME_MAKEYBUS] += GetTimer() - start; #ifdef DEBUG for (int t = 0; t < H_NTESTS; t++) { int *h_row = (int*) malloc(sizeof(int) * (H_NBRANCH + 1)); int *h_col = (int*) malloc(sizeof(int) * nnzYf); cuDoubleComplex *h_val = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * nnzYf); cudaMemcpy(h_row, csrRowPtrYf, sizeof(int) * (H_NBRANCH + 1), cudaMemcpyDeviceToHost); cudaMemcpy(h_col, csrColIndYf, sizeof(int) * nnzYf, cudaMemcpyDeviceToHost); cudaMemcpy(h_val, csrValYf + nnzYf * t, sizeof(cuDoubleComplex) * nnzYf, cudaMemcpyDeviceToHost); printf("Yf[%d] = \n", t); printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n", H_NBRANCH, H_NBUS, nnzYf, nnzYf * 100.0f / (H_NBRANCH * H_NBUS)); for(int j = 0; j < H_NBUS; j++){ for(int i = 0; i < H_NBRANCH; i++){ for(int k = h_row[i]; k < h_row[i + 1]; k++){ if(j == h_col[k]){ cuDoubleComplex value = h_val[k]; printf("\t(%d, %d)\t->\t%.4e%c%.4ei\n", i+1, j+1, value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? 
-value.y : value.y)); break; } } } } free(h_row); free(h_col); free(h_val); h_row = (int*) malloc(sizeof(int) * (H_NBRANCH + 1)); h_col = (int*) malloc(sizeof(int) * nnzYt); h_val = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * nnzYt); cudaMemcpy(h_row, csrRowPtrYt, sizeof(int) * (H_NBRANCH + 1), cudaMemcpyDeviceToHost); cudaMemcpy(h_col, csrColIndYt, sizeof(int) * nnzYt, cudaMemcpyDeviceToHost); cudaMemcpy(h_val, csrValYt + nnzYt * t, sizeof(cuDoubleComplex) * nnzYt, cudaMemcpyDeviceToHost); printf("Yt[%d] = \n", t); printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n", H_NBRANCH, H_NBUS, nnzYt, nnzYt * 100.0f / (H_NBRANCH * H_NBUS)); for(int j = 0; j < H_NBUS; j++){ for(int i = 0; i < H_NBRANCH; i++){ for(int k = h_row[i]; k < h_row[i + 1]; k++){ if(j == h_col[k]){ cuDoubleComplex value = h_val[k]; printf("\t(%d, %d)\t->\t%.4e%c%.4ei\n", i+1, j+1, value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? -value.y : value.y)); break; } } } } free(h_row); free(h_col); free(h_val); h_row = (int*) malloc(sizeof(int) * (H_NBRANCH + 1)); h_col = (int*) malloc(sizeof(int) * nnzYbus); h_val = (cuDoubleComplex*) malloc(sizeof(cuDoubleComplex) * nnzYbus); cudaMemcpy(h_row, csrRowPtrYbus, sizeof(int) * (H_NBUS + 1), cudaMemcpyDeviceToHost); cudaMemcpy(h_col, csrColIndYbus, sizeof(int) * nnzYbus, cudaMemcpyDeviceToHost); cudaMemcpy(h_val, csrValYbus + nnzYbus * t, sizeof(cuDoubleComplex) * nnzYbus, cudaMemcpyDeviceToHost); printf("Ybus[%d] = \n", t); printf("\tCompressed Sparse Column(rows = %d, cols = %d, nnz = %d [%.2lf])\n", H_NBUS, H_NBUS, nnzYbus, nnzYbus * 100.0f / (H_NBUS * H_NBUS)); for(int j = 0; j < H_NBUS; j++){ for(int i = 0; i < H_NBUS; i++){ for(int k = h_row[i]; k < h_row[i + 1]; k++){ if(j == h_col[k]){ cuDoubleComplex value = h_val[k]; printf("\t(%d, %d)\t->\t%.4e%c%.4ei\n", i+1, j+1, value.x,((value.y < 0.0) ? '-' : '+'),((value.y < 0.0) ? 
-value.y : value.y)); break; } } } } free(h_row); free(h_col); free(h_val); } #endif switch (H_ALG) { case NR: start = GetTimer(); hybrid_newtonpf(); timeTable[TIME_NEWTONPF] += GetTimer() - start; break; case FDXB: case FDBX: hybrid_makeB(H_NTESTS, estrutura.size()); hybrid_fdpf(); break; } double loss = 0; start = GetTimer(); for(int t = 0; t < H_NTESTS; t++) { if (converged_test[t]) { hybrid_computeLoss<<<BLOCKS(H_NBRANCH, H_THREADS), H_THREADS, 0, stream[t]>>>( t, device_branches, V + t * H_NBUS, nnzYf, csrRowPtrYf, csrColIndYf, csrValYf + t * nnzYf, nnzYt, csrRowPtrYt, csrColIndYt, csrValYt + t * nnzYt, vLoss + t * H_NBRANCH); } } checkCudaErrors(cudaDeviceSynchronize()); if(reduceBlocks == 1){ for(int t = 0; t < H_NTESTS; t++){ reduceLoss(vLoss + t * H_NBRANCH, dReduceLoss + t, reduceThreads, reduceBlocks, H_NBRANCH, &stream[t]); } } else { for(int t = 0; t < H_NTESTS; t++){ reduceLoss(vLoss + t * H_NBRANCH, dtReduceLoss + t * reduceBlocks, reduceThreads, reduceBlocks, H_NBRANCH, &stream[t]); reduceLoss(dtReduceLoss + t * reduceBlocks, dReduceLoss + t, reduceThreadsBlocks, 1, reduceBlocks, &stream[t]); } } checkCudaErrors(cudaDeviceSynchronize()); checkCudaErrors(cudaMemcpy(hReduceLoss, dReduceLoss,sizeof(double) * H_NTESTS, cudaMemcpyDeviceToHost)); for(int t = 0; t < H_NTESTS; t++) { if (converged_test[t]) { loss = hReduceLoss[t]; } else { loss = DBL_MAX; } enxame[t].mudarFitness(loss); } timeTable[TIME_COMPUTELOSS] += GetTimer() - start; } __host__ void hybrid_init(Topology& topology, int nTest, int nThreads, vector<pso::Particula::Estrutura> estrutura, int algPF) { H_NBUS = topology.buses.size(); H_NBRANCH = topology.branches.size(); H_NPV = topology.idPVbuses.size(); H_NPQ = topology.idPQbuses.size(); H_ALG = algPF; H_NTESTS = nTest; H_THREADS = nThreads; checkCudaErrors(cudaMalloc((void**) &d_estrutura, sizeof(pso::Particula::Estrutura) * estrutura.size())); checkCudaErrors(cudaMemcpy(d_estrutura, estrutura.data(),sizeof(pso::Particula::Estrutura) * 
estrutura.size(), cudaMemcpyHostToDevice)); stream = (cudaStream_t*) malloc(sizeof(cudaStream_t) * H_NTESTS); for(int i = 0; i < H_NTESTS; i++){ checkCudaErrors(cudaStreamCreate(&stream[i])); } checkCudaErrors(cudaMemcpyToSymbol(D_NBUS, &H_NBUS, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(D_NBRANCH, &H_NBRANCH, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(D_NPV, &H_NPV, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(D_NPQ, &H_NPQ, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(D_ALG, &H_ALG, sizeof(int))); checkCudaErrors(cudaMemcpyToSymbol(D_THREADS, &H_THREADS, sizeof(int))); buses = thrust::raw_pointer_cast(topology.buses.data()); branches = thrust::raw_pointer_cast(topology.branches.data()); pv = thrust::raw_pointer_cast(topology.idPVbuses.data()); pq = thrust::raw_pointer_cast(topology.idPQbuses.data()); checkCudaErrors(cudaDeviceSetLimit(cudaLimitMallocHeapSize, 128 * 1024 * 1024)); checkCudaErrors(cudaMalloc((void**) &device_buses, H_NBUS * sizeof(Bus))); checkCudaErrors(cudaMalloc((void**) &device_branches, H_NBRANCH * sizeof(Branch))); checkCudaErrors(cudaMalloc((void**) &device_pv, H_NPV * sizeof(unsigned int))); checkCudaErrors(cudaMalloc((void**) &device_pq, H_NPQ * sizeof(unsigned int))); checkCudaErrors(cudaMemcpy(device_buses, buses, H_NBUS * sizeof(Bus), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(device_branches, branches, H_NBRANCH * sizeof(Branch), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(device_pv, pv, H_NPV * sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMemcpy(device_pq, pq, H_NPQ * sizeof(unsigned int), cudaMemcpyHostToDevice)); checkCudaErrors(cudaMalloc((void**) &V, H_NBUS * H_NTESTS * sizeof(cuDoubleComplex))); nnzYf = 2 * H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &csrValYf , nnzYf * H_NTESTS * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &csrColIndYf, nnzYf * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrYf, (H_NBRANCH + 1) * sizeof( int 
))); nnzYt = 2 * H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &csrValYt , nnzYt * H_NTESTS * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &csrColIndYt, nnzYt * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrYt, (H_NBRANCH + 1) * sizeof( int ))); nnzYsh = H_NBUS; checkCudaErrors(cudaMalloc((void**) &csrValYsh , nnzYsh * H_NTESTS * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &csrColIndYsh , nnzYsh * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrYsh , (H_NBUS + 1) * sizeof( int ))); nnzCf = H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &csrValCf , nnzCf * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &csrColIndCf, nnzCf * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrCf, (H_NBUS + 1) * sizeof( int ))); nnzCt = H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &csrValCt , nnzCt * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &csrColIndCt, nnzCt * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrCt, (H_NBUS + 1) * sizeof( int ))); nnzCfcoo = H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &cooValCf, nnzCfcoo * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &cooColCf, nnzCfcoo * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &cooRowCf, nnzCfcoo * sizeof( int ))); nnzCtcoo = H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &cooValCt, nnzCtcoo * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &cooColCt, nnzCtcoo * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &cooRowCt, nnzCtcoo * sizeof( int ))); checkCudaErrors(cusparseCreate(&sparseHandle)); checkCudaErrors(cusparseSetPointerMode(sparseHandle, CUSPARSE_POINTER_MODE_HOST)); nPermutation = H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &permutation, H_NBRANCH * sizeof(int))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrYbus, (H_NBUS + 1) * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrCfYf, (H_NBUS + 1) * sizeof( int ))); 
checkCudaErrors(cudaMalloc((void**) &csrRowPtrCtYt, (H_NBUS + 1) * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &csrRowPtrCfYfCtYt, (H_NBUS + 1) * sizeof( int ))); int length = H_NPV + 2 * H_NPQ; switch(H_ALG){ case NR: checkCudaErrors(cudaMalloc((void**) &F, H_NTESTS * length * sizeof(double))); checkCudaErrors(cudaMalloc((void**) &dx, length * sizeof(double))); checkCudaErrors(cudaMalloc((void**) &diagIbus, H_NTESTS * H_NBUS * sizeof(cuDoubleComplex))); nnzJ = 0; checkCudaErrors(cudaMalloc((void**) &csrRowPtrJ, (length + 1) * sizeof(int))); break; case FDBX: case FDXB: checkCudaErrors(cudaMalloc((void**) &csrBpRow, (H_NPV + H_NPQ + 2) * sizeof(int))); checkCudaErrors(cudaMalloc((void**) &csrBppRow, (H_NPQ + 2) * sizeof(int))); checkCudaErrors(cudaMalloc((void**) &tmpBuses, H_NBUS * sizeof(Bus))); checkCudaErrors(cudaMalloc((void**) &tmpBranches, H_NBRANCH * sizeof(Branch))); tmpNnzYf = 2 * H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &tmpCsrValYf , nnzYf * H_NTESTS * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &tmpCsrColIndYf, nnzYf * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &tmpCsrRowPtrYf, (H_NBRANCH + 1) * sizeof( int ))); tmpNnzYt = 2 * H_NBRANCH; checkCudaErrors(cudaMalloc((void**) &tmpCsrValYt , nnzYt * H_NTESTS * sizeof(cuDoubleComplex))); checkCudaErrors(cudaMalloc((void**) &tmpCsrColIndYt, nnzYt * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &tmpCsrRowPtrYt, (H_NBRANCH + 1) * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &tmpCsrRowPtrYbus, (H_NBUS + 1) * sizeof( int ))); checkCudaErrors(cudaMalloc((void**) &P, (H_NPV + H_NPQ) * H_NTESTS * sizeof( double ))); checkCudaErrors(cudaMalloc((void**) &Q, H_NPQ * H_NTESTS * sizeof( double ))); break; } converged_test = (bool*) malloc(sizeof(bool) * H_NTESTS); checkCudaErrors(cudaMalloc((void**) &vLoss, H_NBRANCH * H_NTESTS * sizeof(double))); reduceBlocks = (H_NBRANCH > 2048) ? 
BLOCKS(H_NBRANCH, 2048) : 1; reduceBlocks = min(reduceBlocks, 2048); if(reduceBlocks != 1){ checkCudaErrors(cudaMalloc((void**) &dtReduceLoss, sizeof(double) * reduceBlocks * H_NTESTS)); if(reduceBlocks > 2048){ reduceThreadsBlocks = 1024; } else if (reduceBlocks > 1024){ reduceThreadsBlocks = 1024; }else if (reduceBlocks > 512){ reduceThreadsBlocks = 512; }else if (reduceBlocks > 256){ reduceThreadsBlocks = 256; }else if (reduceBlocks > 128){ reduceThreadsBlocks = 128; }else if (reduceBlocks > 64){ reduceThreadsBlocks = 64; }else if (reduceBlocks > 32){ reduceThreadsBlocks = 32; }else if (reduceBlocks > 16){ reduceThreadsBlocks = 16; }else if (reduceBlocks > 8){ reduceThreadsBlocks = 8; }else if (reduceBlocks > 4){ reduceThreadsBlocks = 4; }else if (reduceBlocks > 2){ reduceThreadsBlocks = 2; }else { reduceThreadsBlocks = 1; } } hReduceLoss = (double*) malloc(sizeof(double) * H_NTESTS); checkCudaErrors(cudaMalloc((void**) &dReduceLoss, sizeof(double) * H_NTESTS)); if(H_NBRANCH > 2048){ reduceThreads = 1024; } else if (H_NBRANCH > 1024){ reduceThreads = 1024; }else if (H_NBRANCH > 512){ reduceThreads = 512; }else if (H_NBRANCH > 256){ reduceThreads = 256; }else if (H_NBRANCH > 128){ reduceThreads = 128; }else if (H_NBRANCH > 64){ reduceThreads = 64; }else if (H_NBRANCH > 32){ reduceThreads = 32; }else if (H_NBRANCH > 16){ reduceThreads = 16; }else if (H_NBRANCH > 8){ reduceThreads = 8; }else if (H_NBRANCH > 4){ reduceThreads = 4; }else if (H_NBRANCH > 2){ reduceThreads = 2; }else { reduceThreads = 1; } } __host__ void hybrid_free(){ checkCudaErrors(cudaFree(V)); checkCudaErrors(cudaFree(csrColIndYbus )); checkCudaErrors(cudaFree(csrColIndYf )); checkCudaErrors(cudaFree(csrColIndYt )); checkCudaErrors(cudaFree(csrColIndYsh )); checkCudaErrors(cudaFree(csrColIndCt )); checkCudaErrors(cudaFree(csrColIndCf )); checkCudaErrors(cudaFree(cooColCt )); checkCudaErrors(cudaFree(cooColCf )); checkCudaErrors(cudaFree(csrColIndCfYf )); checkCudaErrors(cudaFree(csrColIndCtYt )); 
checkCudaErrors(cudaFree(csrColIndCfYfCtYt )); checkCudaErrors(cudaFree(csrRowPtrYbus )); checkCudaErrors(cudaFree(csrRowPtrYf )); checkCudaErrors(cudaFree(csrRowPtrYt )); checkCudaErrors(cudaFree(csrRowPtrYsh )); checkCudaErrors(cudaFree(csrRowPtrCt )); checkCudaErrors(cudaFree(csrRowPtrCf )); checkCudaErrors(cudaFree(cooRowCt )); checkCudaErrors(cudaFree(cooRowCf )); checkCudaErrors(cudaFree(csrRowPtrCfYf )); checkCudaErrors(cudaFree(csrRowPtrCtYt )); checkCudaErrors(cudaFree(csrRowPtrCfYfCtYt )); checkCudaErrors(cudaFree(csrValYbus )); checkCudaErrors(cudaFree(csrValYf )); checkCudaErrors(cudaFree(csrValYt )); checkCudaErrors(cudaFree(csrValYsh )); checkCudaErrors(cudaFree(csrValCf )); checkCudaErrors(cudaFree(csrValCt )); checkCudaErrors(cudaFree(cooValCf )); checkCudaErrors(cudaFree(cooValCt )); checkCudaErrors(cudaFree(csrValCtYt )); checkCudaErrors(cudaFree(csrValCfYf )); checkCudaErrors(cudaFree(csrValCfYfCtYt )); checkCudaErrors(cudaFree(pBuffer )); checkCudaErrors(cudaFree(permutation )); checkCudaErrors(cusparseDestroy(sparseHandle)); checkCudaErrors(cusparseDestroyMatDescr(descrCf)); checkCudaErrors(cusparseDestroyMatDescr(descrYf)); checkCudaErrors(cusparseDestroyMatDescr(descrCfYf)); checkCudaErrors(cusparseDestroyMatDescr(descrCt)); checkCudaErrors(cusparseDestroyMatDescr(descrYt)); checkCudaErrors(cusparseDestroyMatDescr(descrCtYt)); checkCudaErrors(cusparseDestroyMatDescr(descrCfYfCtYt)); checkCudaErrors(cusparseDestroyMatDescr(descrYsh)); checkCudaErrors(cusparseDestroyMatDescr(descrYbus)); switch(H_ALG){ case NR: checkCudaErrors(cudaFree(F)); checkCudaErrors(cudaFree(csrValJ)); checkCudaErrors(cudaFree(csrRowPtrJ)); checkCudaErrors(cudaFree(csrColIndJ)); checkCudaErrors(cudaFree(d_cooRowJ)); checkCudaErrors(cudaFree(dx)); free(h_csrColIndJ); free(h_csrRowPtrJ); break; case FDBX: case FDXB: checkCudaErrors(cudaFree(cooBpRow)); checkCudaErrors(cudaFree(cooBpCol)); checkCudaErrors(cudaFree(cooBpVal)); checkCudaErrors(cudaFree(csrBpVal)); 
checkCudaErrors(cudaFree(csrBpCol)); checkCudaErrors(cudaFree(csrBpRow)); checkCudaErrors(cudaFree(cooBppRow)); checkCudaErrors(cudaFree(cooBppCol)); checkCudaErrors(cudaFree(cooBppVal)); checkCudaErrors(cudaFree(csrBppVal)); checkCudaErrors(cudaFree(csrBppCol)); checkCudaErrors(cudaFree(csrBppRow)); checkCudaErrors(cudaFree(tmpBuses)); checkCudaErrors(cudaFree(tmpBranches)); checkCudaErrors(cudaFree(tmpCsrColIndYbus )); checkCudaErrors(cudaFree(tmpCsrColIndYf )); checkCudaErrors(cudaFree(tmpCsrColIndYt )); checkCudaErrors(cudaFree(tmpCsrRowPtrYbus )); checkCudaErrors(cudaFree(tmpCsrRowPtrYf )); checkCudaErrors(cudaFree(tmpCsrRowPtrYt )); checkCudaErrors(cudaFree(tmpCsrValYbus )); checkCudaErrors(cudaFree(tmpCsrValYf )); checkCudaErrors(cudaFree(tmpCsrValYt )); checkCudaErrors(cudaFree(P )); checkCudaErrors(cudaFree(Q )); break; } checkCudaErrors(cudaFree(diagIbus)); checkCudaErrors(cudaFree(d_estrutura)); checkCudaErrors(cudaFree(d_enxame)); free(converged_test); free(stream); //checkCudaErrors(cudaFree(vLoss)); cudaDeviceReset(); } #endif /* RUNPF_CUH_ */
omp-taskloop-yield.c
#include <omp.h> #include <unistd.h> #include <stdio.h> #define THREADS 2 #define LEN 25 #define USLEEP 3 #define SLEEP 30 void long_task(void) { int k = 0; for (k=0; k<1000; k++) { #pragma omp taskyield usleep(USLEEP); } } int main(void) { int j=0; #pragma omp parallel num_threads(THREADS) #pragma omp single #pragma omp taskloop grainsize(1) for (j=0; j<LEN; j++) { if (j<THREADS) { long_task(); } else { usleep(SLEEP); } } return 0; }
GB_unop__abs_uint16_uint16.c
//------------------------------------------------------------------------------
// GB_unop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB (_unop_apply__abs_uint16_uint16)
// op(A') function:  GB (_unop_tran__abs_uint16_uint16)

// C type:   uint16_t
// A type:   uint16_t
// cast:     uint16_t cij = aij
// unaryop:  cij = aij
// (ABS of an unsigned type is the identity, so the operator is a plain copy)

#define GB_ATYPE \
    uint16_t

#define GB_CTYPE \
    uint16_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = x ;

// casting
#define GB_CAST(z, aij) \
    uint16_t z = aij ;

// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    uint16_t aij = Ax [pA] ; \
    /* Cx [pC] = op (cast (aij)) */ \
    uint16_t z = aij ; \
    Cx [pC] = z ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_ABS || GxB_NO_UINT16)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies the unary op elementwise over anz entries, parallelized over
// nthreads.  Two cases: full/sparse (Ab == NULL, every slot is an entry)
// and bitmap (Ab marks which slots hold entries).

GrB_Info GB (_unop_apply__abs_uint16_uint16)
(
    uint16_t *Cx,               // Cx and Ax may be aliased
    const uint16_t *Ax,
    const int8_t *restrict Ab,  // A->b if A is bitmap
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    if (Ab == NULL)
    {
        // full/sparse case: every position 0..anz-1 holds an entry
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = z ;
        }
    }
    else
    {
        // bitmap case, no transpose; A->b already memcpy'd into C->b
        #pragma omp parallel for num_threads(nthreads) schedule(static)
        for (p = 0 ; p < anz ; p++)
        {
            if (!Ab [p]) continue ;
            uint16_t aij = Ax [p] ;
            uint16_t z = aij ;
            Cx [p] = z ;
        }
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// The transpose kernel body is shared across all operators and included
// from GB_unop_transpose.c; the GB_* macros above specialize it.

GrB_Info GB (_unop_tran__abs_uint16_uint16)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
1.norace6.c
// RUN: clang %loadLLOV %s -o /dev/null 2>&1 | FileCheck %s #include <omp.h> #define N 20 int main() { int A[N][N]; #pragma omp parallel for schedule(runtime) for (int i = 1; i < N; i++) for (int j = 1; j < N; j++) A[i][j] = A[i][j - 1]; } // CHECK: Region is Data Race Free. // END
genome.c
/* This file is part of Primer Pooler (c) Silas S. Brown. For Wen. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #ifdef __APPLE__ #define _FORTIFY_SOURCE 0 /* Mac OS 10.7 OpenMP bug workaround */ #endif #include <stdio.h> #include <stdlib.h> #include <string.h> #include <stdint.h> #include <time.h> #include <assert.h> #include "ansi.h" #include "bit-basics.h" #include "debug.h" #include "openmp.h" #include "memcheck.h" #include "numbers.h" #define PARALLELIZE_CHROMOSOMES 1 typedef uint32_t b32; static inline b32 byteSwap32(b32 i) { switch(0){case 0:case sizeof(b32)==4:;} /* must be EXACTLY 4 bytes for this code to work */ i = (i>>16) | (i<<16); return ((i&0xFF00FF00)>>8) | ((i&0x00FF00FF)<<8); } static b32 read2bSwap(FILE *f,int byteSwap) { b32 p; if(!fread(&p,sizeof(p),1,f)) { fputs("Read error\n",stderr); return 0; } if(byteSwap) p=byteSwap32(p); return p; } static int read_2bit_nSeqs(FILE *f,int *byteSwap,long *seqPtr) { switch(0){case 0:case sizeof(b32)==4:;} /* as above */ switch(read2bSwap(f,0)) { case 0x1A412743: *byteSwap=0; break; case 0x4327411A: *byteSwap=1; break; default: fputs("Invalid signature (is this really a .2bit genome file?)\n",stderr); return 0; } if(read2bSwap(f,0 /* *byteSwap, but doesn't matter if we're comparing result against 0 */)) { fputs("Invalid version\n",stderr); return 0; } int seqCount = read2bSwap(f,*byteSwap); read2bSwap(f,0); // reserved (again *byteSwap not nec) *seqPtr = ftell(f); return seqCount; } typedef char SeqName[256]; SeqName lastSequenceNameRead={0}; 
int variants_skipped; /* count of "variant"-looking sequences skipped in the last scan */

/* Heuristic: a sequence name containing '_' or '-' is treated as an
   alternate/variant contig (e.g. UCSC chr6_apd_hap1); bumps the counter. */
static int is_variant() {
  if (strchr(lastSequenceNameRead,'_') != NULL || strchr(lastSequenceNameRead,'-') != NULL) {
    ++variants_skipped; return 1;
  } else return 0;
}

/* Read the next sequence record header of a .2bit file.
   In:  *seqPtr = offset of the next index entry; byteSwap from
        read_2bit_nSeqs; ignoreVars = skip variant contigs.
   Out: *seqPtr advanced to the following index entry; *nBases set;
        *unknownStart / *unknownLen = malloc'd arrays describing the runs of
        unknown ('N') bases, with a sentinel entry appended
        (start = *nBases, len = 0) so scanners need not bounds-check;
        *isVariant set.  Returns 1 on success with the file positioned at
        the packed 2-bit base data, 0 on error.  Caller frees the arrays. */
static int read_2bit_nBases(FILE *f,int byteSwap,long *seqPtr,b32* *unknownStart,b32* *unknownLen,b32 *nBases,int *isVariant,int ignoreVars) {
  if (fseek(f,*seqPtr,SEEK_SET)) { fputs("Seek error reading sequence offset\n",stderr); return 0; }
  int seqNameLen = getc(f); /* NOTE(review): EOF here yields -1 and a huge fread size — assumed not to occur on a well-formed file; verify */
  if(!fread(lastSequenceNameRead,1,seqNameLen,f)) { fputs("Error reading sequence name\n",stderr); return 0; }
  lastSequenceNameRead[seqNameLen]=0;
  if(ignoreVars) *isVariant = is_variant(); else *isVariant = 0;
#ifdef Debug_ChromosomeCheck
  *isVariant = strcmp(lastSequenceNameRead,Debug_ChromosomeCheck); /* treat any OTHER chromosome as a variant we won't read, for debugging with just one */
#endif
  long offset=read2bSwap(f,byteSwap);
  if(!offset) { fputs("No sequence offset\n",stderr); return 0; }
  *seqPtr = ftell(f); /* remember where the NEXT index entry starts */
  if(fseek(f,offset,SEEK_SET)) { fputs("Seek error loading sequence\n",stderr); return 0; }
  *nBases=read2bSwap(f,byteSwap);
#ifdef Debug_ChromosomeCheck
  if(!*isVariant) fprintf(stderr,"Chromosome size is %d\n",*nBases);
#endif
  b32 nUnknown = read2bSwap(f,byteSwap);
  *unknownStart = malloc(sizeof(b32)*(nUnknown+1)); /* we'll add an extra one at the end so don't have to keep checking unknownPtr<nUnknown */
  *unknownLen = malloc(sizeof(b32)*(nUnknown+1));
  if(memFail(*unknownLen,*unknownStart,_memFail)) return 0;
  b32 i;
  for(i=0; i<nUnknown; i++) (*unknownStart)[i]=read2bSwap(f,byteSwap);
  (*unknownStart)[i]=*nBases; /* sentinel: a "run" starting one past the end */
  for(i=0; i<nUnknown; i++) { (*unknownLen)[i] = read2bSwap(f,byteSwap); assert((*unknownLen)[i]); }
  (*unknownLen)[i]=0; /* if get unknownLen==0, need to adjust the 'else' branch of 'baseNo < *unknownStart' below to allow for this possibility (or just remove that block, running mmove on unknownStart to keep it in sync) */
  if(fseek(f,read2bSwap(f,byteSwap)*4*2 + 4,SEEK_CUR)) // skip 'masked blocks' (TODO: are these ever relevant?) and the 4-byte 'reserved' word
    { fputs("Seek error skipping mask\n",stderr); return 0; }
  return 1; // and ftell = start; *seqPtr = next seq, unknown.. needs free
}

/* Shift one 2-bit base into the rolling window *buf, consuming one base
   from 'byte' (tracked via *basesLeftInByte) and marking it valid. */
static inline void addBase(bit64 *buf,bit64 *valid,unsigned char byte,int *basesLeftInByte) {
  /* This function has to be FAST: seriously inner-loop.  For ease of binary
     search in amplicons.c, bases are shifted into buf from LEFT (not right
     as in 64.h), so buf contains the last few bases IN REVERSE from the
     genome cursor (which is an 'end' cursor). */
  *buf = ((*buf) >> 2) | ((((bit64)byte >> (2*--*basesLeftInByte)) & (bit64)3) << 62);
  if(*valid != ~(bit64)0) /* (only at start, so put the 'if' around it to save a couple of instructions) */
    *valid = ((*valid) >> 2) | ((bit64)3 << 62);
}

/* Translate one FASTA letter to its 2-bit code and feed it to addBase;
   whitespace is ignored, unrecognized letters map to code 0 ('T'). */
static inline void addFastaBase(bit64 *buf,bit64 *valid,char letter) {
  int basesLeftInByte = 1, twobit;
  switch(letter) {
  case 'C': case 'c': twobit = 1; break;
  case 'A': case 'a': twobit = 2; break;
  case 'G': case 'g': twobit = 3; break;
  case ' ': case '\t': case '\r': case '\n': return;
  default: twobit = 0; /* TODO: might be degenerate bases like 'M'; use these in nonspecific-amplicon checks? (but hopefully they're using .2bit anyway) */
  }
  addBase(buf,valid,twobit,&basesLeftInByte);
}

/* Callbacks implemented elsewhere in the program: */
void look(bit64,bit64,int,b32); /* could pass this into go_through_genome as a pointer, but doing so will slow us down; there's only one in the program so let's let -flto consider it for inlining */
int allocateSeqs(int nSeq); /* ditto */
int allocateAnotherSeq(int nSeq); /* ditto */

static int is_fasta(FILE *f) {
  // Check if we're looking at a FASTA file instead of a 2bit file.
  // Assume FASTA will begin with '>' or newline + '>' or BOM + '>'
  // If FASTA is detected, seek past the first '>'.  Otherwise rewind.
  char dat[5]; if(!fread(dat,4,1,f)) *dat=0;
  rewind(f); dat[4]=0;
  if (strspn(dat,"\r\n\xef\xbb\xbf>")) {
    while(fgetc(f)!='>') if(feof(f)) { fprintf(stderr,"FASTA file with no sequences??\n"); rewind(f); return 0; }
    return 1;
  } else return 0;
}

/* Read a sequence name into lastSequenceNameRead, discarding the rest of an
   over-long header line and trailing whitespace. */
static void readFastaSeqName(FILE *f) { // assume 'f' is positioned just after the '>'
  if(!fgets(lastSequenceNameRead,sizeof(SeqName),f)) *lastSequenceNameRead = 0;
  if(lastSequenceNameRead[strlen(lastSequenceNameRead)-1]!='\n') {
    while (fgetc(f) != '\n') { if(feof(f)) break; }
  }
  lastSequenceNameRead[strcspn(lastSequenceNameRead," \t\r\n")]=0;
}

/* Like readFastaSeqName, but the header line (starting with '>') was
   already read into buf by the caller's scanning loop. */
static void takeFastaSeqName(FILE *f,const char *buf) {
  strcpy(lastSequenceNameRead,buf+1); /* ignore '>' */
  lastSequenceNameRead[strcspn(lastSequenceNameRead," \t\r\n")]=0;
  if(buf[strlen(buf)-1]!='\n') {
    while (fgetc(f) != '\n') { if(feof(f)) break; }
  }
}

/* Scan a FASTA genome sequentially, calling look() for every base of every
   non-variant sequence.  Returns a malloc'd array of sequence names (one per
   sequence kept), or NULL/partial on allocation failure. */
static SeqName* fasta_genome(FILE *f,int ignoreVars) {
  fprintf(stderr,"Reading genome from FASTA file\n(slower than .2bit; may take time)\n");
  int seqNo=0; char buf[80]; SeqName *seqNames=NULL;
  while(!feof(f)) {
    if(seqNo) takeFastaSeqName(f,buf); else readFastaSeqName(f);
    if(ignoreVars && is_variant()) {
      while(fgets(buf,sizeof(buf),f) && *buf != '>'); /* skip this sequence's data */
      continue;
    }
    fprintf(stderr,"%s\n",lastSequenceNameRead);
    SeqName *lastSeqNames = seqNames;
    seqNames = realloc(seqNames,(seqNo+1)*sizeof(SeqName));
    if(!seqNames || allocateAnotherSeq(seqNo+1)==seqNo) {
      free(lastSeqNames);
      fprintf(stderr,"Genome metadata: Out of memory!\n"); break;
    }
    wrapped_memcpy(seqNames[seqNo],lastSequenceNameRead,sizeof(SeqName));
    bit64 curBuf=0,curValid=0; b32 baseNo=0;
    while(fgets(buf,sizeof(buf),f) && *buf != '>') {
      char *b;
      for(b=buf; *b; b++) {
        switch (*b) {
        case 'N': case 'n': curBuf = curValid = 0; break; /* unknown base invalidates the window */
        default: addFastaBase(&curBuf,&curValid,*b); look(curBuf,curValid,seqNo,baseNo);
        }
        /* NOTE(review): baseNo is incremented for EVERY character including
           newlines/whitespace (addFastaBase ignores them but this counter
           does not), so FASTA base numbering includes line breaks — confirm
           whether downstream consumers of look()'s baseNo expect that. */
        baseNo++;
      }
    }
    seqNo++;
  }
  fprintf(stderr,"End of FASTA genome scan\n");
  return seqNames;
}

/* Scan the whole genome (FASTA or .2bit), calling look() for every base of
   every non-variant sequence; sequences may be processed in parallel with
   OpenMP when reading .2bit.  Returns a malloc'd array of sequence names
   indexed by "renumbered" sequence number (variants excluded), or NULL on
   error.  Progress is drawn to stderr (one column per thread). */
SeqName* go_through_genome(FILE *f,int ignoreVars) {
  variants_skipped = 0;
  if(is_fasta(f)) return fasta_genome(f,ignoreVars);
  int byteSwap=0; long seqPtr=0; // =0 to suppress warning
  int nSeq=read_2bit_nSeqs(f,&byteSwap,&seqPtr);
  if(!allocateSeqs(nSeq)) return NULL;
  int seqsDone = 0;
  SeqName *seqNames=NULL; int numSeqNames=0;
  time_t start = time(NULL); time_t nextDisplay = start + 1;
  if(omp_get_max_threads() > 1) {
    fprintf(stderr,"Parallelising scan (up to %d chromosomes simultaneously)\n",omp_get_max_threads());
  }
  int seqNo;
  char progressBuf[80]={0}; /* TODO: different screen widths? (low priority because it could just scroll, but 8 cores on a narrow terminal could be messy) */
  enum { ProgWidthPerThread = 13 /* sequence name width is this minus 5; try to divide into the screen width and also allow for the possiblity of narrower terminals */ };
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
#pragma omp parallel for schedule(dynamic)
#endif
  for(seqNo=0;seqNo<nSeq;seqNo++) {
    time_t nextThreadUpdate = time(NULL)+1;
    int tNum = omp_get_thread_num();
    /* each thread gets a fixed-width column of progressBuf (or none if the
       terminal-width buffer is exhausted) */
    char *pgBuf = ((tNum+1)*ProgWidthPerThread > sizeof(progressBuf)) ? NULL : (progressBuf+tNum*ProgWidthPerThread);
    int isRHS=(tNum==omp_get_num_threads()-1) || ((tNum+2)*ProgWidthPerThread)>sizeof(progressBuf);
    b32 *allUnknownStart=0,*allUnknownLen=0,nBases=0; /* =0 for old compilers (don't warn) */
    int isVariant,renumberedSeqNo=0; /* =0 for old compilers (don't warn) */
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
    long ft=0; /* =0 for old compilers (don't warn) */
#endif
    b32 baseNo = 0, *unknownStart=0, *unknownLen=0; /* last two =0 for old compilers */
    int bufBytes=0;/* =0 for old compilers (don't warn) */
    char *buf=NULL;
    if(numSeqNames && !seqNames) continue; /* would have been 'break' below w/out OpenMP */
    /* All file I/O and shared-state updates happen inside this critical
       section; each iteration then scans its private 'buf' copy in parallel */
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
#pragma omp critical
#endif
    {
      if(read_2bit_nBases(f,byteSwap,&seqPtr,&allUnknownStart,&allUnknownLen,&nBases,&isVariant,ignoreVars)) {
        if(isVariant) { free(allUnknownStart); free(allUnknownLen); }
        else {
          if((renumberedSeqNo = seqsDone++) >= numSeqNames) {
            /* grow the names array geometrically */
            numSeqNames = (numSeqNames+1)<<1;
            seqNames = realloc(seqNames,numSeqNames*sizeof(SeqName));
            memFail(seqNames,allUnknownLen,allUnknownStart,_memFail);
          }
          if(seqNames) wrapped_memcpy(seqNames[renumberedSeqNo],lastSequenceNameRead,sizeof(SeqName));
          unknownStart=allUnknownStart;unknownLen=allUnknownLen;
          bufBytes = nBases/4;if(bufBytes) buf=malloc(bufBytes);
          if(buf) {
            if(!fread(buf,1,bufBytes,f)) {
              fprintf(stderr,"\nError reading %d bytes (current pointer is &%lx)\nCorrupt genome file?\n",bufBytes,ftell(f));
              free(buf);
              if(seqNames) free(seqNames);
              seqNames=NULL; /* would be 'return NULL' w/out OpenMP */
            }
          } else bufBytes=0; /* malloc failed: fall back to byte-at-a-time getc below */
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
          if(f) ft = ftell(f); /* per-thread file cursor for the getc fallback */
#endif
        }
      } else isVariant=1;
    }
    if(isVariant || !seqNames) continue;
    int bufPtr = 0;
    bit64 curBuf=0,curValid=0;
    int basesLeftInByte = 0;
    unsigned char byte = 0; // =0 to suppress warning
    while(baseNo<nBases) {
      /* OK, let's go through all the bases.  And try not to think of the
         daft 2001-ish "Internet meme".  TODO: it might be possible to
         manually unroll this loop a bit so fewer tests are needed on
         baseNo etc. */
      if(!basesLeftInByte) {
        if(bufPtr==bufBytes)
          /* ran past the in-memory copy (or malloc failed): read one byte
             from the file at this thread's saved cursor, under the lock */
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
#pragma omp critical
#endif
          {
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
            fseek(f,ft++,SEEK_SET);
#endif
            byte = getc(f);
          } else byte=buf[bufPtr++];
        basesLeftInByte = 4;
        if(pgBuf && time(NULL) >= nextThreadUpdate)
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
#pragma omp critical
#endif
        {
          /* refresh this thread's progress column (name + percent) */
          int outputted=sprintf(pgBuf,"%*s %2d%%",ProgWidthPerThread-5,seqNames[renumberedSeqNo],(int)((float)baseNo*100.0/(float)nBases));
          if(!isRHS) pgBuf[outputted]=' '; /* overwrite sprintf's NUL so columns stay joined */
          nextThreadUpdate = time(NULL)+1;
        }
        if(time(NULL) >= nextDisplay)
#if PARALLELIZE_CHROMOSOMES && defined(_OPENMP)
#pragma omp critical
#endif
          // (this comment added to work around an auto-indent bug in some Emacs versions)
          {
            if(time(NULL) >= nextDisplay) { /* re-check inside the lock */
              if(omp_get_num_threads()==1) fputs("\rScanning ",stderr);
              else fputs("\r",stderr); /* Scanning message will have been printed above, and we have many columns to worry about */
              fputs(progressBuf,stderr); fflush(stderr);
              nextDisplay = time(NULL) + 2;
            }}
      }
      if(baseNo < *unknownStart) {
        addBase(&curBuf,&curValid,byte,&basesLeftInByte);
        look(curBuf,curValid,renumberedSeqNo,++baseNo);
      } else { /* we're in an 'unknown' region */
        --basesLeftInByte; /* ignore this one */
        if(++baseNo == *unknownStart + *unknownLen) {
          unknownStart++; unknownLen++; /* advance to the next 'N' run */
          curBuf = curValid = 0;
        }
      }
    }
    if(buf) free(buf);
    free(allUnknownStart); free(allUnknownLen);
    if(pgBuf) memset(pgBuf,' ',ProgWidthPerThread-isRHS); /* blank our finished column */
  }
  fprintf(stderr,"\rGenome scan complete");
  prnSeconds((long)(time(NULL)-start));
  fputs(clearEOL(),stderr); fputs("\n",stderr);
  return seqNames;
}

/* Print nBases bases of a sequence (1-based baseStart, renumbered sequence
   index as produced by go_through_genome) as ACGT letters to 'out'.
   NOTE(review): unlike the main scan, this does NOT consult the 'unknown'
   runs, so 'N' regions are emitted as 'T' (code 0) — confirm intended. */
void output_genome_segment(FILE *f,int targetRenumberedSeqNo,b32 baseStart,int nBases,FILE *out,int ignoreVars) {
  /* Cut-down version of go_through_genome for use in reports */
  int byteSwap=0; long seqPtr=0;
  rewind(f);
  int nSeq=read_2bit_nSeqs(f,&byteSwap,&seqPtr);
  int seqsDone = 0, seqNo;
  for(seqNo=0;seqNo<nSeq;seqNo++) {
    int isVariant; b32 *allUnknownStart=0,*allUnknownLen=0, nb0;
    if(read_2bit_nBases(f,byteSwap,&seqPtr,&allUnknownStart,&allUnknownLen,&nb0,&isVariant,ignoreVars)) {
      free(allUnknownStart); free(allUnknownLen);
      if (!isVariant && (seqsDone++ == targetRenumberedSeqNo)) {
        fseek(f,--baseStart/4,SEEK_CUR); // 1st is 0 not 1
        int shift = 2*(baseStart & 3); /* position within the first packed byte */
        int mask = (128|64) >> shift;
        shift = 6 - shift;
        do {
          unsigned char byte = getc(f);
          while(mask && nBases) {
            fputc("TCAG"[(byte >> shift) & 3], out);
            nBases--; shift-=2; mask>>=2;
          }
          mask = 128|64; shift = 6; /* reset for subsequent whole bytes */
        } while(nBases);
        break;
      }}
  }
}
templatemath.h
/* * templatemath.h * * Created on: Jan 1, 2016 * Author: agibsonccc */ #ifndef TEMPLATEMATH_H_ #define TEMPLATEMATH_H_ #include <math.h> #include <cmath> #include <dll.h> #include <pointercast.h> #define HALF_MAX_VALUE 65504. #define FLOAT_MAX_VALUE 3.4028235E38 #define DOUBLE_MAX_VALUE 1.7976931348623157E308 #define FLOAT_MIN_NORMAL 1.17549435e-38 #ifdef __CUDACC__ #include <types/float16.h> #define math_def __host__ __device__ #ifdef CUDA_9 struct HALFS{ half H; half L; __host__ __device__ HALFS() {}; __host__ __device__ ~HALFS() {}; }; union PAIR { HALFS B; int W; __host__ __device__ PAIR() {}; __host__ __device__ ~PAIR(){} }; #else typedef union { struct { half H; half L; } B; int W; } PAIR; #endif // cuda_9 #else #define math_def #include <types/float16.h> #endif namespace nd4j { #ifdef __CUDACC__ #endif namespace math { template<typename T> math_def inline T nd4j_abs(T value); template<typename T> math_def inline void nd4j_swap(T &val1, T &val2); template<typename T> math_def inline T nd4j_max(T val1, T val2); template<typename T> math_def inline T nd4j_min(T val1, T val2); template<typename T> math_def inline T nd4j_rint(T val1); template<typename T> math_def inline T nd4j_copysign(T val1, T val2); //#ifndef __CUDACC__ template<typename T> math_def inline T nd4j_dot(T *x, T *y, int length); //#endif template<typename T> math_def inline T nd4j_ceil(T val1); template<typename T> math_def inline bool nd4j_isnan(T val1); template<typename T> math_def inline bool nd4j_isinf(T val1); template<typename T> math_def inline bool nd4j_isfin(T val1); template<typename T> math_def inline T nd4j_cos(T val); template<typename T> math_def inline T nd4j_cosh(T val); template<typename T> math_def inline T nd4j_exp(T val); template<typename T> math_def inline T nd4j_floor(T val); template<typename T> math_def inline T nd4j_log(T val); template<typename T> math_def inline T nd4j_pow(T val, T val2); template<typename T> math_def inline T nd4j_round(T val); template<typename T> 
math_def inline T nd4j_remainder(T num, T denom); template<typename T> math_def inline T nd4j_fmod(T num, T denom); template<typename T> math_def inline T nd4j_erf(T num); template<typename T> math_def inline T nd4j_erfc(T num); template<typename T> math_def inline T nd4j_sigmoid(T val) { return (T) 1.0 / ((T) 1.0 + nd4j_exp<T>(-val)); } template<typename T> math_def inline T nd4j_elu(T val) { if (val >= (T) 0.0) return val; else return nd4j_exp<T>(val) - (T) 1.0; //return val >= 0.0 ? val : (nd4j_exp<T>(val) - 1.0); } template<typename T> math_def inline T nd4j_leakyrelu(T val,T alpha) { if (val < (T) 0.0f) return alpha * val; else return val; //return val < 0 ? alpha * val : val; } template<typename T> math_def inline T nd4j_eluderivative(T val) { if (val >= (T) 0.0f) return (T) 1.0f; else return nd4j_exp<T>(val); //return val >= 0.0 ? 1.0 : nd4j_exp(val); } template<typename T> math_def inline T nd4j_sin(T val); template<typename T> math_def inline T nd4j_sinh(T val); template<typename T> math_def inline T softplus(T val) { return nd4j_log<T>((T) 1.0f + nd4j_exp<T>(val)); } template<typename T> math_def inline T nd4j_softsign(T val) { return val / ((T) 1.0f + nd4j::math::nd4j_abs<T>(val)); } template<typename T> math_def inline T nd4j_sqrt(T val); template<typename T> math_def inline T nd4j_tanh(T val); template<typename T> math_def inline T nd4j_tan(T val); template<typename T> math_def inline T nd4j_atan2(T val1, T val2); template<> math_def inline float16 nd4j_atan2<float16>(float16 value1, float16 value2) { return (float16) atan2f((float) value1, (float) value2); } template<> math_def inline float nd4j_atan2<float>(float value1, float value2) { return atan2f(value1, value2); } template<> math_def inline double nd4j_atan2<double>(double value1, double value2) { return atan2(value1, value2); } template<typename T> math_def inline T nd4j_tan(T val) { return nd4j_log((val + 1 / (1 - val)) * 0.5); } template<typename T> math_def inline T nd4j_tanhderivative(T 
val) { T tanh = nd4j_tanh(val); return (T) 1.0f - tanh * tanh; } template<typename T> math_def inline T nd4j_sigmoidderivative(T val) { T sigmoid = nd4j_sigmoid(val); T out = sigmoid * ((T) 1.0f - sigmoid); return out; } template<typename T> math_def inline T nd4j_softsignderivative(T val) { T y = (T) 1.0f + nd4j_abs(val); return (T) 1.0f / (y * y); } template<typename T> math_def inline T nd4j_sgn(T val) { return val < (T) 0.0f ? (T) -1.0f : val > (T) 0.0f ? (T) 1.0f : (T) 0.0f; } template<typename T> math_def inline T nd4j_sign(T val) { return nd4j_sgn<T>(val); } template<typename T> math_def inline T nd4j_signum(T val) { return nd4j_sgn<T>(val); } //#ifndef __CUDACC__ template<> math_def inline float16 nd4j_dot<float16>(float16 *x, float16 *y, int length) { float16 dot = (float16) 0.0f; // TODO: since we can't use simd on unions, we might use something else here. for(int e = 0; e < length; e++) { dot += x[e] * y[e]; } return dot; } template<typename T> math_def inline T nd4j_dot(T *x, T *y, int length) { T dot = (T) 0.0f; #pragma omp simd reduction(+:dot) for(int e = 0; e < length; e++) { dot += x[e] * y[e]; } return dot; } //#endif template<typename T> math_def inline T nd4j_acos(T val); template<typename T> math_def inline T nd4j_acosh(T val); template<typename T> math_def inline T nd4j_asin(T val); template<typename T> math_def inline T nd4j_asinh(T val); template<typename T> math_def inline T nd4j_asinh(T val) { //Math.log(Math.sqrt(Math.pow(x, 2) + 1) + x) return nd4j_log(nd4j_sqrt(nd4j_pow(val, (T) 2) + (T) 1) + val); } template<typename T> math_def inline T nd4j_atan(T val); template<typename T> math_def inline T nd4j_atanh(T val); template<> math_def inline float16 nd4j_abs<float16>(float16 value) { #ifdef NATIVE_HALFS return value < 0. ? 
__hneg(value.data) : value; #else return (float16) fabsf((float) value); #endif } template<> math_def inline float nd4j_abs<float>(float value) { return fabsf(value); } template<> math_def inline double nd4j_abs<double>(double value) { return value < 0 ? -value : value; } template<> math_def inline int nd4j_abs<int>(int value) { return value < 0 ? -value : value; } template<> math_def inline Nd4jIndex nd4j_abs<Nd4jIndex>(Nd4jIndex value) { return value < 0 ? -value : value; } template<> math_def inline float16 nd4j_rint<float16>(float16 value) { return (float16) rintf((float) value); } template<> math_def inline float nd4j_rint<float>(float value) { return rintf(value); } template<> math_def inline double nd4j_rint<double>(double value) { return rint(value); } template<> math_def inline int nd4j_rint<int>(int value) { return value; } template<> math_def inline Nd4jIndex nd4j_rint<Nd4jIndex>(Nd4jIndex value) { return value; } template<> math_def inline bool nd4j_isnan<float16>(float16 value) { return *(value.data.getXP()) == 0x7fffU; } template<> math_def inline bool nd4j_isnan<float>(float value) { return value != value; } template<> math_def inline bool nd4j_isnan<double>(double value) { return value != value; } template<> math_def inline bool nd4j_isnan<int>(int value) { return false; } template<> math_def inline bool nd4j_isnan<Nd4jIndex>(Nd4jIndex value) { return false; } template<> math_def inline bool nd4j_isinf<float16>(float16 value) { return value < (float16) -HALF_MAX_VALUE || value > (float16) HALF_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<float>(float value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -FLOAT_MAX_VALUE || value > FLOAT_MAX_VALUE; } template<> math_def inline bool nd4j_isinf<double>(double value) { #ifdef __CUDACC__ return isinf(value); #else return std::isinf(value); #endif //return value < -DOUBLE_MAX_VALUE || value > DOUBLE_MAX_VALUE; } template<> math_def inline bool 
nd4j_isinf<int>(int value) { return false; } template<> math_def inline bool nd4j_isinf<Nd4jIndex>(Nd4jIndex value) { return false; } template<typename T> math_def inline bool nd4j_isfin(T value) { return !nd4j_isnan<T>(value) && !nd4j_isinf<T>(value); } template<> math_def inline float16 nd4j_copysign<float16>(float16 val1, float16 val2) { return (float16) copysignf((float) val1, (float) val2); } template<> math_def inline float nd4j_copysign<float>(float val1, float val2) { return copysignf(val1, val2); } template<> math_def inline double nd4j_copysign<double>(double val1, double val2) { return copysign(val1, val2); } template<> math_def inline int nd4j_copysign<int>(int val1, int val2) { if (val2 < 0) return -(nd4j_abs<int>(val1)); else return nd4j_abs<int>(val1); } template<> math_def inline Nd4jIndex nd4j_copysign<Nd4jIndex>(Nd4jIndex val1, Nd4jIndex val2) { if (val2 < 0) return -(nd4j_abs<Nd4jIndex>(val1)); else return nd4j_abs<Nd4jIndex>(val1); } template<> math_def inline float16 nd4j_max<float16>(float16 val1, float16 val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline float nd4j_max<float>(float val1, float val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline double nd4j_max<double>(double val1, double val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline int nd4j_max<int>(int val1, int val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline Nd4jIndex nd4j_max<Nd4jIndex>(Nd4jIndex val1, Nd4jIndex val2) { return val1 > val2 ? val1 : val2; } template<> math_def inline Nd4jIndex nd4j_min<Nd4jIndex>(Nd4jIndex val1, Nd4jIndex val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline float16 nd4j_min<float16>(float16 val1, float16 val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline float nd4j_min<float>(float val1, float val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline double nd4j_min<double>(double val1, double val2) { return val1 < val2 ? 
val1 : val2; } template<> math_def inline int nd4j_min<int>(int val1, int val2) { return val1 < val2 ? val1 : val2; } template<> math_def inline float16 nd4j_ceil<float16>(float16 val) { #ifdef NATIVE_HALFS return hceil(val.data) #else return ceilf((float) val); #endif } template<> math_def inline float nd4j_ceil<float>(float val1) { return ceilf(val1); } template<> math_def inline double nd4j_ceil<double>(double val) { return ceil(val); } template<> math_def inline int nd4j_ceil<int>(int val) { return ceil((float) val); } template<> math_def inline float16 nd4j_cos<float16>(float16 val) { #ifdef NATIVE_HALFS return hcos(val.data); #else return cosf((float) val); #endif } template<> math_def inline float nd4j_cos<float>(float val) { return cosf(val); } template<> math_def inline double nd4j_cos<double>(double val) { return cos(val); } template<> math_def inline int nd4j_cos<int>(int val) { return cosf((float) val); } template<> math_def inline float16 nd4j_cosh<float16>(float16 val) { return coshf((float) val); } template<> math_def inline float nd4j_cosh<float>(float val) { return coshf(val); } template<> math_def inline double nd4j_cosh<double>(double val) { return cosh(val); } template<> math_def inline int nd4j_cosh<int>(int val) { return coshf((float) val); } template<> math_def inline float16 nd4j_exp<float16>(float16 val) { #ifdef NATIVE_HALFS return hexp(val.data); #else return (float16) expf((float) val); #endif } template<> math_def inline float nd4j_exp<float>(float val) { return expf(val); } template<> math_def inline double nd4j_exp<double>(double val) { return exp(val); } template<> math_def inline int nd4j_exp<int>(int val) { return expf((float) val); } template<> math_def inline float16 nd4j_floor<float16>(float16 val) { #ifdef NATIVE_HALFS return hfloor(val.data); #else return (float16) floorf((float) val); #endif } template<> math_def inline float nd4j_floor<float>(float val) { return floorf(val); } template<> math_def inline double 
nd4j_floor<double>(double val) { return floor(val); } template<> math_def inline int nd4j_floor<int>(int val) { return floorf((float) val); } template<> math_def inline float16 nd4j_log<float16>(float16 val) { #ifdef NATIVE_HALFS return hlog(val.data); #else return (float16) logf((float) val); #endif } template<> math_def inline float nd4j_log<float>(float val) { return logf(val); } template<> math_def inline double nd4j_log<double>(double val) { return log(val); } template<> math_def inline int nd4j_log<int>(int val) { return logf((int) val); } template<> math_def inline float16 nd4j_pow<float16>(float16 val, float16 val2) { return (float16) powf((float) val, (float) val2); } template<> math_def inline float nd4j_pow<float>(float val, float val2) { return powf(val, val2); } template<> math_def inline double nd4j_pow<double>(double val, double val2) { return pow(val, val2); } template<> math_def inline int nd4j_pow<int>(int val, int val2) { return powf((float) val, (float) val2); } template<> math_def inline float16 nd4j_round<float16>(float16 val) { return (float16) roundf((float) val); } template<> math_def inline float nd4j_round<float>(float val) { return roundf(val); } template<> math_def inline float nd4j_remainder<float>(float num, float denom) { return remainderf(num, denom); } template<> math_def inline double nd4j_remainder<double>(double num, double denom) { return remainder(num, denom); } template<> math_def inline float16 nd4j_remainder<float16>(float16 num, float16 denom) { return (float16) remainderf((float) num, (float) denom); } template<> math_def inline float nd4j_fmod<float>(float num, float denom) { return fmodf(num, denom); } template<> math_def inline double nd4j_fmod<double>(double num, double denom) { return fmod(num, denom); } template<> math_def inline float16 nd4j_fmod<float16>(float16 num, float16 denom) { return (float16) fmodf((float) num, (float) denom); } template<> math_def inline float nd4j_erf<float>(float num) { return 
erff(num); } template<> math_def inline double nd4j_erf<double>(double num) { return erf(num); } template<> math_def inline float16 nd4j_erf<float16>(float16 num) { return (float16) erff((float) num); } template<> math_def inline float nd4j_erfc<float>(float num) { return erfcf(num); } template<> math_def inline double nd4j_erfc<double>(double num) { return erfc(num); } template<> math_def inline float16 nd4j_erfc<float16>(float16 num) { return (float16) erfcf((float) num); } template<> math_def inline double nd4j_round<double>(double val) { return round(val); } template<> math_def inline int nd4j_round<int>(int val) { return round((float) val); } template<> math_def inline float16 nd4j_sin<float16>(float16 val) { #ifdef NATIVE_HALFS return hsin(val.data); #else return (float16) sinf((float) val); #endif } template<> math_def inline float nd4j_sin<float>(float val) { return sinf(val); } template<> math_def inline double nd4j_sin<double>(double val) { return sin(val); } template<> math_def inline int nd4j_sin<int>(int val) { return sin((float) val); } template<> math_def inline float16 nd4j_sinh<float16>(float16 val) { #ifdef NATIVE_HALFS return hsin(val.data); #else return (float16) sinh((float) val); #endif } template<> math_def inline float nd4j_sinh<float>(float val) { return sinhf(val); } template<> math_def inline double nd4j_sinh<double>(double val) { return sinh(val); } template<> math_def inline int nd4j_sinh<int>(int val) { return sinhf((float) val); } template<> math_def inline float16 nd4j_sqrt<float16>(float16 val) { #ifdef NATIVE_HALFS return hsqrt(val.data); #else return (float16) sqrtf((float) val); #endif } template<> math_def inline float nd4j_sqrt<float>(float val) { return sqrtf(val); } template<> math_def inline double nd4j_sqrt<double>(double val) { return sqrt(val); } template<> math_def inline int nd4j_sqrt<int>(int val) { return sqrtf((float) val); } template<> math_def inline float16 nd4j_tanh<float16>(float16 val) { return (float16) 
tanhf((float) val); }
template<> math_def inline float nd4j_tanh<float>(float val) { return tanhf(val); }
template<> math_def inline double nd4j_tanh<double>(double val) { return tanh(val); }
template<> math_def inline int nd4j_tanh<int>(int val) { return tanhf((float) val); }

// Tangent.
template<> math_def inline float16 nd4j_tan<float16>(float16 val) { return (float16) tanf((float) val); }
template<> math_def inline float nd4j_tan<float>(float val) { return tanf(val); }
template<> math_def inline double nd4j_tan<double>(double val) { return tan(val); }
template<> math_def inline int nd4j_tan<int>(int val) { return tanf((float) val); }

// Inverse cosine.
template<> math_def inline float16 nd4j_acos<float16>(float16 val) { return (float16) acosf((float) val); }
template<> math_def inline float nd4j_acos<float>(float val) { return acosf(val); }
template<> math_def inline double nd4j_acos<double>(double val) { return acos(val); }
template<> math_def inline int nd4j_acos<int>(int val) { return acosf((float) val); }

// Inverse hyperbolic cosine.
template<> math_def inline float16 nd4j_acosh<float16>(float16 val) { return (float16) acoshf((float) val); }
template<> math_def inline float nd4j_acosh<float>(float val) { return acoshf(val); }
// FIX: this specialization called acos() (inverse cosine) instead of acosh()
// (inverse hyperbolic cosine); every other nd4j_acosh specialization uses the
// acosh family, so the double path returned the wrong function's value.
template<> math_def inline double nd4j_acosh<double>(double val) { return acosh(val); }
template<> math_def inline int nd4j_acosh<int>(int val) { return acoshf((float) val); }

// Inverse sine.
template<> math_def inline float16 nd4j_asin<float16>(float16 val) { return (float16) asinf((float) val); }
template<> math_def inline float nd4j_asin<float>(float val) { return asinf(val); }
template<> math_def inline double nd4j_asin<double>(double val) { return asin(val); }
template<> math_def inline int nd4j_asin<int>(int val) { return asinf((float) val); }

// Inverse tangent.
template<> math_def inline float16 nd4j_atan<float16>(float16 val) { return (float16) atanf((float)val); }
template<> math_def inline float nd4j_atan<float>(float val) { return atanf(val); }
template<> math_def inline double nd4j_atan<double>(double val) { return
atan(val); }
template<> math_def inline int nd4j_atan<int>(int val) { return atanf((float) val); }

// Inverse hyperbolic tangent.
template<> math_def inline float16 nd4j_atanh<float16>(float16 val) { return (float16) atanhf((float)val); }
template<> math_def inline float nd4j_atanh<float>(float val) { return atanhf(val); }
template<> math_def inline double nd4j_atanh<double>(double val) { return atanh(val); }
template<> math_def inline int nd4j_atanh<int>(int val) { return atanhf((float) val); }

// Swap the contents of two references of the same type.
template<typename T>
math_def inline void nd4j_swap(T &val1, T &val2) {
    T temp = val1; val1=val2; val2=temp;
};

#ifdef __CUDACC__
namespace atomics {
// Device-side atomic read-modify-write primitives.  Each applies the named
// operation to *address and returns the OLD value, matching CUDA atomic
// conventions.
template <typename T>
inline __device__ T nd4j_atomicAdd(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicSub(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicMul(T* address, T val);
template <typename T>
inline __device__ T nd4j_atomicDiv(T* address, T val);

// Double-precision atomic add built from a compare-and-swap loop: the
// double's bits are reinterpreted as a 64-bit integer so atomicCAS can be
// used (addition itself is performed in double precision on each retry).
template <>
inline __device__ double nd4j_atomicAdd<double>(double* address, double val) {
    unsigned long long int* address_as_ull = (unsigned long long int *) address;
    unsigned long long int old = *address_as_ull, assumed;
    do {
        assumed = old;
        old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val + __longlong_as_double(assumed)));
    } while (assumed != old);
    return __longlong_as_double(old);
}

// Half-precision atomic add: atomicCAS operates on a 32-bit word, so the
// target half is updated together with its neighboring half in the same word.
// NOTE(review): the 'misaligned' path assumes a half at a 2-byte-odd offset
// occupies the low half of the word at (addr - 2), and that PAIR is a
// project-declared word/half union whose layout matches that assumption --
// confirm against PAIR's definition.  The (long) cast of the pointer also
// presumes sizeof(long) >= sizeof(void*) on the target platform.
template <>
inline __device__ float16 nd4j_atomicAdd<float16>(float16* address, float16 val) {
    int* address_as_ull = (int*) address;
    long addr = (long) address;
    bool misaligned = addr & 0x3;
    if (misaligned)
        address_as_ull = (int *) (addr - 2);
    PAIR old, assumed, fresh;
    old.W = *address_as_ull;
    do {
        if (!misaligned) {
            float16 res = ((float16) old.B.H) + val;
            fresh.B.H = res.data;
            fresh.B.L = old.B.L;
        } else {
            float16 res = ((float16) old.B.L) + val;
            fresh.B.L = res.data;
            fresh.B.H = old.B.H;
        }
        assumed.W = old.W;
        old.W = atomicCAS(address_as_ull, assumed.W, fresh.W);
    } while (assumed.W != old.W);
    if (!misaligned) return old.B.H;
    else return old.B.L;
}

template
<> inline __device__ double nd4j_atomicSub<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int *) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val - __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicMul<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val * __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ double nd4j_atomicDiv<double>(double* address, double val) { unsigned long long int* address_as_ull = (unsigned long long int*) address; unsigned long long int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed,__double_as_longlong(val / __longlong_as_double(assumed))); } while (assumed != old); return __longlong_as_double(old); } template <> inline __device__ float nd4j_atomicAdd<float>(float* address, float val) { return atomicAdd(address,val); } template <> inline __device__ float nd4j_atomicSub<float>(float* address, float val) { int* address_as_ull = (int*) address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val - __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline __device__ float nd4j_atomicMul<float>(float* address, float val) { int* address_as_ull = ( int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } template <> inline 
__device__ float nd4j_atomicDiv<float>(float* address, float val) { int* address_as_ull = (int*)address; int old = *address_as_ull, assumed; do { assumed = old; old = atomicCAS(address_as_ull, assumed, __float_as_int(val * __float_as_int(assumed))); } while (assumed != old); return __int_as_float(old); } } #endif } } #endif /* TEMPLATEMATH_H_ */
deprecate.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % DDDD EEEEE PPPP RRRR EEEEE CCCC AAA TTTTT EEEEE % % D D E P P R R E C A A T E % % D D EEE PPPPP RRRR EEE C AAAAA T EEE % % D D E P R R E C A A T E % % DDDD EEEEE P R R EEEEE CCCC A A T EEEEE % % % % % % MagickCore Deprecated Methods % % % % Software Design % % Cristy % % October 2002 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://www.imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "magick/studio.h" #include "magick/blob.h" #include "magick/blob-private.h" #include "magick/cache.h" #include "magick/cache-view.h" #include "magick/channel.h" #include "magick/client.h" #include "magick/color.h" #include "magick/color-private.h" #include "magick/colormap.h" #include "magick/colormap-private.h" #include "magick/colorspace.h" #include "magick/composite.h" #include "magick/composite-private.h" #include "magick/constitute.h" #include "magick/deprecate.h" #include "magick/draw.h" #include "magick/draw-private.h" #include "magick/effect.h" #include "magick/enhance.h" #include "magick/exception.h" #include "magick/exception-private.h" #include "magick/fx.h" #include "magick/geometry.h" #include "magick/identify.h" #include "magick/image.h" #include "magick/image-private.h" #include "magick/list.h" #include "magick/log.h" #include "magick/memory_.h" #include "magick/magick.h" #include "magick/monitor.h" #include "magick/monitor-private.h" #include "magick/morphology.h" #include "magick/paint.h" #include "magick/pixel.h" #include "magick/pixel-accessor.h" #include "magick/pixel-private.h" #include "magick/property.h" #include "magick/quantize.h" #include "magick/random_.h" #include "magick/resource_.h" #include "magick/semaphore.h" #include "magick/semaphore-private.h" #include "magick/segment.h" #include "magick/splay-tree.h" #include "magick/statistic.h" #include "magick/string_.h" #include "magick/threshold.h" #include "magick/thread_.h" #include "magick/thread-private.h" #include "magick/token.h" #include "magick/transform.h" #include "magick/utility.h" #if !defined(MAGICKCORE_EXCLUDE_DEPRECATED) /* Global declarations. 
*/ static MonitorHandler monitor_handler = (MonitorHandler) NULL; /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e C a c h e V i e w I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireCacheViewIndexes() returns the indexes associated with the specified % view. % % Deprecated, replace with: % % GetCacheViewVirtualIndexQueue(cache_view); % % The format of the AcquireCacheViewIndexes method is: % % const IndexPacket *AcquireCacheViewIndexes(const CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % */ MagickExport const IndexPacket *AcquireCacheViewIndexes( const CacheView *cache_view) { return(GetCacheViewVirtualIndexQueue(cache_view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireCacheViewPixels() gets pixels from the in-memory or disk pixel cache % as defined by the geometry parameters. A pointer to the pixels is returned % if the pixels are transferred, otherwise a NULL is returned. % % Deprecated, replace with: % % GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception); % % The format of the AcquireCacheViewPixels method is: % % const PixelPacket *AcquireCacheViewPixels(const CacheView *cache_view, % const ssize_t x,const ssize_t y,const size_t columns, % const size_t rows,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport const PixelPacket *AcquireCacheViewPixels( const CacheView *cache_view,const ssize_t x,const ssize_t y, const size_t columns,const size_t rows,ExceptionInfo *exception) { return(GetCacheViewVirtualPixels(cache_view,x,y,columns,rows,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireImagePixels() returns an immutable pixel region. If the % region is successfully accessed, a pointer to it is returned, otherwise % NULL is returned. The returned pointer may point to a temporary working % copy of the pixels or it may point to the original pixels in memory. % Performance is maximized if the selected region is part of one row, or one % or more full rows, since there is opportunity to access the pixels in-place % (without a copy) if the image is in RAM, or in a memory-mapped file. The % returned pointer should *never* be deallocated by the user. % % Pixels accessed via the returned pointer represent a simple array of type % PixelPacket. If the image type is CMYK or the storage class is PseudoClass, % call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to access % the black color component or to obtain the colormap indexes (of type % IndexPacket) corresponding to the region. % % If you plan to modify the pixels, use GetAuthenticPixels() instead. % % Note, the AcquireImagePixels() and GetAuthenticPixels() methods are not % thread-safe. In a threaded environment, use GetCacheViewVirtualPixels() or % GetCacheViewAuthenticPixels() instead. 
% % Deprecated, replace with: % % GetVirtualPixels(image,x,y,columns,rows,exception); % % The format of the AcquireImagePixels() method is: % % const PixelPacket *AcquireImagePixels(const Image *image,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % % o exception: return any errors or warnings in this structure. % */ MagickExport const PixelPacket *AcquireImagePixels(const Image *image, const ssize_t x,const ssize_t y,const size_t columns, const size_t rows,ExceptionInfo *exception) { return(GetVirtualPixels(image,x,y,columns,rows,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e I n d e x e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireIndexes() returns the black channel or the colormap indexes % associated with the last call to QueueAuthenticPixels() or % GetVirtualPixels(). NULL is returned if the black channel or colormap % indexes are not available. % % Deprecated, replace with: % % GetVirtualIndexQueue(image); % % The format of the AcquireIndexes() method is: % % const IndexPacket *AcquireIndexes(const Image *image) % % A description of each parameter follows: % % o indexes: AcquireIndexes() returns the indexes associated with the last % call to QueueAuthenticPixels() or GetVirtualPixels(). % % o image: the image. 
% */ MagickExport const IndexPacket *AcquireIndexes(const Image *image) { return(GetVirtualIndexQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireMemory() returns a pointer to a block of memory at least size bytes % suitably aligned for any use. % % The format of the AcquireMemory method is: % % void *AcquireMemory(const size_t size) % % A description of each parameter follows: % % o size: the size of the memory in bytes to allocate. % */ MagickExport void *AcquireMemory(const size_t size) { void *allocation; assert(size != 0); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); allocation=malloc(size); return(allocation); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e C a c h e V i e w P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneCacheViewPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead. % % Deprecated, replace with: % % GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception); % % The format of the AcquireOneCacheViewPixel method is: % % MagickBooleanType AcquireOneCacheViewPixel(const CacheView *cache_view, % const ssize_t x,const ssize_t y,PixelPacket *pixel, % ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y: These values define the offset of the pixel. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType AcquireOneCacheViewPixel( const CacheView *cache_view,const ssize_t x,const ssize_t y, PixelPacket *pixel,ExceptionInfo *exception) { return(GetOneCacheViewVirtualPixel(cache_view,x,y,pixel,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e C a c h e V i e w V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneCacheViewVirtualPixel() returns a single pixel at the specified % (x,y) location. The image background color is returned if an error occurs. % If you plan to modify the pixel, use GetOneCacheViewAuthenticPixel() instead. % % Deprecated, replace with: % % GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method, % x,y,pixel,exception); % % The format of the AcquireOneCacheViewPixel method is: % % MagickBooleanType AcquireOneCacheViewVirtualPixel( % const CacheView *cache_view, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) % % A description of each parameter follows: % % o cache_view: the cache view. % % o virtual_pixel_method: the virtual pixel method. % % o x,y: These values define the offset of the pixel. % % o pixel: return a pixel at the specified (x,y) location. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport MagickBooleanType AcquireOneCacheViewVirtualPixel( const CacheView *cache_view,const VirtualPixelMethod virtual_pixel_method, const ssize_t x,const ssize_t y,PixelPacket *pixel,ExceptionInfo *exception) { MagickBooleanType status; status=GetOneCacheViewVirtualMethodPixel(cache_view,virtual_pixel_method, x,y,pixel,exception); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e M a g i c k P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneMagickPixel() returns a single pixel at the specified (x,y) % location. The image background color is returned if an error occurs. If % you plan to modify the pixel, use GetOnePixel() instead. % % Deprecated, replace with: % % MagickPixelPacket pixel; % GetOneVirtualMagickPixel(image,x,y,&pixel,exception); % % The format of the AcquireOneMagickPixel() method is: % % MagickPixelPacket AcquireOneMagickPixel(const Image image,const ssize_t x, % const ssize_t y,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickPixelPacket AcquireOneMagickPixel(const Image *image, const ssize_t x,const ssize_t y,ExceptionInfo *exception) { MagickPixelPacket pixel; (void) GetOneVirtualMagickPixel(image,x,y,&pixel,exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOnePixel() returns a single pixel at the specified (x,y) location. % The image background color is returned if an error occurs. If you plan to % modify the pixel, use GetOnePixel() instead. 
% % Deprecated, replace with: % % PixelPacket pixel; % GetOneVirtualPixel(image,x,y,&pixel,exception); % % The format of the AcquireOnePixel() method is: % % PixelPacket AcquireOnePixel(const Image image,const ssize_t x, % const ssize_t y,ExceptionInfo exception) % % A description of each parameter follows: % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o exception: return any errors or warnings in this structure. % */ MagickExport PixelPacket AcquireOnePixel(const Image *image,const ssize_t x, const ssize_t y,ExceptionInfo *exception) { PixelPacket pixel; (void) GetOneVirtualPixel(image,x,y,&pixel,exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e O n e V i r t u a l P i x e l % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireOneVirtualPixel() returns a single pixel at the specified (x,y) % location as defined by specified pixel method. The image background color % is returned if an error occurs. If you plan to modify the pixel, use % GetOnePixel() instead. % % Deprecated, replace with: % % PixelPacket pixel; % GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel,exception); % % The format of the AcquireOneVirtualPixel() method is: % % PixelPacket AcquireOneVirtualPixel(const Image image, % const VirtualPixelMethod virtual_pixel_method,const ssize_t x, % const ssize_t y,ExceptionInfo exception) % % A description of each parameter follows: % % o virtual_pixel_method: the virtual pixel method. % % o image: the image. % % o x,y: These values define the location of the pixel to return. % % o exception: return any errors or warnings in this structure. 
% */ MagickExport PixelPacket AcquireOneVirtualPixel(const Image *image, const VirtualPixelMethod virtual_pixel_method,const ssize_t x,const ssize_t y, ExceptionInfo *exception) { PixelPacket pixel; (void) GetOneVirtualMethodPixel(image,virtual_pixel_method,x,y,&pixel, exception); return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquirePixels() returns the pixels associated with the last call to % QueueAuthenticPixels() or GetVirtualPixels(). % % Deprecated, replace with: % % GetVirtualPixelQueue(image); % % The format of the AcquirePixels() method is: % % const PixelPacket *AcquirePixels(const Image image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const PixelPacket *AcquirePixels(const Image *image) { return(GetVirtualPixelQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A c q u i r e S e m a p h o r e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AcquireSemaphoreInfo() acquires a semaphore. % % The format of the AcquireSemaphoreInfo method is: % % void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info) % % A description of each parameter follows: % % o semaphore_info: Specifies a pointer to an SemaphoreInfo structure. 
% */ MagickExport void AcquireSemaphoreInfo(SemaphoreInfo **semaphore_info) { assert(semaphore_info != (SemaphoreInfo **) NULL); if (*semaphore_info == (SemaphoreInfo *) NULL) { InitializeMagickMutex(); LockMagickMutex(); if (*semaphore_info == (SemaphoreInfo *) NULL) *semaphore_info=AllocateSemaphoreInfo(); UnlockMagickMutex(); } } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n i t y I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffinityImage() replaces the colors of an image with the closest color from % a reference image. % % Deprecated, replace with: % % RemapImage(quantize_info,image,affinity_image); % % The format of the AffinityImage method is: % % MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info, % Image *image,const Image *affinity_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o image: the image. % % o affinity_image: the reference image. % */ MagickExport MagickBooleanType AffinityImage(const QuantizeInfo *quantize_info, Image *image,const Image *affinity_image) { return(RemapImage(quantize_info,image,affinity_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A f f i n i t y I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AffinityImages() replaces the colors of a sequence of images with the % closest color from a reference image. % % Deprecated, replace with: % % RemapImages(quantize_info,images,affinity_image); % % The format of the AffinityImage method is: % % MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info, % Image *images,Image *affinity_image) % % A description of each parameter follows: % % o quantize_info: Specifies a pointer to an QuantizeInfo structure. % % o images: the image sequence. 
% % o affinity_image: the reference image. % */ MagickExport MagickBooleanType AffinityImages(const QuantizeInfo *quantize_info, Image *images,const Image *affinity_image) { return(RemapImages(quantize_info,images,affinity_image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateImage() returns a pointer to an image structure initialized to % default values. % % Deprecated, replace with: % % AcquireImage(image_info); % % The format of the AllocateImage method is: % % Image *AllocateImage(const ImageInfo *image_info) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % */ MagickExport Image *AllocateImage(const ImageInfo *image_info) { return(AcquireImage(image_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e I m a g e C o l o r m a p % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateImageColormap() allocates an image colormap and initializes % it to a linear gray colorspace. If the image already has a colormap, % it is replaced. AllocateImageColormap() returns MagickTrue if successful, % otherwise MagickFalse if there is not enough memory. % % Deprecated, replace with: % % AcquireImageColormap(image,colors); % % The format of the AllocateImageColormap method is: % % MagickBooleanType AllocateImageColormap(Image *image, % const size_t colors) % % A description of each parameter follows: % % o image: the image. % % o colors: the number of colors in the image colormap. 
% */ MagickExport MagickBooleanType AllocateImageColormap(Image *image, const size_t colors) { return(AcquireImageColormap(image,colors)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateNextImage() initializes the next image in a sequence to % default values. The next member of image points to the newly allocated % image. If there is a memory shortage, next is assigned NULL. % % Deprecated, replace with: % % AcquireNextImage(image_info,image); % % The format of the AllocateNextImage method is: % % void AllocateNextImage(const ImageInfo *image_info,Image *image) % % A description of each parameter follows: % % o image_info: Many of the image default values are set from this % structure. For example, filename, compression, depth, background color, % and others. % % o image: the image. % */ MagickExport void AllocateNextImage(const ImageInfo *image_info,Image *image) { AcquireNextImage(image_info,image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A l l o c a t e S t r i n g % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AllocateString() allocates memory for a string and copies the source string % to that memory location (and returns it). % % The format of the AllocateString method is: % % char *AllocateString(const char *source) % % A description of each parameter follows: % % o source: A character string. 
% */ MagickExport char *AllocateString(const char *source) { char *destination; size_t length; assert(source != (const char *) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); length=strlen(source)+MaxTextExtent+1; destination=(char *) AcquireQuantumMemory(length,sizeof(*destination)); if (destination == (char *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); *destination='\0'; (void) CopyMagickString(destination,source,length); return(destination); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % A v e r a g e I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % AverageImages() takes a set of images and averages them together. Each % image in the set must have the same width and height. AverageImages() % returns a single image with each corresponding pixel component of each % image averaged. On failure, a NULL image is returned and exception % describes the reason for the failure. % % Deprecated, replace with: % % EvaluateImages(images,MeanEvaluateOperator,exception); % % The format of the AverageImages method is: % % Image *AverageImages(Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *AverageImages(const Image *images,ExceptionInfo *exception) { return(EvaluateImages(images,MeanEvaluateOperator,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % Extract a channel from the image. A channel is a particular color component % of each pixel in the image. 
% % Deprecated, replace with: % % SeparateImageChannel(image,channel); % % The format of the ChannelImage method is: % % unsigned int ChannelImage(Image *image,const ChannelType channel) % % A description of each parameter follows: % % o image: the image. % % o channel: Identify which channel to extract: RedChannel, GreenChannel, % BlueChannel, OpacityChannel, CyanChannel, MagentaChannel, YellowChannel, % or BlackChannel. % */ MagickExport unsigned int ChannelImage(Image *image,const ChannelType channel) { return(SeparateImageChannel(image,channel)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C h a n n e l T h r e s h o l d I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ChannelThresholdImage() changes the value of individual pixels based on % the intensity of each pixel channel. The result is a high-contrast image. % % The format of the ChannelThresholdImage method is: % % unsigned int ChannelThresholdImage(Image *image,const char *level) % % A description of each parameter follows: % % o image: the image. % % o level: define the threshold values. 
% */ MagickExport unsigned int ChannelThresholdImage(Image *image,const char *level) { MagickPixelPacket threshold; GeometryInfo geometry_info; unsigned int flags, status; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (level == (char *) NULL) return(MagickFalse); flags=ParseGeometry(level,&geometry_info); threshold.red=geometry_info.rho; threshold.green=geometry_info.sigma; if ((flags & SigmaValue) == 0) threshold.green=threshold.red; threshold.blue=geometry_info.xi; if ((flags & XiValue) == 0) threshold.blue=threshold.red; status=BilevelImageChannel(image,RedChannel,threshold.red); status&=BilevelImageChannel(image,GreenChannel,threshold.green); status&=BilevelImageChannel(image,BlueChannel,threshold.blue); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l i p I m a g e P a t h % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ClipPathImage() sets the image clip mask based any clipping path information % if it exists. % % Deprecated, replace with: % % ClipImagePath(image,pathname,inside); % % The format of the ClipImage method is: % % MagickBooleanType ClipPathImage(Image *image,const char *pathname, % const MagickBooleanType inside) % % A description of each parameter follows: % % o image: the image. % % o pathname: name of clipping path resource. If name is preceded by #, use % clipping path numbered by name. % % o inside: if non-zero, later operations take effect inside clipping path. % Otherwise later operations take effect outside clipping path. 
%
*/
MagickExport MagickBooleanType ClipPathImage(Image *image,const char *pathname,
  const MagickBooleanType inside)
{
  /*
    Deprecated shim: forward directly to ClipImagePath().
  */
  MagickBooleanType
    status;

  status=ClipImagePath(image,pathname,inside);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e I m a g e A t t r i b u t e s                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneImageAttributes() clones one or more image attributes.
%
%  Deprecated, replace with:
%
%    CloneImageProperties(image,clone_image);
%
%  The format of the CloneImageAttributes method is:
%
%      MagickBooleanType CloneImageAttributes(Image *image,
%        const Image *clone_image)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o clone_image: the clone image.
%
*/
MagickExport MagickBooleanType CloneImageAttributes(Image *image,
  const Image *clone_image)
{
  /*
    Deprecated shim: forward directly to CloneImageProperties().
  */
  MagickBooleanType
    status;

  status=CloneImageProperties(image,clone_image);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   C l o n e M e m o r y                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  CloneMemory() copies size bytes from memory area source to the destination.
%  Copying between objects that overlap will take place correctly.  It returns
%  destination.
%
%  The format of the CloneMemory method is:
%
%      void *CloneMemory(void *destination,const void *source,
%        const size_t size)
%
%  A description of each parameter follows:
%
%    o destination: the destination.
%
%    o source: the source.
%
%    o size: the size of the memory in bytes to allocate.
% */ MagickExport void *CloneMemory(void *destination,const void *source, const size_t size) { register const unsigned char *p; register unsigned char *q; register ssize_t i; assert(destination != (void *) NULL); assert(source != (const void *) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); p=(const unsigned char *) source; q=(unsigned char *) destination; if ((p <= q) || ((p+size) >= q)) return(CopyMagickMemory(destination,source,size)); /* Overlap, copy backwards. */ p+=size; q+=size; for (i=(ssize_t) (size-1); i >= 0; i--) *--q=(*--p); return(destination); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C l o s e C a c h e V i e w % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % CloseCacheView() closes the specified view returned by a previous call to % OpenCacheView(). % % Deprecated, replace with: % % DestroyCacheView(view_info); % % The format of the CloseCacheView method is: % % CacheView *CloseCacheView(CacheView *view_info) % % A description of each parameter follows: % % o view_info: the address of a structure of type CacheView. % */ MagickExport CacheView *CloseCacheView(CacheView *view_info) { return(DestroyCacheView(view_info)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % C o l o r F l o o d f i l l I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ColorFloodfill() changes the color value of any pixel that matches % target and is an immediate neighbor. If the method FillToBorderMethod is % specified, the color value is changed for any neighbor pixel that does not % match the bordercolor member of image. % % By default target must match a particular pixel color exactly. % However, in many cases two colors may differ by a small amount. 
%  The fuzz member of image defines how much tolerance is acceptable to
%  consider two colors as the same.  For example, set fuzz to 10 and the
%  color red at intensities of 100 and 102 respectively are now
%  interpreted as the same color for the purposes of the floodfill.
%
%  The format of the ColorFloodfillImage method is:
%
%      MagickBooleanType ColorFloodfillImage(Image *image,
%        const DrawInfo *draw_info,const PixelPacket target,
%        const ssize_t x_offset,const ssize_t y_offset,const PaintMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o draw_info: the draw info.
%
%    o target: the RGB value of the target color.
%
%    o x,y: the starting location of the operation.
%
%    o method: Choose either FloodfillMethod or FillToBorderMethod.
%
*/

/*
  Scanline flood-fill segment stack.  Each entry records a horizontal span
  (x1..x2) on row y1 plus the row delta (y2, +1 or -1) indicating which
  neighboring row to scan next.  Pushes that would leave the image are
  silently dropped; overflowing the fixed-size stack raises an exception.
*/
#define MaxStacksize  (1UL << 15)
#define PushSegmentStack(up,left,right,delta) \
{ \
  if (s >= (segment_stack+MaxStacksize)) \
    ThrowBinaryException(DrawError,"SegmentStackOverflow",image->filename) \
  else \
    { \
      if ((((up)+(delta)) >= 0) && (((up)+(delta)) < (ssize_t) image->rows)) \
        { \
          s->x1=(double) (left); \
          s->y1=(double) (up); \
          s->x2=(double) (right); \
          s->y2=(double) (delta); \
          s++; \
        } \
    } \
}

MagickExport MagickBooleanType ColorFloodfillImage(Image *image,
  const DrawInfo *draw_info,const PixelPacket target,const ssize_t x_offset,
  const ssize_t y_offset,const PaintMethod method)
{
  Image
    *floodplane_image;

  MagickBooleanType
    skip;

  PixelPacket
    fill_color;

  register SegmentInfo
    *s;

  SegmentInfo
    *segment_stack;

  ssize_t
    offset,
    start,
    x,
    x1,
    x2,
    y;

  /*
    Check boundary conditions.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(draw_info != (DrawInfo *) NULL);
  assert(draw_info->signature == MagickCoreSignature);
  if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns))
    return(MagickFalse);
  if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows))
    return(MagickFalse);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  /*
    The floodplane clone tracks visited pixels: a pixel is marked visited by
    setting its opacity to TransparentOpacity; the fill itself is applied in
    a second pass below.
  */
  floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue,
    &image->exception);
  if (floodplane_image == (Image *) NULL)
    return(MagickFalse);
  (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel);
  /*
    Set floodfill color.
  */
  segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize,
    sizeof(*segment_stack));
  if (segment_stack == (SegmentInfo *) NULL)
    {
      floodplane_image=DestroyImage(floodplane_image);
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
    }
  /*
    Push initial segment on stack.
  */
  x=x_offset;
  y=y_offset;
  start=0;
  s=segment_stack;
  PushSegmentStack(y,x,x,1);
  PushSegmentStack(y+1,x,x,-1);
  while (s > segment_stack)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Pop segment off stack.
    */
    s--;
    x1=(ssize_t) s->x1;
    x2=(ssize_t) s->x2;
    offset=(ssize_t) s->y2;
    y=(ssize_t) s->y1+offset;
    /*
      Recolor neighboring pixels.
    */
    p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception);
    q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1,
      &image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    p+=x1;
    q+=x1;
    /* scan left from x1, marking matching (FloodfillMethod) or
       non-border (FillToBorderMethod) pixels not yet visited */
    for (x=x1; x >= 0; x--)
    {
      if (q->opacity == (Quantum) TransparentOpacity)
        break;
      if (method == FloodfillMethod)
        {
          if (IsColorSimilar(image,p,&target) == MagickFalse)
            break;
        }
      else
        if (IsColorSimilar(image,p,&target) != MagickFalse)
          break;
      q->opacity=(Quantum) TransparentOpacity;
      p--;
      q--;
    }
    if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
      break;
    skip=x >= x1 ? MagickTrue : MagickFalse;
    if (skip == MagickFalse)
      {
        start=x+1;
        if (start < x1)
          PushSegmentStack(y,start,x1-1,-offset);
        x=x1+1;
      }
    do
    {
      if (skip == MagickFalse)
        {
          if (x < (ssize_t) image->columns)
            {
              p=GetVirtualPixels(image,x,y,image->columns-x,1,
                &image->exception);
              q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1,
                &image->exception);
              if ((p == (const PixelPacket *) NULL) ||
                  (q == (PixelPacket *) NULL))
                break;
              /* scan right from x, marking pixels belonging to the fill */
              for ( ; x < (ssize_t) image->columns; x++)
              {
                if (q->opacity == (Quantum) TransparentOpacity)
                  break;
                if (method == FloodfillMethod)
                  {
                    if (IsColorSimilar(image,p,&target) == MagickFalse)
                      break;
                  }
                else
                  if (IsColorSimilar(image,p,&target) != MagickFalse)
                    break;
                q->opacity=(Quantum) TransparentOpacity;
                p++;
                q++;
              }
              if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse)
                break;
            }
          /* queue the rows above/below the span just filled */
          PushSegmentStack(y,start,x-1,offset);
          if (x > (x2+1))
            PushSegmentStack(y,x2+1,x-1,-offset);
        }
      skip=MagickFalse;
      x++;
      if (x <= x2)
        {
          /* advance past pixels that do not belong to the fill, up to x2 */
          p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1,
            &image->exception);
          if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
            break;
          for ( ; x <= x2; x++)
          {
            if (q->opacity == (Quantum) TransparentOpacity)
              break;
            if (method == FloodfillMethod)
              {
                if (IsColorSimilar(image,p,&target) != MagickFalse)
                  break;
              }
            else
              if (IsColorSimilar(image,p,&target) == MagickFalse)
                break;
            p++;
            q++;
          }
        }
      start=x;
    } while (x <= x2);
  }
  /* second pass: composite the fill color over every visited pixel */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const PixelPacket
      *magick_restrict p;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    /*
      Tile fill color onto floodplane.
    */
    p=GetVirtualPixels(floodplane_image,0,y,image->columns,1,
      &image->exception);
    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL))
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (GetPixelOpacity(p) != OpaqueOpacity)
        {
          (void) GetFillColor(draw_info,x,y,&fill_color);
          MagickCompositeOver(&fill_color,(MagickRealType) fill_color.opacity,q,
            (MagickRealType) q->opacity,q);
        }
      p++;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
  segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack);
  floodplane_image=DestroyImage(floodplane_image);
  /* success only if the final compositing pass completed every row */
  return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  C o n s t i t u t e C o m p o n e n t G e n e s i s                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConstituteComponentGenesis() instantiates the constitute component.
%
%  The format of the ConstituteComponentGenesis method is:
%
%      MagickBooleanType ConstituteComponentGenesis(void)
%
*/
MagickExport MagickBooleanType ConstituteComponentGenesis(void)
{
  /* no per-process state to initialize */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  C o n s t i t u t e C o m p o n e n t T e r m i n u s                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ConstituteComponentTerminus() destroys the constitute component.
% % The format of the ConstituteComponentTerminus method is: % % ConstituteComponentTerminus(void) % */ MagickExport void ConstituteComponentTerminus(void) { } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageAttribute() deletes an attribute from the image. % % Deprecated, replace with: % % DeleteImageProperty(image,key); % % The format of the DeleteImageAttribute method is: % % MagickBooleanType DeleteImageAttribute(Image *image,const char *key) % % A description of each parameter follows: % % o image: the image info. % % o key: the image key. % */ MagickExport MagickBooleanType DeleteImageAttribute(Image *image, const char *key) { return(DeleteImageProperty(image,key)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteImageList() deletes an image at the specified position in the list. % % The format of the DeleteImageList method is: % % unsigned int DeleteImageList(Image *images,const ssize_t offset) % % A description of each parameter follows: % % o images: the image list. % % o offset: the position within the list. 
% */ MagickExport unsigned int DeleteImageList(Image *images,const ssize_t offset) { register ssize_t i; if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); while (GetPreviousImageInList(images) != (Image *) NULL) images=GetPreviousImageInList(images); for (i=0; i < offset; i++) { if (GetNextImageInList(images) == (Image *) NULL) return(MagickFalse); images=GetNextImageInList(images); } DeleteImageFromList(&images); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e l e t e M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DeleteMagickRegistry() deletes an entry in the registry as defined by the id. % It returns MagickTrue if the entry is deleted otherwise MagickFalse if no % entry is found in the registry that matches the id. % % Deprecated, replace with: % % char key[MaxTextExtent]; % FormatLocaleString(key,MaxTextExtent,"%ld\n",id); % DeleteImageRegistry(key); % % The format of the DeleteMagickRegistry method is: % % MagickBooleanType DeleteMagickRegistry(const ssize_t id) % % A description of each parameter follows: % % o id: the registry id. % */ MagickExport MagickBooleanType DeleteMagickRegistry(const ssize_t id) { char key[MaxTextExtent]; (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id); return(DeleteImageRegistry(key)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y C o n s t i t u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyConstitute() destroys the constitute component. 
% % The format of the DestroyConstitute method is: % % DestroyConstitute(void) % */ MagickExport void DestroyConstitute(void) { } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + D e s t r o y M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMagickRegistry() deallocates memory associated the magick registry. % % Deprecated, replace with: % % RegistryComponentTerminus(); % % The format of the DestroyMagickRegistry method is: % % void DestroyMagickRegistry(void) % */ MagickExport void DestroyMagickRegistry(void) { RegistryComponentTerminus(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s c r i b e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DescribeImage() describes an image by printing its attributes to the file. % Attributes include the image width, height, size, and others. % % Deprecated, replace with: % % IdentifyImage(image,file,verbose); % % The format of the DescribeImage method is: % % MagickBooleanType DescribeImage(Image *image,FILE *file, % const MagickBooleanType verbose) % % A description of each parameter follows: % % o image: the image. % % o file: the file, typically stdout. % % o verbose: A value other than zero prints more detailed information % about the image. % */ MagickExport MagickBooleanType DescribeImage(Image *image,FILE *file, const MagickBooleanType verbose) { return(IdentifyImage(image,file,verbose)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e A t t r i b u t e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImageAttributes() deallocates memory associated with the image % attribute list. 
% % The format of the DestroyImageAttributes method is: % % DestroyImageAttributes(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport void DestroyImageAttributes(Image *image) { assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if (image->attributes != (void *) NULL) image->attributes=(void *) DestroySplayTree((SplayTreeInfo *) image->attributes); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyImages() destroys an image list. % % Deprecated, replace with: % % DestroyImageList(image); % % The format of the DestroyImages method is: % % void DestroyImages(Image *image) % % A description of each parameter follows: % % o image: the image sequence. % */ MagickExport void DestroyImages(Image *image) { if (image == (Image *) NULL) return; if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3"); image=DestroyImageList(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D e s t r o y M a g i c k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DestroyMagick() destroys the ImageMagick environment. 
% % Deprecated, replace with: % % MagickCoreTerminus(); % % The format of the DestroyMagick function is: % % DestroyMagick(void) % */ MagickExport void DestroyMagick(void) { MagickCoreTerminus(); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % D i s p a t c h I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % DispatchImage() extracts pixel data from an image and returns it to you. % The method returns MagickFalse on success otherwise MagickTrue if an error is % encountered. The data is returned as char, short int, int, ssize_t, float, % or double in the order specified by map. % % Suppose you want to extract the first scanline of a 640x480 image as % character data in red-green-blue order: % % DispatchImage(image,0,0,640,1,"RGB",CharPixel,pixels,exception); % % Deprecated, replace with: % % ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels, % exception); % % The format of the DispatchImage method is: % % unsigned int DispatchImage(const Image *image,const ssize_t x_offset, % const ssize_t y_offset,const size_t columns, % const size_t rows,const char *map,const StorageType type, % void *pixels,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o x_offset, y_offset, columns, rows: These values define the perimeter % of a region of pixels you want to extract. % % o map: This string reflects the expected ordering of the pixel array. % It can be any combination or order of R = red, G = green, B = blue, % A = alpha, C = cyan, Y = yellow, M = magenta, K = black, or % I = intensity (for grayscale). % % o type: Define the data type of the pixels. Float and double types are % normalized to [0..1] otherwise [0..QuantumRange]. Choose from these % types: CharPixel, ShortPixel, IntegerPixel, LongPixel, FloatPixel, or % DoublePixel. 
%
%    o pixels: This array of values contain the pixel components as defined by
%      map and type.  You must preallocate this array where the expected
%      length varies depending on the values of width, height, map, and type.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int DispatchImage(const Image *image,const ssize_t x_offset,
  const ssize_t y_offset,const size_t columns,const size_t rows,
  const char *map,const StorageType type,void *pixels,ExceptionInfo *exception)
{
  unsigned int
    status;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  /* deprecated shim: the work is done by ExportImagePixels() */
  status=ExportImagePixels(image,x_offset,y_offset,columns,rows,map,type,pixels,
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   E x t r a c t S u b i m a g e F r o m I m a g e                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ExtractSubimageFromImageImage() extracts a region of the image that most
%  closely resembles the reference.
%
%  The format of the ExtractSubimageFromImageImage method is:
%
%      Image *ExtractSubimageFromImage(const Image *image,
%        const Image *reference,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o reference: find an area of the image that closely resembles this image.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Root-mean-square pixel distance between `reference' and the same-sized
  window of `image' anchored at (x_offset,y_offset).  Scans row by row and
  bails out early once the running normalized distance exceeds
  similarity_threshold (returning the partial value, which is already known
  to be worse than the current best).
*/
static double GetSimilarityMetric(const Image *image,const Image *reference,
  const ssize_t x_offset,const ssize_t y_offset,
  const double similarity_threshold,ExceptionInfo *exception)
{
  CacheView
    *image_view,
    *reference_view;

  double
    channels,
    normalized_similarity,
    similarity;

  ssize_t
    y;

  /*
    Compute the similarity in pixels between two images.
  */
  normalized_similarity=1.0;
  similarity=0.0;
  /* channel count: RGB, plus opacity and/or black when both images have it */
  channels=3;
  if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
    channels++;
  if ((image->colorspace == CMYKColorspace) &&
      (reference->colorspace == CMYKColorspace))
    channels++;
  image_view=AcquireVirtualCacheView(image,exception);
  reference_view=AcquireVirtualCacheView(reference,exception);
  for (y=0; y < (ssize_t) reference->rows; y++)
  {
    register const IndexPacket
      *indexes,
      *reference_indexes;

    register const PixelPacket
      *p,
      *q;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,x_offset,y_offset+y,
      reference->columns,1,exception);
    q=GetCacheViewVirtualPixels(reference_view,0,y,reference->columns,1,
      exception);
    if ((p == (const PixelPacket *) NULL) || (q == (const PixelPacket *) NULL))
      continue;
    indexes=GetCacheViewVirtualIndexQueue(image_view);
    reference_indexes=GetCacheViewVirtualIndexQueue(reference_view);
    for (x=0; x < (ssize_t) reference->columns; x++)
    {
      MagickRealType
        pixel;

      /* accumulate squared, quantum-scaled channel differences */
      pixel=QuantumScale*(GetPixelRed(p)-(double) GetPixelRed(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelGreen(p)-(double) GetPixelGreen(q));
      similarity+=pixel*pixel;
      pixel=QuantumScale*(GetPixelBlue(p)-(double) GetPixelBlue(q));
      similarity+=pixel*pixel;
      if ((image->matte != MagickFalse) && (reference->matte != MagickFalse))
        {
          pixel=QuantumScale*(GetPixelOpacity(p)-(double)
            GetPixelOpacity(q));
          similarity+=pixel*pixel;
        }
      if ((image->colorspace == CMYKColorspace) &&
          (reference->colorspace == CMYKColorspace))
        {
          pixel=QuantumScale*(GetPixelIndex(indexes+x)-(double)
            GetPixelIndex(reference_indexes+x));
          similarity+=pixel*pixel;
        }
      p++;
      q++;
    }
    normalized_similarity=sqrt(similarity)/reference->columns/reference->rows/
      channels;
    if (normalized_similarity > similarity_threshold)
      break;
  }
  reference_view=DestroyCacheView(reference_view);
  image_view=DestroyCacheView(image_view);
  return(normalized_similarity);
}

MagickExport Image *ExtractSubimageFromImage(Image *image,
  const Image *reference,ExceptionInfo *exception)
{
  double
    similarity_threshold;

  RectangleInfo
    offset;

  ssize_t
    y;

  /*
    Extract reference from image.
  */
  if ((reference->columns > image->columns) || (reference->rows > image->rows))
    return((Image *) NULL);
  /* start with a threshold no candidate can exceed, so the first window
     always becomes the current best */
  similarity_threshold=(double) image->columns*image->rows;
  SetGeometry(reference,&offset);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static,4)
#endif
  for (y=0; y < (ssize_t) (image->rows-reference->rows); y++)
  {
    double
      similarity;

    register ssize_t
      x;

    for (x=0; x < (ssize_t) (image->columns-reference->columns); x++)
    {
      /* NOTE(review): similarity_threshold is read here outside the critical
         section; under OpenMP a stale value only weakens the early-exit
         pruning, it does not change which minimum is recorded. */
      similarity=GetSimilarityMetric(image,reference,x,y,similarity_threshold,
        exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
      #pragma omp critical (MagickCore_ExtractSubimageFromImage)
#endif
      if (similarity < similarity_threshold)
        {
          similarity_threshold=similarity;
          offset.x=x;
          offset.y=y;
        }
    }
  }
  /* NOTE(review): the acceptance test compares the best normalized distance
     against QuantumScale*fuzz/100 -- confirm this scaling matches callers'
     expectations for the fuzz property. */
  if (similarity_threshold > (QuantumScale*reference->fuzz/100.0))
    return((Image *) NULL);
  return(CropImage(image,&offset,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F l a t t e n I m a g e                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FlattenImages() Obsolete Function: Use MergeImageLayers() instead.
%
%  Deprecated, replace with:
%
%    MergeImageLayers(image,FlattenLayer,exception);
%
%  The format of the FlattenImage method is:
%
%      Image *FlattenImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *FlattenImages(Image *image,ExceptionInfo *exception)
{
  /* deprecated shim: flatten via the modern layer-merge API */
  return(MergeImageLayers(image,FlattenLayer,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F o r m a t I m a g e A t t r i b u t e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FormatImageAttribute() permits formatted key/value pairs to be saved as an
%  image attribute.
%
%  The format of the FormatImageAttribute method is:
%
%      MagickBooleanType FormatImageAttribute(Image *image,const char *key,
%        const char *format,...)
%
%  A description of each parameter follows.
%
%   o  image:  The image.
%
%   o  key:  The attribute key.
%
%   o  format:  A string describing the format to use to write the remaining
%      arguments.
%
*/
MagickExport MagickBooleanType FormatImageAttributeList(Image *image,
  const char *key,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    n;

  /* NOTE(review): only the vsnprintf() path bounds the write; the vsprintf()
     fallback can overflow value[] for long formatted output. */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  n=vsprintf(value,format,operands);
#endif
  if (n < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,key,value));
}

MagickExport MagickBooleanType FormatImagePropertyList(Image *image,
  const char *property,const char *format,va_list operands)
{
  char
    value[MaxTextExtent];

  int
    n;

  /* same formatting contract as FormatImageAttributeList() above */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(value,MaxTextExtent,format,operands);
#else
  n=vsprintf(value,format,operands);
#endif
  if (n < 0)
    value[MaxTextExtent-1]='\0';
  return(SetImageProperty(image,property,value));
}

MagickExport MagickBooleanType FormatImageAttribute(Image *image,
  const char *key,const char *format,...)
{
  char
    value[MaxTextExtent];

  int
    n;

  va_list
    operands;

  /* varargs entry point: format the value, then store it as a property */
  va_start(operands,format);
  n=FormatLocaleStringList(value,MaxTextExtent,format,operands);
  (void) n;
  va_end(operands);
  return(SetImageProperty(image,key,value));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F o r m a t M a g i c k S t r i n g                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FormatMagickString() prints formatted output of a variable argument list.
%
%  The format of the FormatMagickString method is:
%
%      ssize_t FormatMagickString(char *string,const size_t length,
%        const char *format,...)
%
%  A description of each parameter follows.
%
%   o  string:  FormatMagickString() returns the formatted string in this
%      character buffer.
%
%   o  length: the maximum length of the string.
%
%   o  format:  A string describing the format to use to write the remaining
%      arguments.
%
*/
MagickExport ssize_t FormatMagickStringList(char *string,const size_t length,
  const char *format,va_list operands)
{
  int
    n;

  /* NOTE(review): the vsprintf() fallback ignores `length' entirely. */
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,length,format,operands);
#else
  n=vsprintf(string,format,operands);
#endif
  if (n < 0)
    string[length-1]='\0';
  return((ssize_t) n);
}

MagickExport ssize_t FormatMagickString(char *string,const size_t length,
  const char *format,...)
{
  ssize_t
    n;

  va_list
    operands;

  va_start(operands,format);
  n=(ssize_t) FormatMagickStringList(string,length,format,operands);
  va_end(operands);
  return(n);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   F o r m a t S t r i n g                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FormatString() prints formatted output of a variable argument list.
%
%  The format of the FormatString method is:
%
%      void FormatString(char *string,const char *format,...)
%
%  A description of each parameter follows.
%
%   o  string:  Method FormatString returns the formatted string in this
%      character buffer.
%
%   o  format:  A string describing the format to use to write the remaining
%      arguments.
%
*/
MagickExport void FormatStringList(char *string,const char *format,
  va_list operands)
{
  int
    n;

  /* NOTE(review): assumes the caller's buffer holds at least MaxTextExtent
     bytes -- confirm at call sites; the vsprintf() fallback is unbounded. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
#if defined(MAGICKCORE_HAVE_VSNPRINTF)
  n=vsnprintf(string,MaxTextExtent,format,operands);
#else
  n=vsprintf(string,format,operands);
#endif
  if (n < 0)
    string[MaxTextExtent-1]='\0';
}

MagickExport void FormatString(char *string,const char *format,...)
{
  va_list
    operands;

  /* deprecated varargs entry point; formatting done by the locale-aware
     FormatLocaleStringList() */
  va_start(operands,format);
  (void) FormatLocaleStringList(string,MaxTextExtent,format,operands);
  va_end(operands);
  return;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  F u z z y C o l o r M a t c h                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FuzzyColorMatch() returns true if two pixels are identical in color.
%
%  The format of the ColorMatch method is:
%
%      void FuzzyColorMatch(const PixelPacket *p,const PixelPacket *q,
%        const double fuzz)
%
%  A description of each parameter follows:
%
%    o p: Pixel p.
%
%    o q: Pixel q.
%
%    o distance:  Define how much tolerance is acceptable to consider
%      two colors as the same.
%
*/
MagickExport unsigned int FuzzyColorMatch(const PixelPacket *p,
  const PixelPacket *q,const double fuzz)
{
  MagickRealType
    delta[3];

  register MagickRealType
    distance;

  register ssize_t
    i;

  /*
    Exact-match fast path: with zero fuzz two pixels match only when every
    RGB component is identical.
  */
  if ((fuzz == 0.0) && (GetPixelRed(p) == GetPixelRed(q)) &&
      (GetPixelGreen(p) == GetPixelGreen(q)) &&
      (GetPixelBlue(p) == GetPixelBlue(q)))
    return(MagickTrue);
  /*
    Accumulate the squared Euclidean distance channel by channel, bailing
    out as soon as it exceeds the squared fuzz radius.
  */
  delta[0]=GetPixelRed(p)-(MagickRealType) GetPixelRed(q);
  delta[1]=GetPixelGreen(p)-(MagickRealType) GetPixelGreen(q);
  delta[2]=GetPixelBlue(p)-(MagickRealType) GetPixelBlue(q);
  distance=0.0;
  for (i=0; i < 3; i++)
  {
    distance+=delta[i]*delta[i];
    if (distance > (fuzz*fuzz))
      return(MagickFalse);
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%+  F u z z y C o l o r C o m p a r e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  FuzzyColorCompare() returns MagickTrue if the distance between two colors is
%  less than the specified distance in a linear three dimensional color space.
%  This method is used by ColorFloodFill() and other algorithms which
%  compare two colors.
%
%  The format of the FuzzyColorCompare method is:
%
%      void FuzzyColorCompare(const Image *image,const PixelPacket *p,
%        const PixelPacket *q)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o p: Pixel p.
%
%    o q: Pixel q.
% */ MagickExport MagickBooleanType FuzzyColorCompare(const Image *image, const PixelPacket *p,const PixelPacket *q) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5"); return(IsColorSimilar(image,p,q)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + F u z z y O p a c i t y C o m p a r e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % FuzzyOpacityCompare() returns true if the distance between two opacity % values is less than the specified distance in a linear color space. This % method is used by MatteFloodFill() and other algorithms which compare % two opacity values. % % Deprecated, replace with: % % IsOpacitySimilar(image,p,q); % % The format of the FuzzyOpacityCompare method is: % % void FuzzyOpacityCompare(const Image *image,const PixelPacket *p, % const PixelPacket *q) % % A description of each parameter follows: % % o image: the image. % % o p: Pixel p. % % o q: Pixel q. % */ MagickExport MagickBooleanType FuzzyOpacityCompare(const Image *image, const PixelPacket *p,const PixelPacket *q) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.5"); return(IsOpacitySimilar(image,p,q)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t C o n f i g u r e B l o b % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetConfigureBlob() returns the specified configure file as a blob. % % The format of the GetConfigureBlob method is: % % void *GetConfigureBlob(const char *filename,ExceptionInfo *exception) % % A description of each parameter follows: % % o filename: the configure file name. % % o path: return the full path information of the configure file. % % o length: This pointer to a size_t integer sets the initial length of the % blob. On return, it reflects the actual length of the blob. 
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetConfigureBlob(const char *filename,char *path,
  size_t *length,ExceptionInfo *exception)
{
  void
    *blob;

  /*
    NOTE(review): `path` is written with CopyMagickString/FormatLocaleString
    bounded by MaxTextExtent — callers presumably supply a buffer of at least
    MaxTextExtent bytes; confirm at call sites.
  */
  assert(filename != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",filename);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  assert(path != (char *) NULL);
  assert(length != (size_t *) NULL);
  assert(exception != (ExceptionInfo *) NULL);
  blob=(void *) NULL;
  (void) CopyMagickString(path,filename,MaxTextExtent);
#if defined(MAGICKCORE_INSTALLED_SUPPORT)
#if defined(MAGICKCORE_LIBRARY_PATH)
  if (blob == (void *) NULL)
    {
      /*
        Search hard coded paths.
      */
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s",
        MAGICKCORE_LIBRARY_PATH,filename);
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
#endif
#if defined(MAGICKCORE_WINDOWS_SUPPORT) && !(defined(MAGICKCORE_CONFIGURE_PATH) || defined(MAGICKCORE_SHARE_PATH))
  if (blob == (void *) NULL)
    {
      unsigned char
        *key_value;

      /*
        Locate file via registry key.
      */
      key_value=NTRegistryKeyLookup("ConfigurePath");
      if (key_value != (unsigned char *) NULL)
        {
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",(char *)
            key_value,DirectorySeparator,filename);
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
        }
    }
#endif
#else
  /*
    Uninstalled build: probe candidate locations in order — MAGICK_HOME,
    $HOME/.magick (falling back to USERPROFILE on Windows), the client
    executable's directory, then the current directory.  The first readable
    file wins; `path` is left holding the last candidate tried.
  */
  if (blob == (void *) NULL)
    {
      char
        *home;

      home=GetEnvironmentValue("MAGICK_HOME");
      if (home != (char *) NULL)
        {
          /*
            Search MAGICK_HOME.
          */
#if !defined(MAGICKCORE_POSIX_SUPPORT)
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",home,
            DirectorySeparator,filename);
#else
          (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",home,
            MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
          if (IsPathAccessible(path) != MagickFalse)
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
      home=GetEnvironmentValue("HOME");
      if (home == (char *) NULL)
        home=GetEnvironmentValue("USERPROFILE");
      if (home != (char *) NULL)
        {
          /*
            Search $HOME/.magick.
          */
          (void) FormatLocaleString(path,MaxTextExtent,"%s%s.magick%s%s",home,
            DirectorySeparator,DirectorySeparator,filename);
          if ((IsPathAccessible(path) != MagickFalse) &&
              (blob == (void *) NULL))
            blob=FileToBlob(path,~0UL,length,exception);
          home=DestroyString(home);
        }
    }
  if ((blob == (void *) NULL) && (*GetClientPath() != '\0'))
    {
#if !defined(MAGICKCORE_POSIX_SUPPORT)
      (void) FormatLocaleString(path,MaxTextExtent,"%s%s%s",GetClientPath(),
        DirectorySeparator,filename);
#else
      char
        prefix[MaxTextExtent];

      /*
        Search based on executable directory if directory is known.
      */
      (void) CopyMagickString(prefix,GetClientPath(),
        MaxTextExtent);
      ChopPathComponents(prefix,1);
      (void) FormatLocaleString(path,MaxTextExtent,"%s/lib/%s/%s",prefix,
        MAGICKCORE_LIBRARY_RELATIVE_PATH,filename);
#endif
      if (IsPathAccessible(path) != MagickFalse)
        blob=FileToBlob(path,~0UL,length,exception);
    }
  /*
    Search current directory.
  */
  if ((blob == (void *) NULL) && (IsPathAccessible(path) != MagickFalse))
    blob=FileToBlob(path,~0UL,length,exception);
#if defined(MAGICKCORE_WINDOWS_SUPPORT)
  /*
    Search Windows registry.
  */
  if (blob == (void *) NULL)
    blob=NTResourceToBlob(filename);
#endif
#endif
  /* All candidates exhausted: report a warning with the last path tried. */
  if (blob == (void *) NULL)
    (void) ThrowMagickException(exception,GetMagickModule(),ConfigureWarning,
      "UnableToOpenConfigureFile","`%s'",path);
  return(blob);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t C a c h e V i e w                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCacheView() gets pixels from the in-memory or disk pixel cache as
%  defined by the geometry parameters.  A pointer to the pixels is returned if
%  the pixels are transferred, otherwise a NULL is returned.
%
%  Deprecated, replace with:
%
%    GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
%      GetCacheViewException(cache_view));
%
%  The format of the GetCacheView method is:
%
%      PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows)
%
%  A description of each parameter follows:
%
%    o cache_view: the address of a structure of type CacheView.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
*/
MagickExport PixelPacket *GetCacheView(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  /* Deprecated trampoline: forward to GetCacheViewAuthenticPixels(). */
  pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view));
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t C a c h e V i e w I n d e x e s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetCacheViewIndexes() returns the indexes associated with the specified
%  view.
% % Deprecated, replace with: % % GetCacheViewAuthenticIndexQueue(cache_view); % % The format of the GetCacheViewIndexes method is: % % IndexPacket *GetCacheViewIndexes(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % */ MagickExport IndexPacket *GetCacheViewIndexes(CacheView *cache_view) { return(GetCacheViewAuthenticIndexQueue(cache_view)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t C a c h e V i e w P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as % defined by the geometry parameters. A pointer to the pixels is returned if % the pixels are transferred, otherwise a NULL is returned. % % Deprecated, replace with: % % GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows, % GetCacheViewException(cache_view)); % % The format of the GetCacheViewPixels method is: % % PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x, % const ssize_t y,const size_t columns,const size_t rows) % % A description of each parameter follows: % % o cache_view: the cache view. % % o x,y,columns,rows: These values define the perimeter of a region of % pixels. % */ MagickExport PixelPacket *GetCacheViewPixels(CacheView *cache_view,const ssize_t x, const ssize_t y,const size_t columns,const size_t rows) { PixelPacket *pixels; pixels=GetCacheViewAuthenticPixels(cache_view,x,y,columns,rows, GetCacheViewException(cache_view)); return(pixels); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t E x c e p t i o n I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetExceptionInfo() initializes an exception to default values. 
%
%  The format of the GetExceptionInfo method is:
%
%      GetExceptionInfo(ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o exception: the exception info.
%
*/
MagickExport void GetExceptionInfo(ExceptionInfo *exception)
{
  assert(exception != (ExceptionInfo *) NULL);
  /* Wipe any stale contents, then install the default members. */
  (void) ResetMagickMemory(exception,0,sizeof(*exception));
  exception->severity=UndefinedException;
  exception->semaphore=AllocateSemaphoreInfo();
  exception->exceptions=(void *) NewLinkedList(0);
  exception->signature=MagickCoreSignature;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e A t t r i b u t e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageAttribute() searches the list of image attributes and returns
%  a pointer to the attribute if it exists otherwise NULL.
%
%  The format of the GetImageAttribute method is:
%
%      const ImageAttribute *GetImageAttribute(const Image *image,
%        const char *key)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o key: These character strings are the name of an image attribute to
%      return.
% */ static void *DestroyAttribute(void *attribute) { register ImageAttribute *p; p=(ImageAttribute *) attribute; if (p->value != (char *) NULL) p->value=DestroyString(p->value); return(RelinquishMagickMemory(p)); } MagickExport const ImageAttribute *GetImageAttribute(const Image *image, const char *key) { const char *value; ImageAttribute *attribute; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1"); value=GetImageProperty(image,key); if (value == (const char *) NULL) return((const ImageAttribute *) NULL); if (image->attributes == (void *) NULL) ((Image *) image)->attributes=NewSplayTree(CompareSplayTreeString, RelinquishMagickMemory,DestroyAttribute); else { const ImageAttribute *attribute; attribute=(const ImageAttribute *) GetValueFromSplayTree((SplayTreeInfo *) image->attributes,key); if (attribute != (const ImageAttribute *) NULL) return(attribute); } attribute=(ImageAttribute *) AcquireMagickMemory(sizeof(*attribute)); if (attribute == (ImageAttribute *) NULL) ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed"); (void) ResetMagickMemory(attribute,0,sizeof(*attribute)); attribute->key=ConstantString(key); attribute->value=ConstantString(value); (void) AddValueToSplayTree((SplayTreeInfo *) ((Image *) image)->attributes, attribute->key,attribute); return((const ImageAttribute *) attribute); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e C l i p p i n g P a t h A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageClippingPathAttribute() searches the list of image attributes and % returns a pointer to a clipping path if it exists otherwise NULL. 
% % Deprecated, replace with: % % GetImageAttribute(image,"8BIM:1999,2998"); % % The format of the GetImageClippingPathAttribute method is: % % const ImageAttribute *GetImageClippingPathAttribute(Image *image) % % A description of each parameter follows: % % o attribute: Method GetImageClippingPathAttribute returns the clipping % path if it exists otherwise NULL. % % o image: the image. % */ MagickExport const ImageAttribute *GetImageClippingPathAttribute(Image *image) { return(GetImageAttribute(image,"8BIM:1999,2998")); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e F r o m M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageFromMagickRegistry() gets an image from the registry as defined by % its name. If the image is not found, a NULL image is returned. % % Deprecated, replace with: % % GetImageRegistry(ImageRegistryType,name,exception); % % The format of the GetImageFromMagickRegistry method is: % % Image *GetImageFromMagickRegistry(const char *name,ssize_t *id, % ExceptionInfo *exception) % % A description of each parameter follows: % % o name: the name of the image to retrieve from the registry. % % o id: the registry id. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *GetImageFromMagickRegistry(const char *name,ssize_t *id, ExceptionInfo *exception) { *id=0L; return((Image *) GetImageRegistry(ImageRegistryType,name,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t M a g i c k R e g i s t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickRegistry() gets a blob from the registry as defined by the id. If % the blob that matches the id is not found, NULL is returned. 
%
%  The format of the GetMagickRegistry method is:
%
%      const void *GetMagickRegistry(const ssize_t id,RegistryType *type,
%        size_t *length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o id: the registry id.
%
%    o type: the registry type.
%
%    o length: the blob length in number of bytes.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport void *GetMagickRegistry(const ssize_t id,RegistryType *type,
  size_t *length,ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  void
    *blob;

  /* Outputs default to "not found"; neither is updated on a hit here. */
  *type=UndefinedRegistryType;
  *length=0;
  /*
    NOTE(review): the key is formatted with a trailing "\n"; this only finds
    entries whose stored key was built the same way — verify against the
    registry writer before changing.
  */
  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  /* Probe each registry partition in turn; first hit wins. */
  blob=(void *) GetImageRegistry(ImageRegistryType,key,exception);
  if (blob != (void *) NULL)
    return(blob);
  blob=(void *) GetImageRegistry(ImageInfoRegistryType,key,exception);
  if (blob != (void *) NULL)
    return(blob);
  return((void *) GetImageRegistry(UndefinedRegistryType,key,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   G e t M a g i c k T o k e n                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetMagickToken() gets a token from the token stream.  A token is defined as
%  a sequence of characters delimited by whitespace (e.g. clip-path), a
%  sequence delimited with quotes (.e.g "Quote me"), or a sequence enclosed in
%  parenthesis (e.g. rgb(0,0,0)).  GetMagickToken() also recognizes these
%  separator characters: ':', '=', ',', and ';'.
%
%  The format of the GetMagickToken method is:
%
%      void GetMagickToken(const char *start,const char **end,char *token)
%
%  A description of each parameter follows:
%
%    o start: the start of the token sequence.
%
%    o end: point to the end of the token sequence.
%
%    o token: copy the token to this buffer.
%
*/
MagickExport void GetMagickToken(const char *start,const char **end,char *token)
{
  /* ~0UL: presumably "no extent limit" on the token buffer — confirm
     against GetNextToken()'s extent semantics. */
  GetNextToken(start,end,~0UL,token);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I m a g e G e o m e t r y                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageGeometry() returns a region as defined by the geometry string with
%  respect to the image and its gravity.
%
%  Deprecated, replace with:
%
%    if (size_to_fit != MagickFalse)
%      ParseRegionGeometry(image,geometry,region_info,&image->exception);
%    else
%      ParsePageGeometry(image,geometry,region_info,&image->exception);
%
%  The format of the GetImageGeometry method is:
%
%      int GetImageGeometry(Image *image,const char *geometry,
%        const unsigned int size_to_fit,RectangleInfo *region_info)
%
%  A description of each parameter follows:
%
%    o flags: Method GetImageGeometry returns a bitmask that indicates
%      which of the four values were located in the geometry string.
%
%    o geometry: The geometry (e.g. 100x100+10+10).
%
%    o size_to_fit: A value other than 0 means to scale the region so it
%      fits within the specified width and height.
%
%    o region_info: the region as defined by the geometry string with
%      respect to the image and its gravity.
% */ MagickExport int GetImageGeometry(Image *image,const char *geometry, const unsigned int size_to_fit,RectangleInfo *region_info) { if (image->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.4"); if (size_to_fit != MagickFalse) return((int) ParseRegionGeometry(image,geometry,region_info,&image->exception)); return((int) ParsePageGeometry(image,geometry,region_info,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageList() returns an image at the specified position in the list. % % Deprecated, replace with: % % CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue, % exception); % % The format of the GetImageList method is: % % Image *GetImageList(const Image *images,const ssize_t offset, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o offset: the position within the list. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *GetImageList(const Image *images,const ssize_t offset, ExceptionInfo *exception) { Image *image; if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); image=CloneImage(GetImageFromList(images,(ssize_t) offset),0,0,MagickTrue, exception); return(image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e L i s t I n d e x % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageListIndex() returns the position in the list of the specified % image. 
% % Deprecated, replace with: % % GetImageIndexInList(images); % % The format of the GetImageListIndex method is: % % ssize_t GetImageListIndex(const Image *images) % % A description of each parameter follows: % % o images: the image list. % */ MagickExport ssize_t GetImageListIndex(const Image *images) { if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(GetImageIndexInList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e L i s t S i z e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImageListSize() returns the number of images in the list. % % Deprecated, replace with: % % GetImageListLength(images); % % The format of the GetImageListSize method is: % % size_t GetImageListSize(const Image *images) % % A description of each parameter follows: % % o images: the image list. % */ MagickExport size_t GetImageListSize(const Image *images) { if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(GetImageListLength(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetImagePixels() obtains a pixel region for read/write access. If the % region is successfully accessed, a pointer to a PixelPacket array % representing the region is returned, otherwise NULL is returned. % % The returned pointer may point to a temporary working copy of the pixels % or it may point to the original pixels in memory. Performance is maximized % if the selected region is part of one row, or one or more full rows, since % then there is opportunity to access the pixels in-place (without a copy) % if the image is in RAM, or in a memory-mapped file. 
The returned pointer
%  should *never* be deallocated by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket. If the image type is CMYK or if the storage class is
%  PseduoClass, call GetAuthenticIndexQueue() after invoking GetImagePixels()
%  to obtain the black color component or colormap indexes (of type
%  IndexPacket) corresponding to the region.  Once the PixelPacket (and/or
%  IndexPacket) array has been updated, the changes must be saved back to the
%  underlying image using SyncAuthenticPixels() or they may be lost.
%
%  Deprecated, replace with:
%
%    GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
%  The format of the GetImagePixels() method is:
%
%      PixelPacket *GetImagePixels(Image *image,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,columns,rows: These values define the perimeter of a region of
%      pixels.
%
*/
MagickExport PixelPacket *GetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  /* Deprecated trampoline: defer to GetAuthenticPixels(). */
  pixels=GetAuthenticPixels(image,x,y,columns,rows,&image->exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t I n d e x e s                                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetIndexes() returns the black channel or the colormap indexes associated
%  with the last call to QueueAuthenticPixels() or GetVirtualPixels().  NULL
%  is returned if the black channel or colormap indexes are not available.
%
%  Deprecated, replace with:
%
%    GetAuthenticIndexQueue(image);
%
%  The format of the GetIndexes() method is:
%
%      IndexPacket *GetIndexes(const Image *image)
%
%  A description of each parameter follows:
%
%    o indexes: GetIndexes() returns the indexes associated with the last
%      call to QueueAuthenticPixels() or GetAuthenticPixels().
%
%    o image: the image.
% */ MagickExport IndexPacket *GetIndexes(const Image *image) { return(GetAuthenticIndexQueue(image)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + G e t M a g i c k G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetMagickGeometry() is similar to GetGeometry() except the returned % geometry is modified as determined by the meta characters: %, !, <, >, % and ~. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,x,y,width,height); % % The format of the GetMagickGeometry method is: % % unsigned int GetMagickGeometry(const char *geometry,ssize_t *x,ssize_t *y, % size_t *width,size_t *height) % % A description of each parameter follows: % % o geometry: Specifies a character string representing the geometry % specification. % % o x,y: A pointer to an integer. The x and y offset as determined by % the geometry specification is returned here. % % o width,height: A pointer to an unsigned integer. The width and height % as determined by the geometry specification is returned here. % */ MagickExport unsigned int GetMagickGeometry(const char *geometry,ssize_t *x, ssize_t *y,size_t *width,size_t *height) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3"); return(ParseMetaGeometry(geometry,x,y,width,height)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImage() returns the next image in a list. % % Deprecated, replace with: % % GetNextImageInList(images); % % The format of the GetNextImage method is: % % Image *GetNextImage(const Image *images) % % A description of each parameter follows: % % o images: the image list. 
% */ MagickExport Image *GetNextImage(const Image *images) { if (images->debug != MagickFalse) (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(GetNextImageInList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N e x t I m a g e A t t r i b u t e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNextImageAttribute() gets the next image attribute. % % Deprecated, replace with: % % const char *property; % property=GetNextImageProperty(image); % if (property != (const char *) NULL) % GetImageAttribute(image,property); % % The format of the GetNextImageAttribute method is: % % const ImageAttribute *GetNextImageAttribute(const Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport const ImageAttribute *GetNextImageAttribute(const Image *image) { const char *property; property=GetNextImageProperty(image); if (property == (const char *) NULL) return((const ImageAttribute *) NULL); return(GetImageAttribute(image,property)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % G e t N u m b e r S c e n e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % GetNumberScenes() returns the number of images in the list. % % Deprecated, replace with: % % GetImageListLength(image); % % The format of the GetNumberScenes method is: % % unsigned int GetNumberScenes(const Image *images) % % A description of each parameter follows: % % o images: the image list. 
%
*/
MagickExport unsigned int GetNumberScenes(const Image *image)
{
  size_t
    length;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  /* Deprecated trampoline: defer to GetImageListLength(). */
  length=GetImageListLength(image);
  return((unsigned int) length);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t O n e P i x e l                                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetOnePixel() returns a single pixel at the specified (x,y) location.
%  The image background color is returned if an error occurs.
%
%  Deprecated, replace with:
%
%    GetOneAuthenticPixel(image,x,y,&pixel,&image->exception);
%
%  The format of the GetOnePixel() method is:
%
%      PixelPacket GetOnePixel(const Image image,const ssize_t x,
%        const ssize_t y)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y:  These values define the location of the pixel to return.
%
*/
MagickExport PixelPacket GetOnePixel(Image *image,const ssize_t x,const ssize_t y)
{
  PixelPacket
    one_pixel;

  /*
    Deprecated trampoline: the return status is intentionally ignored; on
    failure the pixel holds whatever GetOneAuthenticPixel() stored.
  */
  (void) GetOneAuthenticPixel(image,x,y,&one_pixel,&image->exception);
  return(one_pixel);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P i x e l s                                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPixels() returns the pixels associated with the last call to
%  QueueAuthenticPixels() or GetAuthenticPixels().
%
%  Deprecated, replace with:
%
%    GetAuthenticPixelQueue(image);
%
%  The format of the GetPixels() method is:
%
%      PixelPacket *GetPixels(const Image image)
%
%  A description of each parameter follows:
%
%    o pixels: GetPixels() returns the pixels associated with the last call
%      to QueueAuthenticPixels() or GetAuthenticPixels().
%
%    o image: the image.
%
*/
MagickExport PixelPacket *GetPixels(const Image *image)
{
  PixelPacket
    *pixels;

  /* Deprecated trampoline: defer to GetAuthenticPixelQueue(). */
  pixels=GetAuthenticPixelQueue(image);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   G e t P r e v i o u s I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetPreviousImage() returns the previous image in a list.
%
%  Deprecated, replace with:
%
%    GetPreviousImageInList(images));
%
%  The format of the GetPreviousImage method is:
%
%      Image *GetPreviousImage(const Image *images)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
*/
MagickExport Image *GetPreviousImage(const Image *images)
{
  Image
    *previous;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  /* Deprecated trampoline: defer to GetPreviousImageInList(). */
  previous=GetPreviousImageInList(images);
  return(previous);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   H S L T r a n s f o r m                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  HSLTransform() converts a (hue, saturation, lightness) to a (red, green,
%  blue) triple.
%
%  The format of the HSLTransformImage method is:
%
%      void HSLTransform(const double hue,const double saturation,
%        const double lightness,Quantum *red,Quantum *green,Quantum *blue)
%
%  A description of each parameter follows:
%
%    o hue, saturation, lightness: A double value representing a
%      component of the HSL color space.
%
%    o red, green, blue: A pointer to a pixel component of type Quantum.
% */ static inline MagickRealType HueToRGB(MagickRealType m1,MagickRealType m2, MagickRealType hue) { if (hue < 0.0) hue+=1.0; if (hue > 1.0) hue-=1.0; if ((6.0*hue) < 1.0) return(m1+6.0*(m2-m1)*hue); if ((2.0*hue) < 1.0) return(m2); if ((3.0*hue) < 2.0) return(m1+6.0*(m2-m1)*(2.0/3.0-hue)); return(m1); } MagickExport void HSLTransform(const double hue,const double saturation, const double lightness,Quantum *red,Quantum *green,Quantum *blue) { MagickRealType b, g, r, m1, m2; /* Convert HSL to RGB colorspace. */ assert(red != (Quantum *) NULL); assert(green != (Quantum *) NULL); assert(blue != (Quantum *) NULL); if (lightness <= 0.5) m2=lightness*(saturation+1.0); else m2=lightness+saturation-lightness*saturation; m1=2.0*lightness-m2; r=HueToRGB(m1,m2,hue+1.0/3.0); g=HueToRGB(m1,m2,hue); b=HueToRGB(m1,m2,hue-1.0/3.0); *red=ClampToQuantum((MagickRealType) QuantumRange*r); *green=ClampToQuantum((MagickRealType) QuantumRange*g); *blue=ClampToQuantum((MagickRealType) QuantumRange*b); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I d e n t i t y A f f i n e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % IdentityAffine() initializes the affine transform to the identity matrix. % % The format of the IdentityAffine method is: % % IdentityAffine(AffineMatrix *affine) % % A description of each parameter follows: % % o affine: A pointer the affine transform of type AffineMatrix. 
% */ MagickExport void IdentityAffine(AffineMatrix *affine) { (void) LogMagickEvent(TraceEvent,GetMagickModule(),"..."); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); assert(affine != (AffineMatrix *) NULL); (void) ResetMagickMemory(affine,0,sizeof(AffineMatrix)); affine->sx=1.0; affine->sy=1.0; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n i t i a l i z e M a g i c k % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InitializeMagick() initializes the ImageMagick environment. % % Deprecated, replace with: % % MagickCoreGenesis(path,MagickFalse); % % The format of the InitializeMagick function is: % % InitializeMagick(const char *path) % % A description of each parameter follows: % % o path: the execution path of the current ImageMagick client. % */ MagickExport void InitializeMagick(const char *path) { MagickCoreGenesis(path,MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p o l a t e P i x e l C o l o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpolatePixelColor() applies bi-linear or tri-linear interpolation % between a pixel and it's neighbors. % % The format of the InterpolatePixelColor method is: % % MagickPixelPacket InterpolatePixelColor(const Image *image, % CacheView *view_info,InterpolatePixelMethod method,const double x, % const double y,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o image_view: the image cache view. % % o type: the type of pixel color interpolation. % % o x,y: A double representing the current (x,y) position of the pixel. % % o exception: return any errors or warnings in this structure. 
%
*/

/*
  BicubicInterpolate(): evaluate a cubic polynomial through the four samples
  in `pixels` at fractional offset dx, per channel (plus the index channel
  for CMYK).  Coefficients p,q,r,s follow the same derivation for every
  channel.
*/
static void BicubicInterpolate(const MagickPixelPacket *pixels,const double dx,
  MagickPixelPacket *pixel)
{
  MagickRealType
    dx2,
    p,
    q,
    r,
    s;

  dx2=dx*dx;
  p=(pixels[3].red-pixels[2].red)-(pixels[0].red-pixels[1].red);
  q=(pixels[0].red-pixels[1].red)-p;
  r=pixels[2].red-pixels[0].red;
  s=pixels[1].red;
  pixel->red=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  p=(pixels[3].green-pixels[2].green)-(pixels[0].green-pixels[1].green);
  q=(pixels[0].green-pixels[1].green)-p;
  r=pixels[2].green-pixels[0].green;
  s=pixels[1].green;
  pixel->green=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  p=(pixels[3].blue-pixels[2].blue)-(pixels[0].blue-pixels[1].blue);
  q=(pixels[0].blue-pixels[1].blue)-p;
  r=pixels[2].blue-pixels[0].blue;
  s=pixels[1].blue;
  pixel->blue=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  p=(pixels[3].opacity-pixels[2].opacity)-(pixels[0].opacity-pixels[1].opacity);
  q=(pixels[0].opacity-pixels[1].opacity)-p;
  r=pixels[2].opacity-pixels[0].opacity;
  s=pixels[1].opacity;
  pixel->opacity=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
  /* The index channel only carries data in CMYK colorspace. */
  if (pixel->colorspace == CMYKColorspace)
    {
      p=(pixels[3].index-pixels[2].index)-(pixels[0].index-pixels[1].index);
      q=(pixels[0].index-pixels[1].index)-p;
      r=pixels[2].index-pixels[0].index;
      s=pixels[1].index;
      pixel->index=(dx*dx2*p)+(dx2*q)+(dx*r)+s;
    }
}

/*
  CubicWeightingFunction(): piecewise-cubic kernel weight for offset x —
  NOTE(review): resembles a uniform cubic B-spline basis (hence the /6.0),
  but confirm against the original interpolation reference before relying on
  that name.
*/
static inline MagickRealType CubicWeightingFunction(const MagickRealType x)
{
  MagickRealType
    alpha,
    gamma;

  alpha=MagickMax(x+2.0,0.0);
  gamma=1.0*alpha*alpha*alpha;
  alpha=MagickMax(x+1.0,0.0);
  gamma-=4.0*alpha*alpha*alpha;
  alpha=MagickMax(x+0.0,0.0);
  gamma+=6.0*alpha*alpha*alpha;
  alpha=MagickMax(x-1.0,0.0);
  gamma-=4.0*alpha*alpha*alpha;
  return(gamma/6.0);
}

/*
  MeshInterpolate(): barycentric blend of p, x and y weighted by delta.
*/
static inline double MeshInterpolate(const PointInfo *delta,const double p,
  const double x,const double y)
{
  return(delta->x*x+delta->y*y+(1.0-delta->x-delta->y)*p);
}

/*
  NearestNeighbor(): round-half-away-from-zero to the nearest integer index.
*/
static inline ssize_t NearestNeighbor(MagickRealType x)
{
  if (x >= 0.0)
    return((ssize_t) (x+0.5));
  return((ssize_t) (x-0.5));
}

MagickExport MagickPixelPacket InterpolatePixelColor(const Image *image,
  CacheView *image_view,const
InterpolatePixelMethod method,const double x, const double y,ExceptionInfo *exception) { MagickPixelPacket pixel; register const IndexPacket *indexes; register const PixelPacket *p; register ssize_t i; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); assert(image_view != (CacheView *) NULL); GetMagickPixelPacket(image,&pixel); switch (method) { case AverageInterpolatePixel: { double gamma; MagickPixelPacket pixels[16]; MagickRealType alpha[16]; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t) floor(y)-1,4,4,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 16L; i++) { GetMagickPixelPacket(image,pixels+i); SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; pixels[i].blue*=alpha[i]; if (image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } gamma=alpha[i]; gamma=PerceptibleReciprocal(gamma); pixel.red+=gamma*0.0625*pixels[i].red; pixel.green+=gamma*0.0625*pixels[i].green; pixel.blue+=gamma*0.0625*pixels[i].blue; pixel.opacity+=0.0625*pixels[i].opacity; if (image->colorspace == CMYKColorspace) pixel.index+=gamma*0.0625*pixels[i].index; p++; } break; } case BicubicInterpolatePixel: { MagickPixelPacket pixels[16], u[4]; MagickRealType alpha[16]; PointInfo delta; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t) floor(y)-1,4,4,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 4L; i++) GetMagickPixelPacket(image,u+i); for (i=0; i < 16L; i++) { GetMagickPixelPacket(image,pixels+i); SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; 
pixels[i].blue*=alpha[i]; if (image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } p++; } delta.x=x-floor(x); for (i=0; i < 4L; i++) { GetMagickPixelPacket(image,pixels+4*i); BicubicInterpolate(pixels+4*i,delta.x,u+i); } delta.y=y-floor(y); BicubicInterpolate(u,delta.y,&pixel); break; } case BilinearInterpolatePixel: default: { double gamma; MagickPixelPacket pixels[16]; MagickRealType alpha[16]; PointInfo delta; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t) floor(y),2,2,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 4L; i++) { GetMagickPixelPacket(image,pixels+i); SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; pixels[i].blue*=alpha[i]; if (image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } p++; } delta.x=x-floor(x); delta.y=y-floor(y); gamma=(((1.0-delta.y)*((1.0-delta.x)*alpha[0]+delta.x*alpha[1])+delta.y* ((1.0-delta.x)*alpha[2]+delta.x*alpha[3]))); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].red+delta.x* pixels[1].red)+delta.y*((1.0-delta.x)*pixels[2].red+delta.x* pixels[3].red)); pixel.green=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].green+delta.x* pixels[1].green)+delta.y*((1.0-delta.x)*pixels[2].green+ delta.x*pixels[3].green)); pixel.blue=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].blue+delta.x* pixels[1].blue)+delta.y*((1.0-delta.x)*pixels[2].blue+delta.x* pixels[3].blue)); pixel.opacity=((1.0-delta.y)*((1.0-delta.x)*pixels[0].opacity+delta.x* pixels[1].opacity)+delta.y*((1.0-delta.x)*pixels[2].opacity+delta.x* pixels[3].opacity)); if (image->colorspace == CMYKColorspace) pixel.index=gamma*((1.0-delta.y)*((1.0-delta.x)*pixels[0].index+delta.x* pixels[1].index)+delta.y*((1.0-delta.x)*pixels[2].index+delta.x* 
pixels[3].index)); break; } case FilterInterpolatePixel: { Image *excerpt_image, *filter_image; MagickPixelPacket pixels[1]; RectangleInfo geometry; geometry.width=4L; geometry.height=4L; geometry.x=(ssize_t) floor(x)-1L; geometry.y=(ssize_t) floor(y)-1L; excerpt_image=ExcerptImage(image,&geometry,exception); if (excerpt_image == (Image *) NULL) break; filter_image=ResizeImage(excerpt_image,1,1,image->filter,image->blur, exception); excerpt_image=DestroyImage(excerpt_image); if (filter_image == (Image *) NULL) break; p=GetVirtualPixels(filter_image,0,0,1,1,exception); if (p == (const PixelPacket *) NULL) { filter_image=DestroyImage(filter_image); break; } indexes=GetVirtualIndexQueue(filter_image); GetMagickPixelPacket(image,pixels); SetMagickPixelPacket(image,p,indexes,&pixel); filter_image=DestroyImage(filter_image); break; } case IntegerInterpolatePixel: { MagickPixelPacket pixels[1]; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t) floor(y),1,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); GetMagickPixelPacket(image,pixels); SetMagickPixelPacket(image,p,indexes,&pixel); break; } case MeshInterpolatePixel: { double gamma; MagickPixelPacket pixels[4]; MagickRealType alpha[4]; PointInfo delta, luminance; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x),(ssize_t) floor(y),2,2,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); for (i=0; i < 4L; i++) { GetMagickPixelPacket(image,pixels+i); SetMagickPixelPacket(image,p,indexes+i,pixels+i); alpha[i]=1.0; if (image->matte != MagickFalse) { alpha[i]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[i].red*=alpha[i]; pixels[i].green*=alpha[i]; pixels[i].blue*=alpha[i]; if (image->colorspace == CMYKColorspace) pixels[i].index*=alpha[i]; } p++; } delta.x=x-floor(x); delta.y=y-floor(y); luminance.x=MagickPixelLuma(pixels+0)-MagickPixelLuma(pixels+3); 
luminance.y=MagickPixelLuma(pixels+1)-MagickPixelLuma(pixels+2); if (fabs(luminance.x) < fabs(luminance.y)) { /* Diagonal 0-3 NW-SE. */ if (delta.x <= delta.y) { /* Bottom-left triangle (pixel:2, diagonal: 0-3). */ delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[2],alpha[3],alpha[0]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[2].red, pixels[3].red,pixels[0].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[2].green, pixels[3].green,pixels[0].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[2].blue, pixels[3].blue,pixels[0].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[2].opacity, pixels[3].opacity,pixels[0].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[2].index, pixels[3].index,pixels[0].index); } else { /* Top-right triangle (pixel:1, diagonal: 0-3). */ delta.x=1.0-delta.x; gamma=MeshInterpolate(&delta,alpha[1],alpha[0],alpha[3]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[1].red, pixels[0].red,pixels[3].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[1].green, pixels[0].green,pixels[3].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[1].blue, pixels[0].blue,pixels[3].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[1].opacity, pixels[0].opacity,pixels[3].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[1].index, pixels[0].index,pixels[3].index); } } else { /* Diagonal 1-2 NE-SW. */ if (delta.x <= (1.0-delta.y)) { /* Top-left triangle (pixel 0, diagonal: 1-2). 
*/ gamma=MeshInterpolate(&delta,alpha[0],alpha[1],alpha[2]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[0].red, pixels[1].red,pixels[2].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[0].green, pixels[1].green,pixels[2].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[0].blue, pixels[1].blue,pixels[2].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[0].opacity, pixels[1].opacity,pixels[2].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[0].index, pixels[1].index,pixels[2].index); } else { /* Bottom-right triangle (pixel: 3, diagonal: 1-2). */ delta.x=1.0-delta.x; delta.y=1.0-delta.y; gamma=MeshInterpolate(&delta,alpha[3],alpha[2],alpha[1]); gamma=PerceptibleReciprocal(gamma); pixel.red=gamma*MeshInterpolate(&delta,pixels[3].red, pixels[2].red,pixels[1].red); pixel.green=gamma*MeshInterpolate(&delta,pixels[3].green, pixels[2].green,pixels[1].green); pixel.blue=gamma*MeshInterpolate(&delta,pixels[3].blue, pixels[2].blue,pixels[1].blue); pixel.opacity=gamma*MeshInterpolate(&delta,pixels[3].opacity, pixels[2].opacity,pixels[1].opacity); if (image->colorspace == CMYKColorspace) pixel.index=gamma*MeshInterpolate(&delta,pixels[3].index, pixels[2].index,pixels[1].index); } } break; } case NearestNeighborInterpolatePixel: { MagickPixelPacket pixels[1]; p=GetCacheViewVirtualPixels(image_view,NearestNeighbor(x), NearestNeighbor(y),1,1,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); GetMagickPixelPacket(image,pixels); SetMagickPixelPacket(image,p,indexes,&pixel); break; } case SplineInterpolatePixel: { double gamma; MagickPixelPacket pixels[16]; MagickRealType alpha[16], dx, dy; PointInfo delta; ssize_t j, n; p=GetCacheViewVirtualPixels(image_view,(ssize_t) floor(x)-1,(ssize_t) floor(y)-1,4,4,exception); if (p == (const PixelPacket *) NULL) break; indexes=GetCacheViewVirtualIndexQueue(image_view); n=0; 
delta.x=x-floor(x); delta.y=y-floor(y); for (i=(-1); i < 3L; i++) { dy=CubicWeightingFunction((MagickRealType) i-delta.y); for (j=(-1); j < 3L; j++) { GetMagickPixelPacket(image,pixels+n); SetMagickPixelPacket(image,p,indexes+n,pixels+n); alpha[n]=1.0; if (image->matte != MagickFalse) { alpha[n]=QuantumScale*((MagickRealType) GetPixelAlpha(p)); pixels[n].red*=alpha[n]; pixels[n].green*=alpha[n]; pixels[n].blue*=alpha[n]; if (image->colorspace == CMYKColorspace) pixels[n].index*=alpha[n]; } dx=CubicWeightingFunction(delta.x-(MagickRealType) j); gamma=alpha[n]; gamma=PerceptibleReciprocal(gamma); pixel.red+=gamma*dx*dy*pixels[n].red; pixel.green+=gamma*dx*dy*pixels[n].green; pixel.blue+=gamma*dx*dy*pixels[n].blue; if (image->matte != MagickFalse) pixel.opacity+=dx*dy*pixels[n].opacity; if (image->colorspace == CMYKColorspace) pixel.index+=gamma*dx*dy*pixels[n].index; n++; p++; } } break; } } return(pixel); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % I n t e r p r e t I m a g e A t t r i b u t e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % InterpretImageAttributes() replaces any embedded formatting characters with % the appropriate image attribute and returns the translated text. % % Deprecated, replace with: % % InterpretImageProperties(image_info,image,embed_text); % % The format of the InterpretImageAttributes method is: % % char *InterpretImageAttributes(const ImageInfo *image_info,Image *image, % const char *embed_text) % % A description of each parameter follows: % % o image_info: the image info. % % o image: the image. % % o embed_text: the address of a character string containing the embedded % formatting characters. 
%
*/
MagickExport char *InterpretImageAttributes(const ImageInfo *image_info,
  Image *image,const char *embed_text)
{
  /* Deprecated shim: forward to the modern property interpreter. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  return(InterpretImageProperties(image_info,image,embed_text));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I n v e r s e s R G B C o m p a n d o r                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  InversesRGBCompandor() removes the gamma function from a sRGB pixel.
%
%  The format of the InversesRGBCompandor method is:
%
%      MagickRealType InversesRGBCompandor(const MagickRealType pixel)
%
%  A description of each parameter follows:
%
%    o pixel: the pixel.
%
*/
MagickExport MagickRealType InversesRGBCompandor(const MagickRealType pixel)
{
  /*
    sRGB EOTF: linear segment below the 0.0404482... breakpoint, power-2.4
    curve above it.
  */
  if (pixel <= (0.0404482362771076*QuantumRange))
    return(pixel/12.92);
  return(QuantumRange*pow((QuantumScale*pixel+0.055)/1.055,2.4));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   I s M a g i c k I n s t a n t i a t e d                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsMagickInstantiated() returns MagickTrue if the ImageMagick environment
%  is currently instantiated:  MagickCoreGenesis() has been called but
%  MagickDestroy() has not.
%
%  The format of the IsMagickInstantiated method is:
%
%      MagickBooleanType IsMagickInstantiated(void)
%
*/
MagickExport MagickBooleanType IsMagickInstantiated(void)
{
  /* Deprecated shim onto the modern query. */
  return(IsMagickCoreInstantiated());
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   I s S u b i m a g e                                                       %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsSubimage() returns MagickTrue if the geometry is a valid subimage
%  specification (e.g. [1], [1-9], [1,7,4]).
%
%  The format of the IsSubimage method is:
%
%      unsigned int IsSubimage(const char *geometry,const unsigned int pedantic)
%
%  A description of each parameter follows:
%
%    o geometry: This string is the geometry specification.
%
%    o pedantic: A value other than 0 invokes a more restrictive set of
%      conditions for a valid specification (e.g. [1], [1-4], [4-1]).
%
*/
MagickExport unsigned int IsSubimage(const char *geometry,
  const unsigned int pedantic)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (geometry == (const char *) NULL)
    return(MagickFalse);
  /* an 'x' (or 'X') marks a width x height geometry, not a subimage list */
  if ((strchr(geometry,'x') != (char *) NULL) ||
      (strchr(geometry,'X') != (char *) NULL))
    return(MagickFalse);
  /* pedantic mode additionally rejects comma-separated lists */
  if ((pedantic != MagickFalse) && (strchr(geometry,',') != (char *) NULL))
    return(MagickFalse);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   L e v e l I m a g e C o l o r s                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  LevelImageColor() will map the given color to "black" and "white" values,
%  linearly spreading out the colors, and level values on a channel by
%  channel basis, as per LevelImage().  The given colors allows you to specify
%  different level ranges for each of the color channels separately.
%
%  If the boolean 'invert' is set true the image values will be modified in the
%  reverse direction. That is any existing "black" and "white" colors in the
%  image will become the color values given, with all other values compressed
%  appropriately.  This effectively maps a greyscale gradient into the given
%  color gradient.
% % Deprecated, replace with: % % LevelColorsImageChannel(image,channel,black_color,white_color,invert); % % The format of the LevelImageColors method is: % % MagickBooleanType LevelImageColors(Image *image,const ChannelType channel, % const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, % const MagickBooleanType invert) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel. % % o black_color: The color to map black to/from % % o white_point: The color to map white to/from % % o invert: if true map the colors (levelize), rather than from (level) % */ MagickBooleanType LevelImageColors(Image *image,const ChannelType channel, const MagickPixelPacket *black_color,const MagickPixelPacket *white_color, const MagickBooleanType invert) { return(LevelColorsImageChannel(image,channel,black_color,white_color,invert)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i b e r a t e M e m o r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiberateMemory() frees memory that has already been allocated, and NULL's % the pointer to it. % % The format of the LiberateMemory method is: % % void LiberateMemory(void **memory) % % A description of each parameter follows: % % o memory: A pointer to a block of memory to free for reuse. % */ MagickExport void LiberateMemory(void **memory) { assert(memory != (void **) NULL); (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7"); if (*memory == (void *) NULL) return; free(*memory); *memory=(void *) NULL; } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % L i b e r a t e S e m a p h o r e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % LiberateSemaphoreInfo() relinquishes a semaphore. 
%
%  Deprecated, replace with:
%
%    UnlockSemaphoreInfo(*semaphore_info);
%
%  The format of the LiberateSemaphoreInfo method is:
%
%      LiberateSemaphoreInfo(void **semaphore_info)
%
%  A description of each parameter follows:
%
%    o semaphore_info: Specifies a pointer to an SemaphoreInfo structure.
%
*/
MagickExport void LiberateSemaphoreInfo(SemaphoreInfo **semaphore_info)
{
  /* Deprecated shim: simply unlock the semaphore. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  UnlockSemaphoreInfo(*semaphore_info);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k I n c a r n a t e                                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickIncarnate() initializes the ImageMagick environment.
%
%  Deprecated, replace with:
%
%    MagickCoreGenesis(path,MagickFalse);
%
%  The format of the MagickIncarnate function is:
%
%      MagickIncarnate(const char *path)
%
%  A description of each parameter follows:
%
%    o path: the execution path of the current ImageMagick client.
%
*/
MagickExport void MagickIncarnate(const char *path)
{
  /* Deprecated shim onto the modern MagickCore entry point. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  MagickCoreGenesis(path,MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a g i c k M o n i t o r                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MagickMonitor() calls the monitor handler method with a text string that
%  describes the task and a measure of completion.  The method returns
%  MagickTrue on success otherwise MagickFalse if an error is encountered, e.g.
%  if there was a user interrupt.
%
%  The format of the MagickMonitor method is:
%
%      MagickBooleanType MagickMonitor(const char *text,
%        const MagickOffsetType offset,const MagickSizeType span,
%        void *client_data)
%
%  A description of each parameter follows:
%
%    o offset: the position relative to the span parameter which represents
%      how much progress has been made toward completing a task.
%
%    o span: the span relative to completing a task.
%
%    o client_data: the client data.
%
*/
MagickExport MagickBooleanType MagickMonitor(const char *text,
  const MagickOffsetType offset,const MagickSizeType span,
  void *magick_unused(client_data))
{
  ExceptionInfo
    *exception;

  MagickBooleanType
    status;

  magick_unreferenced(client_data);
  assert(text != (const char *) NULL);
  (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",text);
  ProcessPendingEvents(text);
  status=MagickTrue;
  /* a throw-away exception: the legacy handler signature requires one */
  exception=AcquireExceptionInfo();
  if (monitor_handler != (MonitorHandler) NULL)
    status=(*monitor_handler)(text,offset,span,exception);
  exception=DestroyExceptionInfo(exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a p I m a g e                                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MapImage() replaces the colors of an image with the closest color from a
%  reference image.
%
%  Deprecated, replace with:
%
%    QuantizeInfo quantize_info;
%    GetQuantizeInfo(&quantize_info);
%    quantize_info.dither=dither;
%    RemapImage(&quantize_info,image,map_image);
%
%  The format of the MapImage method is:
%
%      MagickBooleanType MapImage(Image *image,const Image *map_image,
%        const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to an Image structure.
%
%    o map_image: the image.  Reduce image to a set of colors represented by
%      this image.
%
%    o dither: Set this integer value to something other than zero to
%      dither the mapped image.
%
*/
MagickExport MagickBooleanType MapImage(Image *image,const Image *map_image,
  const MagickBooleanType dither)
{
  QuantizeInfo
    remap_info;

  /*
    Deprecated shim: validate both images, then remap this image's colors
    onto the palette of map_image via the modern RemapImage() API, carrying
    the caller's dither preference through a default QuantizeInfo.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(map_image != (Image *) NULL);
  assert(map_image->signature == MagickCoreSignature);
  GetQuantizeInfo(&remap_info);
  remap_info.dither=dither;
  return(RemapImage(&remap_info,image,map_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a p I m a g e s                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MapImages() replaces the colors of a sequence of images with the closest
%  color from a reference image.
%
%  Deprecated, replace with:
%
%    QuantizeInfo quantize_info;
%    GetQuantizeInfo(&quantize_info);
%    quantize_info.dither=dither;
%    RemapImages(&quantize_info,images,map_image);
%
%  The format of the MapImage method is:
%
%      MagickBooleanType MapImages(Image *images,Image *map_image,
%        const MagickBooleanType dither)
%
%  A description of each parameter follows:
%
%    o image: Specifies a pointer to a set of Image structures.
%
%    o map_image: the image.  Reduce image to a set of colors represented by
%      this image.
%
%    o dither: Set this integer value to something other than zero to
%      dither the quantized image.
%
*/
MagickExport MagickBooleanType MapImages(Image *images,const Image *map_image,
  const MagickBooleanType dither)
{
  QuantizeInfo
    remap_info;

  /*
    Deprecated shim: remap the colors of the whole image sequence onto the
    palette of map_image via the modern RemapImages() API, carrying the
    caller's dither preference through a default QuantizeInfo.
  */
  assert(images != (Image *) NULL);
  assert(images->signature == MagickCoreSignature);
  if (images->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",images->filename);
  GetQuantizeInfo(&remap_info);
  remap_info.dither=dither;
  return(RemapImages(&remap_info,images,map_image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M a t t e F l o o d f i l l I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MatteFloodfill() changes the transparency value of any pixel that matches
%  target and is an immediate neighbor.  If the method FillToBorderMethod
%  is specified, the transparency value is changed for any neighbor pixel
%  that does not match the bordercolor member of image.
%
%  By default target must match a particular pixel transparency exactly.
%  However, in many cases two transparency values may differ by a
%  small amount.  The fuzz member of image defines how much tolerance is
%  acceptable to consider two transparency values as the same.  For example,
%  set fuzz to 10 and the opacity values of 100 and 102 respectively are
%  now interpreted as the same value for the purposes of the floodfill.
%
%  The format of the MatteFloodfillImage method is:
%
%      MagickBooleanType MatteFloodfillImage(Image *image,
%        const PixelPacket target,const Quantum opacity,const ssize_t x_offset,
%        const ssize_t y_offset,const PaintMethod method)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o opacity: the level of transparency: 0 is fully opaque and QuantumRange is
%      fully transparent.
%
%    o x,y: the starting location of the operation.
%
%    o method: Choose either FloodfillMethod or FillToBorderMethod.
% */ MagickExport MagickBooleanType MatteFloodfillImage(Image *image, const PixelPacket target,const Quantum opacity,const ssize_t x_offset, const ssize_t y_offset,const PaintMethod method) { Image *floodplane_image; MagickBooleanType skip; register SegmentInfo *s; SegmentInfo *segment_stack; ssize_t offset, start, x, x1, x2, y; /* Check boundary conditions. */ assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); if ((x_offset < 0) || (x_offset >= (ssize_t) image->columns)) return(MagickFalse); if ((y_offset < 0) || (y_offset >= (ssize_t) image->rows)) return(MagickFalse); if (SetImageStorageClass(image,DirectClass) == MagickFalse) return(MagickFalse); if (image->matte == MagickFalse) (void) SetImageAlphaChannel(image,OpaqueAlphaChannel); floodplane_image=CloneImage(image,image->columns,image->rows,MagickTrue, &image->exception); if (floodplane_image == (Image *) NULL) return(MagickFalse); (void) SetImageAlphaChannel(floodplane_image,OpaqueAlphaChannel); /* Set floodfill color. */ segment_stack=(SegmentInfo *) AcquireQuantumMemory(MaxStacksize, sizeof(*segment_stack)); if (segment_stack == (SegmentInfo *) NULL) { floodplane_image=DestroyImage(floodplane_image); ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed", image->filename); } /* Push initial segment on stack. */ x=x_offset; y=y_offset; start=0; s=segment_stack; PushSegmentStack(y,x,x,1); PushSegmentStack(y+1,x,x,-1); while (s > segment_stack) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; /* Pop segment off stack. */ s--; x1=(ssize_t) s->x1; x2=(ssize_t) s->x2; offset=(ssize_t) s->y2; y=(ssize_t) s->y1+offset; /* Recolor neighboring pixels. 
*/ p=GetVirtualPixels(image,0,y,(size_t) (x1+1),1,&image->exception); q=GetAuthenticPixels(floodplane_image,0,y,(size_t) (x1+1),1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; p+=x1; q+=x1; for (x=x1; x >= 0; x--) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) == MagickFalse) break; } else if (IsColorSimilar(image,p,&target) != MagickFalse) break; q->opacity=(Quantum) TransparentOpacity; q--; p--; } if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse) break; skip=x >= x1 ? MagickTrue : MagickFalse; if (skip == MagickFalse) { start=x+1; if (start < x1) PushSegmentStack(y,start,x1-1,-offset); x=x1+1; } do { if (skip == MagickFalse) { if (x < (ssize_t) image->columns) { p=GetVirtualPixels(image,x,y,image->columns-x,1, &image->exception); q=GetAuthenticPixels(floodplane_image,x,y,image->columns-x,1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for ( ; x < (ssize_t) image->columns; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) == MagickFalse) break; } else if (IsColorSimilar(image,p,&target) != MagickFalse) break; q->opacity=(Quantum) TransparentOpacity; q++; p++; } if (SyncAuthenticPixels(floodplane_image,&image->exception) == MagickFalse) break; } PushSegmentStack(y,start,x-1,offset); if (x > (x2+1)) PushSegmentStack(y,x2+1,x-1,-offset); } skip=MagickFalse; x++; if (x <= x2) { p=GetVirtualPixels(image,x,y,(size_t) (x2-x+1),1, &image->exception); q=GetAuthenticPixels(floodplane_image,x,y,(size_t) (x2-x+1),1, &image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for ( ; x <= x2; x++) { if (q->opacity == (Quantum) TransparentOpacity) break; if (method == FloodfillMethod) { if (IsColorSimilar(image,p,&target) != MagickFalse) break; } else if 
(IsColorSimilar(image,p,&target) == MagickFalse) break; p++; q++; } } start=x; } while (x <= x2); } for (y=0; y < (ssize_t) image->rows; y++) { register const PixelPacket *magick_restrict p; register ssize_t x; register PixelPacket *magick_restrict q; /* Tile fill color onto floodplane. */ p=GetVirtualPixels(floodplane_image,0,y,image->columns,1, &image->exception); q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception); if ((p == (const PixelPacket *) NULL) || (q == (PixelPacket *) NULL)) break; for (x=0; x < (ssize_t) image->columns; x++) { if (GetPixelOpacity(p) != OpaqueOpacity) q->opacity=opacity; p++; q++; } if (SyncAuthenticPixels(image,&image->exception) == MagickFalse) break; } segment_stack=(SegmentInfo *) RelinquishMagickMemory(segment_stack); floodplane_image=DestroyImage(floodplane_image); return(y == (ssize_t) image->rows ? MagickTrue : MagickFalse); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M a x i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MaximumImages() returns the maximum intensity of an image sequence. % % Deprecated, replace with: % % EvaluateImages(images,MinEvaluateOperator,exception); % % The format of the MaxImages method is: % % Image *MaximumImages(Image *images,ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image sequence. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *MaximumImages(const Image *images,ExceptionInfo *exception) { return(EvaluateImages(images,MinEvaluateOperator,exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % M i n i m u m I m a g e s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % MinimumImages() returns the minimum intensity of an image sequence. 
%
%  Deprecated, replace with:
%
%    EvaluateImages(images,MinEvaluateOperator,exception);
%
%  The format of the MinimumImages method is:
%
%      Image *MinimumImages(Image *images,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image sequence.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MinimumImages(const Image *images,ExceptionInfo *exception)
{
  /* Deprecated shim: per-pixel minimum across the sequence. */
  return(EvaluateImages(images,MinEvaluateOperator,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M e d i a n F i l t e r I m a g e                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MedianFilterImage() applies a digital filter that improves the quality
%  of a noisy image.  Each pixel is replaced by the median in a set of
%  neighboring pixels as defined by radius.
%
%  The algorithm was contributed by Mike Edmonds and implements an insertion
%  sort for selecting median color-channel values.  For more on this algorithm
%  see "Skip Lists: A probabilistic Alternative to Balanced Trees" by William
%  Pugh in the June 1990 of Communications of the ACM.
%
%  The format of the MedianFilterImage method is:
%
%      Image *MedianFilterImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MedianFilterImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated shim: a median filter is the MedianStatistic case of the
    modern StatisticImage() API, with the radius truncated to an integral
    neighborhood size in both dimensions.
  */
  return(StatisticImage(image,MedianStatistic,(size_t) radius,(size_t) radius,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o d e I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ModeImage() makes each pixel the 'predominant color' of the neighborhood
%  of the specified radius.
%
%  The format of the ModeImage method is:
%
%      Image *ModeImage(const Image *image,const double radius,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o radius: the radius of the pixel neighborhood.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ModeImage(const Image *image,const double radius,
  ExceptionInfo *exception)
{
  /*
    Deprecated shim: the mode filter is the ModeStatistic case of the
    modern StatisticImage() API.
  */
  return(StatisticImage(image,ModeStatistic,(size_t) radius,(size_t) radius,
    exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   M o s a i c I m a g e s                                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  MosaicImages() Obsolete Function: Use MergeImageLayers() instead.
%
%  Deprecated, replace with:
%
%    MergeImageLayers(image,MosaicLayer,exception);
%
%  The format of the MosaicImage method is:
%
%      Image *MosaicImages(const Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image list to be composited together
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *MosaicImages(Image *image,ExceptionInfo *exception)
{
  /* Deprecated thin wrapper: flatten the list onto a mosaic canvas. */
  return(MergeImageLayers(image,MosaicLayer,exception));
}

/*
  OpaqueImage() changes any pixel that matches the target color with the fill
  color.  By default a pixel must match exactly; the image fuzz factor
  (consulted by IsColorSimilar) allows approximate matches.  Deprecated since
  v6.1.0; replace with OpaquePaintImage().

  Note: target and fill are passed BY VALUE (const PixelPacket), unlike the
  replacement API which takes pointers.

  A description of each parameter follows:

    o image: the image (modified in place).
    o target: the RGB value of the target color.
    o fill: the replacement color.

  Always returns MagickTrue, even if the pixel traversal stopped early.
*/
MagickExport MagickBooleanType OpaqueImage(Image *image,
  const PixelPacket target,const PixelPacket fill)
{
#define OpaqueImageTag  "Opaque/Image"

  MagickBooleanType
    proceed;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Make image color opaque.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  switch (image->storage_class)
  {
    case DirectClass:
    default:
    {
      /*
        Make DirectClass image opaque: rewrite matching pixels row by row.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          if (IsColorSimilar(image,q,&target) != MagickFalse)
            *q=fill;
          q++;
        }
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
        /* Progress callback may abort the traversal. */
        proceed=SetImageProgress(image,OpaqueImageTag,(MagickOffsetType) y,
          image->rows);
        if (proceed == MagickFalse)
          break;
      }
      break;
    }
    case PseudoClass:
    {
      /*
        Make PseudoClass image opaque: rewrite matching colormap entries,
        then (only when fill is not fully opaque) rewrite per-pixel opacity.
      */
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        if (IsColorSimilar(image,&image->colormap[i],&target) != MagickFalse)
          image->colormap[i]=fill;
      }
      if (fill.opacity != OpaqueOpacity)
        {
          for (y=0; y < (ssize_t) image->rows; y++)
          {
            register ssize_t
              x;

            register PixelPacket
              *magick_restrict q;

            q=GetAuthenticPixels(image,0,y,image->columns,1,
              &image->exception);
            if (q == (PixelPacket *) NULL)
              break;
            for (x=0; x < (ssize_t) image->columns; x++)
            {
              if (IsColorSimilar(image,q,&target) != MagickFalse)
                q->opacity=fill.opacity;
              q++;
            }
            if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
              break;
          }
        }
      /* Re-sync pixels with the updated colormap. */
      (void) SyncImage(image);
      break;
    }
  }
  /* A translucent fill implies the image now carries an alpha channel. */
  if (fill.opacity != OpaqueOpacity)
    image->matte=MagickTrue;
  return(MagickTrue);
}

/*
  OpenCacheView() opens a view into the pixel cache, using the virtual pixel
  method that is defined within the given image itself.
%
%  Deprecated, replace with:
%
%    AcquireVirtualCacheView(image,&image->exception);
%
%  A description of each parameter follows:
%
%    o image: the image.
%
*/
MagickExport CacheView *OpenCacheView(const Image *image)
{
  /* Deprecated: cache errors are reported through image->exception. */
  return(AcquireVirtualCacheView(image,&((Image *) image)->exception));
}

/*
  OpenMagickStream() opens the file at the specified path and returns the
  associated stream, or NULL on failure (errno is set to EINVAL when either
  argument is NULL).

  A description of each parameter follows:

    o path: the file path.
    o mode: the file mode.

  On builds with _wfopen available (Windows), path and mode are converted
  from UTF-8 to UTF-16 first so non-ASCII paths open correctly; fopen() is
  the fallback.
*/
#if defined(MAGICKCORE_HAVE__WFOPEN)
/*
  UTF8ToUTF16(): convert a UTF-8 byte string to UTF-16.  When utf16 is
  non-NULL, writes the converted string (including the terminating NUL) and
  returns the number of wide characters written.  When utf16 is NULL, runs
  in counting mode: validates the input and returns its UTF-8 byte length,
  which is an upper bound on the UTF-16 length (callers use it only to size
  the allocation).  Returns 0 if the input is not valid UTF-8.  Only 1- to
  3-byte sequences (BMP code points) are handled; 4-byte sequences are
  rejected.
*/
static size_t UTF8ToUTF16(const unsigned char *utf8,wchar_t *utf16)
{
  register const unsigned char
    *p;

  if (utf16 != (wchar_t *) NULL)
    {
      register wchar_t
        *q;

      wchar_t
        c;

      /*
        Convert UTF-8 to UTF-16.
      */
      q=utf16;
      for (p=utf8; *p != '\0'; p++)
      {
        if ((*p & 0x80) == 0)
          *q=(*p);               /* 1-byte (ASCII) sequence */
        else
          if ((*p & 0xE0) == 0xC0)
            {
              /* 2-byte sequence: 110xxxxx 10xxxxxx */
              c=(*p);
              *q=(c & 0x1F) << 6;
              p++;
              if ((*p & 0xC0) != 0x80)
                return(0);
              *q|=(*p & 0x3F);
            }
          else
            if ((*p & 0xF0) == 0xE0)
              {
                /* 3-byte sequence: 1110xxxx 10xxxxxx 10xxxxxx */
                c=(*p);
                *q=c << 12;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                c=(*p);
                *q|=(c & 0x3F) << 6;
                p++;
                if ((*p & 0xC0) != 0x80)
                  return(0);
                *q|=(*p & 0x3F);
              }
            else
              return(0);
        q++;
      }
      *q++='\0';
      return(q-utf16);
    }
  /*
    Compute UTF-16 string length (validation pass; see note above: this
    returns the UTF-8 byte count, an upper bound).
  */
  for (p=utf8; *p != '\0'; p++)
  {
    if ((*p & 0x80) == 0)
      ;
    else
      if ((*p & 0xE0) == 0xC0)
        {
          p++;
          if ((*p & 0xC0) != 0x80)
            return(0);
        }
      else
        if ((*p & 0xF0) == 0xE0)
          {
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
            p++;
            if ((*p & 0xC0) != 0x80)
              return(0);
          }
        else
          return(0);
  }
  return(p-utf8);
}

/*
  ConvertUTF8ToUTF16(): allocate and return a UTF-16 copy of source.  If
  source is not valid UTF-8, falls back to a byte-for-byte widening copy.
  Returns NULL on allocation failure.  Caller owns the returned buffer
  (release with RelinquishMagickMemory).
*/
static wchar_t *ConvertUTF8ToUTF16(const unsigned char *source)
{
  size_t
    length;

  wchar_t
    *utf16;

  length=UTF8ToUTF16(source,(wchar_t *) NULL);
  if (length == 0)
    {
      register ssize_t
        i;

      /*
        Not UTF-8, just copy.
      */
      length=strlen((const char *) source);
      utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
      if (utf16 == (wchar_t *) NULL)
        return((wchar_t *) NULL);
      for (i=0; i <= (ssize_t) length; i++)
        utf16[i]=source[i];
      return(utf16);
    }
  utf16=(wchar_t *) AcquireQuantumMemory(length+1,sizeof(*utf16));
  if (utf16 == (wchar_t *) NULL)
    return((wchar_t *) NULL);
  length=UTF8ToUTF16(source,utf16);
  return(utf16);
}
#endif

MagickExport FILE *OpenMagickStream(const char *path,const char *mode)
{
  FILE
    *file;

  if ((path == (const char *) NULL) || (mode == (const char *) NULL))
    {
      errno=EINVAL;
      return((FILE *) NULL);
    }
  file=(FILE *) NULL;
#if defined(MAGICKCORE_HAVE__WFOPEN)
  {
    wchar_t
      *unicode_mode,
      *unicode_path;

    unicode_path=ConvertUTF8ToUTF16((const unsigned char *) path);
    if (unicode_path == (wchar_t *) NULL)
      return((FILE *) NULL);
    unicode_mode=ConvertUTF8ToUTF16((const unsigned char *) mode);
    if (unicode_mode == (wchar_t *) NULL)
      {
        unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
        return((FILE *) NULL);
      }
    file=_wfopen(unicode_path,unicode_mode);
    unicode_mode=(wchar_t *) RelinquishMagickMemory(unicode_mode);
    unicode_path=(wchar_t *) RelinquishMagickMemory(unicode_path);
  }
#endif
  /* Fall back to fopen() when _wfopen is unavailable or failed. */
  if (file == (FILE *) NULL)
    file=fopen(path,mode);
  return(file);
}

/*
  PaintFloodfill() changes the color value of any pixel that matches target
  and is an immediate neighbor.  If the method FillToBorderMethod is
  specified, the color value is changed for any neighbor pixel that does not
  match the bordercolor member of image.  By default target must match a
  particular pixel color exactly.  However, in many cases two colors may
  differ by a small amount:  the fuzz member of image defines how much
  tolerance is acceptable to consider two colors as the same.
For example, set fuzz to 10 and the % color red at intensities of 100 and 102 respectively are now % interpreted as the same color for the purposes of the floodfill. % % Deprecated, replace with: % % FloodfillPaintImage(image,channel,draw_info,target,x,y, % method == FloodfillMethod ? MagickFalse : MagickTrue); % % The format of the PaintFloodfillImage method is: % % MagickBooleanType PaintFloodfillImage(Image *image, % const ChannelType channel,const MagickPixelPacket target, % const ssize_t x,const ssize_t y,const DrawInfo *draw_info, % const PaintMethod method) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o x,y: the starting location of the operation. % % o draw_info: the draw info. % % o method: Choose either FloodfillMethod or FillToBorderMethod. % */ MagickExport MagickBooleanType PaintFloodfillImage(Image *image, const ChannelType channel,const MagickPixelPacket *target,const ssize_t x, const ssize_t y,const DrawInfo *draw_info,const PaintMethod method) { MagickBooleanType status; status=FloodfillPaintImage(image,channel,draw_info,target,x,y, method == FloodfillMethod ? MagickFalse : MagickTrue); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % P a i n t O p a q u e I m a g e % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PaintOpaqueImage() changes any pixel that matches color with the color % defined by fill. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
% % Deprecated, replace with: % % OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse); % OpaquePaintImageChannel(image,channel,target,fill,MagickFalse); % % The format of the PaintOpaqueImage method is: % % MagickBooleanType PaintOpaqueImage(Image *image, % const PixelPacket *target,const PixelPacket *fill) % MagickBooleanType PaintOpaqueImageChannel(Image *image, % const ChannelType channel,const PixelPacket *target, % const PixelPacket *fill) % % A description of each parameter follows: % % o image: the image. % % o channel: the channel(s). % % o target: the RGB value of the target color. % % o fill: the replacement color. % */ MagickExport MagickBooleanType PaintOpaqueImage(Image *image, const MagickPixelPacket *target,const MagickPixelPacket *fill) { MagickBooleanType status; status=OpaquePaintImageChannel(image,DefaultChannels,target,fill,MagickFalse); return(status); } MagickExport MagickBooleanType PaintOpaqueImageChannel(Image *image, const ChannelType channel,const MagickPixelPacket *target, const MagickPixelPacket *fill) { return(OpaquePaintImageChannel(image,channel,target,fill,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P a i n t T r a n s p a r e n t I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PaintTransparentImage() changes the opacity value associated with any pixel % that matches color to the value defined by opacity. % % By default color must match a particular pixel color exactly. However, % in many cases two colors may differ by a small amount. Fuzz defines % how much tolerance is acceptable to consider two colors as the same. % For example, set fuzz to 10 and the color red at intensities of 100 and % 102 respectively are now interpreted as the same color. 
% % Deprecated, replace with: % % TransparentPaintImage(image,target,opacity,MagickFalse); % % The format of the PaintTransparentImage method is: % % MagickBooleanType PaintTransparentImage(Image *image, % const MagickPixelPacket *target,const Quantum opacity) % % A description of each parameter follows: % % o image: the image. % % o target: the RGB value of the target color. % % o opacity: the replacement opacity value. % */ MagickExport MagickBooleanType PaintTransparentImage(Image *image, const MagickPixelPacket *target,const Quantum opacity) { return(TransparentPaintImage(image,target,opacity,MagickFalse)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % + P a r s e I m a g e G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ParseImageGeometry() is similar to GetGeometry() except the returned % geometry is modified as determined by the meta characters: %, !, <, % and >. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,x,y,width,height); % % The format of the ParseImageGeometry method is: % % int ParseImageGeometry(char *geometry,ssize_t *x,ssize_t *y, % size_t *width,size_t *height) % % A description of each parameter follows: % % o flags: Method ParseImageGeometry returns a bitmask that indicates % which of the four values were located in the geometry string. % % o image_geometry: Specifies a character string representing the geometry % specification. % % o x,y: A pointer to an integer. The x and y offset as determined by % the geometry specification is returned here. % % o width,height: A pointer to an unsigned integer. The width and height % as determined by the geometry specification is returned here. 
% */ MagickExport int ParseImageGeometry(const char *geometry,ssize_t *x,ssize_t *y, size_t *width,size_t *height) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); return((int) ParseMetaGeometry(geometry,x,y,width,height)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P a r s e S i z e G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ParseSizeGeometry() returns a region as defined by the geometry string with % respect to the image dimensions and aspect ratio. % % Deprecated, replace with: % % ParseMetaGeometry(geometry,&region_info->x,&region_info->y, % &region_info->width,&region_info->height); % % The format of the ParseSizeGeometry method is: % % MagickStatusType ParseSizeGeometry(const Image *image, % const char *geometry,RectangeInfo *region_info) % % A description of each parameter follows: % % o geometry: The geometry (e.g. 100x100+10+10). % % o region_info: the region as defined by the geometry string. % */ MagickExport MagickStatusType ParseSizeGeometry(const Image *image, const char *geometry,RectangleInfo *region_info) { MagickStatusType flags; (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.4.7"); SetGeometry(image,region_info); flags=ParseMetaGeometry(geometry,&region_info->x,&region_info->y, &region_info->width,&region_info->height); return(flags); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o p I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PopImageList() removes the last image in the list. % % Deprecated, replace with: % % RemoveLastImageFromList(images); % % The format of the PopImageList method is: % % Image *PopImageList(Image **images) % % A description of each parameter follows: % % o images: the image list. 
% */ MagickExport Image *PopImageList(Image **images) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); return(RemoveLastImageFromList(images)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o p I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PopImagePixels() transfers one or more pixel components from the image pixel % cache to a user supplied buffer. The pixels are returned in network byte % order. MagickTrue is returned if the pixels are successfully transferred, % otherwise MagickFalse. % % The format of the PopImagePixels method is: % % size_t PopImagePixels(Image *,const QuantumType quantum, % unsigned char *destination) % % A description of each parameter follows: % % o image: the image. % % o quantum: Declare which pixel components to transfer (RGB, RGBA, etc). % % o destination: The components are transferred to this buffer. % */ MagickExport size_t PopImagePixels(Image *image,const QuantumType quantum, unsigned char *destination) { QuantumInfo *quantum_info; size_t length; quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image); if (quantum_info == (QuantumInfo *) NULL) return(0); length=ExportQuantumPixels(image,(const CacheView *) NULL,quantum_info, quantum,destination,&image->exception); quantum_info=DestroyQuantumInfo(quantum_info); return(length); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P o s t s c r i p t G e o m e t r y % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PostscriptGeometry() replaces any page mneumonic with the equivalent size in % picas. % % Deprecated, replace with: % % GetPageGeometry(page); % % The format of the PostscriptGeometry method is: % % char *PostscriptGeometry(const char *page) % % A description of each parameter follows. 
% % o page: Specifies a pointer to an array of characters. % The string is either a Postscript page name (e.g. A4) or a postscript % page geometry (e.g. 612x792+36+36). % */ MagickExport char *PostscriptGeometry(const char *page) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1"); return(GetPageGeometry(page)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P u s h I m a g e L i s t % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PushImageList() adds an image to the end of the list. % % Deprecated, replace with: % % AppendImageToList(images,CloneImageList(image,exception)); % % The format of the PushImageList method is: % % unsigned int PushImageList(Image *images,const Image *image, % ExceptionInfo *exception) % % A description of each parameter follows: % % o images: the image list. % % o image: the image. % % o exception: return any errors or warnings in this structure. % */ MagickExport unsigned int PushImageList(Image **images,const Image *image, ExceptionInfo *exception) { (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2"); AppendImageToList(images,CloneImageList(image,exception)); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P u s h I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PushImagePixels() transfers one or more pixel components from a user % supplied buffer into the image pixel cache of an image. The pixels are % expected in network byte order. It returns MagickTrue if the pixels are % successfully transferred, otherwise MagickFalse. % % The format of the PushImagePixels method is: % % size_t PushImagePixels(Image *image,const QuantumType quantum, % const unsigned char *source) % % A description of each parameter follows: % % o image: the image. 
%
%    o quantum: Declare which pixel components to transfer (red, green,
%      blue, opacity, RGB, or RGBA).
%
%    o source: The pixel components are transferred from this buffer.
%
*/
MagickExport size_t PushImagePixels(Image *image,const QuantumType quantum,
  const unsigned char *source)
{
  QuantumInfo
    *quantum_info;

  size_t
    extent;

  /*
    Deprecated: import pixels through the quantum interface.  Returns the
    number of bytes consumed, or 0 on failure.
  */
  quantum_info=AcquireQuantumInfo((const ImageInfo *) NULL,image);
  if (quantum_info == (QuantumInfo *) NULL)
    return(0);
  extent=ImportQuantumPixels(image,(CacheView *) NULL,quantum_info,quantum,
    source,&image->exception);
  quantum_info=DestroyQuantumInfo(quantum_info);
  return(extent);
}

/*
  QuantizationError() measures the difference between the original and
  quantized images.  This difference is the total quantization error,
  computed by summing over all pixels the squared RGB-space distance between
  each reference pixel value and its quantized value.  These values are
  computed:

    o mean_error_per_pixel: the mean error for any single pixel.
    o normalized_mean_square_error: the normalized mean quantization error,
      in the range [0,1], independent of the image's color range.
    o normalized_maximum_square_error: the normalized maximum quantization
      error, in the range [0,1], independent of the image's color range.

  Deprecated (last use v5.5.3), replace with:

    GetImageQuantizeError(image);
*/
MagickExport unsigned int QuantizationError(Image *image)
{
  unsigned int
    status;

  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.3");
  status=GetImageQuantizeError(image);
  return(status);
}

/*
  RadialBlurImage() applies a radial blur to the image.  Andrew Protano
  contributed this effect.

  A description of each parameter follows:

    o image: the image.
    o channel: the channel type.
    o angle: the angle of the radial blur.
    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *RadialBlurImage(const Image *image,const double angle,
  ExceptionInfo *exception)
{
  /* Deprecated: renamed to RotationalBlurImage(). */
  return(RotationalBlurImage(image,angle,exception));
}

MagickExport Image *RadialBlurImageChannel(const Image *image,
  const ChannelType channel,const double angle,ExceptionInfo *exception)
{
  /* Deprecated: renamed to RotationalBlurImageChannel(). */
  return(RotationalBlurImageChannel(image,channel,angle,exception));
}

/*
  RandomChannelThresholdImage() changes the value of individual pixels based
  on the intensity of each pixel compared to a random threshold.  The result
  is a low-contrast, two color image.  Deprecated (last use v5.5.7).

  A description of each parameter follows:

    o image: the image.
    o channel: the channel or channels to be thresholded ("all",
      "intensity", "opacity", or "matte"; other names raise
      UnrecognizedChannelType).
    o thresholds: a geometry string containing LOWxHIGH thresholds.  If the
      string contains 2x2, 3x3, or 4x4, an ordered dither of order 2, 3, or
      4 is performed instead.
    o exception: return any errors or warnings in this structure.
*/
MagickExport unsigned int RandomChannelThresholdImage(Image *image,
  const char *channel,const char *thresholds,ExceptionInfo *exception)
{
#define RandomChannelThresholdImageText  " RandomChannelThreshold image... "

  double
    lower_threshold,
    upper_threshold;

  RandomInfo
    *random_info;

  ssize_t
    count,
    y;

  /*
    o2/o3/o4 are the classic ordered-dither matrices (scaled 0..~1.6);
    threshold retains its value across loop iterations by design.
  */
  static MagickRealType
    o2[4]={0.2f, 0.6f, 0.8f, 0.4f},
    o3[9]={0.1f, 0.6f, 0.3f, 0.7f, 0.5f, 0.8f, 0.4f, 0.9f, 0.2f},
    o4[16]={0.1f, 0.7f, 1.1f, 0.3f, 1.0f, 0.5f, 1.5f, 0.8f,
      1.4f, 1.6f, 0.6f, 1.2f, 0.4f, 0.9f, 1.3f, 0.2f},
    threshold=128;

  size_t
    order;

  /*
    Threshold image.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),
      "last use: v5.5.7");
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  /*
    Decode the thresholds argument: either an ordered-dither order or a
    LOWxHIGH threshold pair (optionally expressed as percentages).
  */
  lower_threshold=0;
  upper_threshold=0;
  if (LocaleCompare(thresholds,"2x2") == 0)
    order=2;
  else
    if (LocaleCompare(thresholds,"3x3") == 0)
      order=3;
    else
      if (LocaleCompare(thresholds,"4x4") == 0)
        order=4;
      else
        {
          order=1;
          count=(ssize_t) sscanf(thresholds,"%lf[/x%%]%lf",&lower_threshold,
            &upper_threshold);
          if (strchr(thresholds,'%') != (char *) NULL)
            {
              upper_threshold*=(.01*QuantumRange);
              lower_threshold*=(.01*QuantumRange);
            }
          /* Single value given: upper bound mirrors the lower. */
          if (count == 1)
            upper_threshold=(MagickRealType) QuantumRange-lower_threshold;
        }
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "  RandomChannelThresholdImage: channel type=%s",channel);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TransformEvent,GetMagickModule(),
      "    Thresholds: %s (%fx%f)",thresholds,lower_threshold,
      upper_threshold);
  /* Intensity thresholding produces a 2-color (bilevel) colormap. */
  if (LocaleCompare(channel,"all") == 0 ||
      LocaleCompare(channel,"intensity") == 0)
    if (AcquireImageColormap(image,2) == MagickFalse)
      ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
        image->filename);
  random_info=AcquireRandomInfo();
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register IndexPacket
      index,
      *magick_restrict indexes;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,exception);
    if (q == (PixelPacket *) NULL)
      break;
    if (LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"intensity") == 0)
      {
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          MagickRealType
            intensity;

          intensity=GetPixelIntensity(image,q);
          if (order == 1)
            {
              /*
                Random threshold between the bounds; values outside the
                bounds are forced to black/white deterministically.
              */
              if (intensity < lower_threshold)
                threshold=lower_threshold;
              else
                if (intensity > upper_threshold)
                  threshold=upper_threshold;
                else
                  threshold=(MagickRealType) (QuantumRange*
                    GetPseudoRandomValue(random_info));
            }
          else
            if (order == 2)
              threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
            else
              if (order == 3)
                threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
              else
                if (order == 4)
                  threshold=(MagickRealType) QuantumRange*o4[(x%4)+4*(y%4)];
          index=(IndexPacket) (intensity <= threshold ? 0 : 1);
          SetPixelIndex(indexes+x,index);
          SetPixelRGBO(q,image->colormap+(ssize_t) index);
          q++;
        }
      }
    if (LocaleCompare(channel,"opacity") == 0 ||
        LocaleCompare(channel,"all") == 0 ||
        LocaleCompare(channel,"matte") == 0)
      {
        if (image->matte != MagickFalse)
          for (x=0; x < (ssize_t) image->columns; x++)
          {
            if (order == 1)
              {
                if ((MagickRealType) q->opacity < lower_threshold)
                  threshold=lower_threshold;
                else
                  if ((MagickRealType) q->opacity > upper_threshold)
                    threshold=upper_threshold;
                  else
                    threshold=(MagickRealType) (QuantumRange*
                      GetPseudoRandomValue(random_info));
              }
            else
              if (order == 2)
                threshold=(MagickRealType) QuantumRange*o2[(x%2)+2*(y%2)];
              else
                if (order == 3)
                  threshold=(MagickRealType) QuantumRange*o3[(x%3)+3*(y%3)];
                else
                  if (order == 4)
                    /* NOTE(review): the /1.7 rescales o4 (max 1.6) into
                       range for the opacity path only — matches historical
                       behavior. */
                    threshold=(MagickRealType) QuantumRange*
                      o4[(x%4)+4*(y%4)]/1.7;
            SetPixelOpacity(q,(MagickRealType) q->opacity <= threshold ?
              0 : QuantumRange);
            q++;
          }
      }
    else
      {
        /*
          To Do: red, green, blue, cyan, magenta, yellow, black.
        */
        if (LocaleCompare(channel,"intensity") != 0)
          ThrowBinaryException(OptionError,"UnrecognizedChannelType",
            image->filename);
      }
    if (SyncAuthenticPixels(image,exception) == MagickFalse)
      break;
  }
  random_info=DestroyRandomInfo(random_info);
  return(MagickTrue);
}

/*
  ReacquireMemory() changes the size of the memory and returns a pointer to
  the (possibly moved) block.  Deprecated (last use v5.5.7).

  A description of each parameter follows:

    o memory: a pointer to a memory allocation; on return the pointer may
      change.  NOTE(review): unlike realloc(), on allocation failure the
      old block is freed and *memory is set to NULL — callers must check
      *memory, not rely on the old pointer surviving.
    o size: the new size of the allocated memory.
*/
MagickExport void ReacquireMemory(void **memory,const size_t size)
{
  void
    *allocation;

  assert(memory != (void **) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*memory == (void *) NULL)
    {
      *memory=AcquireMagickMemory(size);
      return;
    }
  allocation=realloc(*memory,size);
  if (allocation == (void *) NULL)
    *memory=RelinquishMagickMemory(*memory);
  *memory=allocation;
}

/*
%  RecolorImage() apply color transformation to an image.  The method permits
%  saturation changes, hue rotation, luminance to alpha, and various other
%  effects.
Although variable-sized transformation matrices can be used, % typically one uses a 5x5 matrix for an RGBA image and a 6x6 for CMYKA % (or RGBA with offsets). The matrix is similar to those used by Adobe Flash % except offsets are in column 6 rather than 5 (in support of CMYKA images) % and offsets are normalized (divide Flash offset by 255). % % The format of the RecolorImage method is: % % Image *RecolorImage(const Image *image,const size_t order, % const double *color_matrix,ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o order: the number of columns and rows in the recolor matrix. % % o color_matrix: An array of double representing the recolor matrix. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *RecolorImage(const Image *image,const size_t order, const double *color_matrix,ExceptionInfo *exception) { KernelInfo *kernel_info; Image *recolor_image; kernel_info=AcquireKernelInfo("1"); if (kernel_info == (KernelInfo *) NULL) return((Image *) NULL); kernel_info->width=order; kernel_info->height=order; kernel_info->values=(double *) color_matrix; recolor_image=ColorMatrixImage(image,kernel_info,exception); kernel_info->values=(double *) NULL; kernel_info=DestroyKernelInfo(kernel_info); return(recolor_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e d u c e N o i s e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ReduceNoiseImage() smooths the contours of an image while still preserving % edge information. The algorithm works by replacing each pixel with its % neighbor closest in value. A neighbor is defined by radius. Use a radius % of 0 and ReduceNoise() selects a suitable radius for you. 
% % The format of the ReduceNoiseImage method is: % % Image *ReduceNoiseImage(const Image *image,const double radius, % ExceptionInfo *exception) % % A description of each parameter follows: % % o image: the image. % % o radius: the radius of the pixel neighborhood. % % o exception: return any errors or warnings in this structure. % */ MagickExport Image *ReduceNoiseImage(const Image *image,const double radius, ExceptionInfo *exception) { Image *reduce_image; reduce_image=StatisticImage(image,NonpeakStatistic,(size_t) radius,(size_t) radius,exception); return(reduce_image); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e l i n g u i s h S e m a p h o r e I n f o % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % RelinquishSemaphoreInfo() relinquishes a semaphore. % % The format of the RelinquishSemaphoreInfo method is: % % RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info) % % A description of each parameter follows: % % o semaphore_info: Specifies a pointer to an SemaphoreInfo structure. % */ MagickExport void RelinquishSemaphoreInfo(SemaphoreInfo *semaphore_info) { assert(semaphore_info != (SemaphoreInfo *) NULL); UnlockSemaphoreInfo(semaphore_info); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % R e s e t I m a g e A t t r i b u t e I t e r a t o r % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % ResetImageAttributeIterator() resets the image attributes iterator. Use it % in conjunction with GetNextImageAttribute() to iterate over all the values % associated with an image. % % Deprecated, replace with: % % ResetImagePropertyIterator(image); % % The format of the ResetImageAttributeIterator method is: % % ResetImageAttributeIterator(const ImageInfo *image) % % A description of each parameter follows: % % o image: the image. 
%
*/
MagickExport void ResetImageAttributeIterator(const Image *image)
{
  ResetImagePropertyIterator(image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t C a c h e V i e w P i x e l s                                       %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetCacheViewPixels() gets pixels from the in-memory or disk pixel cache as
%  defined by the geometry parameters.   A pointer to the pixels is returned
%  if the pixels are transferred, otherwise a NULL is returned.
%
%  Deprecated, replace with:
%
%    QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
%      GetCacheViewException(cache_view));
%
%  The format of the SetCacheViewPixels method is:
%
%      PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
%        const ssize_t y,const size_t columns,const size_t rows)
%
%  A description of each parameter follows:
%
%    o cache_view: the cache view.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
*/
MagickExport PixelPacket *SetCacheViewPixels(CacheView *cache_view,const ssize_t x,
  const ssize_t y,const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  pixels=QueueCacheViewAuthenticPixels(cache_view,x,y,columns,rows,
    GetCacheViewException(cache_view));
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   S e t C a c h e T h r e s h o l d                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetCacheThreshold() sets the amount of free memory allocated for the pixel
%  cache.  Once this threshold is exceeded, all subsequent pixels cache
%  operations are to/from disk.
%
%  The format of the SetCacheThreshold() method is:
%
%      void SetCacheThreshold(const size_t threshold)
%
%  A description of each parameter follows:
%
%    o threshold: the number of megabytes of memory available to the pixel
%      cache.
%
*/
MagickExport void SetCacheThreshold(const size_t size)
{
  /* NOTE(review): the parameter is documented above as `threshold` but is
     named `size` here; both denote megabytes.  The *1024*1024 scaling can
     overflow size_t for very large values on 32-bit builds -- confirm callers
     pass sane limits. */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.1");
  (void) SetMagickResourceLimit(MemoryResource,size*1024*1024);
  (void) SetMagickResourceLimit(MapResource,2*size*1024*1024);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t E x c e p t i o n I n f o                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetExceptionInfo() sets the exception severity.
%
%  The format of the SetExceptionInfo method is:
%
%      MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
%        ExceptionType severity)
%
%  A description of each parameter follows:
%
%    o exception: the exception info.
%
%    o severity: the exception severity.
%
*/
MagickExport MagickBooleanType SetExceptionInfo(ExceptionInfo *exception,
  ExceptionType severity)
{
  /* Any queued exception messages are discarded before the new severity is
     recorded. */
  assert(exception != (ExceptionInfo *) NULL);
  ClearMagickException(exception);
  exception->severity=severity;
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e                                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImage() sets the red, green, and blue components of each pixel to
%  the image background color and the opacity component to the specified
%  level of transparency.  The background color is defined by the
%  background_color member of the image.
%
%  The format of the SetImage method is:
%
%      void SetImage(Image *image,const Quantum opacity)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o opacity: Set each pixel to this level of transparency.
%
*/
MagickExport void SetImage(Image *image,const Quantum opacity)
{
  PixelPacket
    background_color;

  ssize_t
    y;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.0");
  assert(image != (Image *) NULL);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"...");
  assert(image->signature == MagickCoreSignature);
  background_color=image->background_color;
  if (opacity != OpaqueOpacity)
    background_color.opacity=opacity;
  if (background_color.opacity != OpaqueOpacity)
    {
      /* A transparent fill requires DirectClass storage and a matte channel. */
      (void) SetImageStorageClass(image,DirectClass);
      image->matte=MagickTrue;
    }
  if ((image->storage_class == PseudoClass) ||
      (image->colorspace == CMYKColorspace))
    {
      /*
        Set colormapped or CMYK image: fill the RGBO channels with the
        background color and reset every colormap index to 0.
      */
      for (y=0; y < (ssize_t) image->rows; y++)
      {
        register IndexPacket
          *magick_restrict indexes;

        register ssize_t
          x;

        register PixelPacket
          *magick_restrict q;

        q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
        if (q == (PixelPacket *) NULL)
          break;
        for (x=0; x < (ssize_t) image->columns; x++)
        {
          SetPixelRGBO(q,&background_color);
          q++;
        }
        indexes=GetAuthenticIndexQueue(image);
        for (x=0; x < (ssize_t) image->columns; x++)
          SetPixelIndex(indexes+x,0);
        if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
          break;
      }
      return;
    }
  /*
    Set DirectClass image: fill every pixel with the background color.
  */
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=QueueAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      SetPixelRGBO(q,&background_color);
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
  }
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e A t t r i b u t e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageAttribute() searches the list of image attributes and replaces the
%  attribute value.
If it is not found in the list, the attribute name
%  and value is added to the list.
%
%  Deprecated, replace with:
%
%    SetImageProperty(image,key,value);
%
%  The format of the SetImageAttribute method is:
%
%       MagickBooleanType SetImageAttribute(Image *image,const char *key,
%         const char *value)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o key: the key.
%
%    o value: the value.
%
*/
MagickExport MagickBooleanType SetImageAttribute(Image *image,const char *key,
  const char *value)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.3.1");
  return(SetImageProperty(image,key,value));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e L i s t                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageList() inserts an image into the list at the specified position.
%
%  The format of the SetImageList method is:
%
%      unsigned int SetImageList(Image **images,const Image *image,
%        const ssize_t offset,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
%    o image: the image.
%
%    o offset: the position within the list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int SetImageList(Image **images,const Image *image,
  const ssize_t offset,ExceptionInfo *exception)
{
  Image
    *clone;

  register ssize_t
    i;

  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(image,exception);
  /* Rewind to the head of the list, then walk forward `offset` images. */
  while (GetPreviousImageInList(*images) != (Image *) NULL)
    (*images)=GetPreviousImageInList(*images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(*images) == (Image *) NULL)
      return(MagickFalse);
    (*images)=GetNextImageInList(*images);
  }
  InsertImageInList(images,clone);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t I m a g e P i x e l s                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImagePixels() queues a mutable pixel region.
%  If the region is successfully initialized a pointer to a PixelPacket
%  array representing the region is returned, otherwise NULL is returned.
%  The returned pointer may point to a temporary working buffer for the
%  pixels or it may point to the final location of the pixels in memory.
%
%  Write-only access means that any existing pixel values corresponding to
%  the region are ignored.  This useful while the initial image is being
%  created from scratch, or if the existing pixel values are to be
%  completely replaced without need to refer to their pre-existing values.
%  The application is free to read and write the pixel buffer returned by
%  SetImagePixels() any way it pleases. SetImagePixels() does not initialize
%  the pixel array values. Initializing pixel array values is the
%  application's responsibility.
%
%  Performance is maximized if the selected region is part of one row, or
%  one or more full rows, since then there is opportunity to access the
%  pixels in-place (without a copy) if the image is in RAM, or in a
%  memory-mapped file. The returned pointer should *never* be deallocated
%  by the user.
%
%  Pixels accessed via the returned pointer represent a simple array of type
%  PixelPacket. If the image type is CMYK or the storage class is PseudoClass,
%  call GetAuthenticIndexQueue() after invoking GetAuthenticPixels() to obtain
%  the black color component or the colormap indexes (of type IndexPacket)
%  corresponding to the region.  Once the PixelPacket (and/or IndexPacket)
%  array has been updated, the changes must be saved back to the underlying
%  image using SyncAuthenticPixels() or they may be lost.
%
%  Deprecated, replace with:
%
%    QueueAuthenticPixels(image,x,y,columns,rows,&image->exception);
%
%  The format of the SetImagePixels() method is:
%
%      PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
%        const size_t columns,const size_t rows)
%
%  A description of each parameter follows:
%
%    o pixels: SetImagePixels returns a pointer to the pixels if they are
%      transferred, otherwise a NULL is returned.
%
%    o image: the image.
%
%    o x,y,columns,rows:  These values define the perimeter of a region of
%      pixels.
%
*/
MagickExport PixelPacket *SetImagePixels(Image *image,const ssize_t x,const ssize_t y,
  const size_t columns,const size_t rows)
{
  PixelPacket
    *pixels;

  /* Thin shim over the replacement API; errors land in image->exception. */
  pixels=QueueAuthenticPixels(image,x,y,columns,rows,&image->exception);
  return(pixels);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t M a g i c k R e g i s t r y                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMagickRegistry() sets a blob into the registry and returns a unique ID.
%  If an error occurs, -1 is returned.
%
%  The format of the SetMagickRegistry method is:
%
%      ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
%        const size_t length,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o type: the registry type.
%
%    o blob: the address of a Binary Large OBject.
%
%    o length: For a registry type of ImageRegistryType use sizeof(Image)
%      otherwise the blob length in number of bytes.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ssize_t SetMagickRegistry(const RegistryType type,const void *blob,
  const size_t magick_unused(length),ExceptionInfo *exception)
{
  char
    key[MaxTextExtent];

  MagickBooleanType
    status;

  /* NOTE(review): `id` is a plain static with no locking; concurrent callers
     could race on the read-increment -- confirm single-threaded use. */
  static ssize_t
    id = 0;

  magick_unreferenced(length);

  (void) FormatLocaleString(key,MaxTextExtent,"%.20g\n",(double) id);
  status=SetImageRegistry(type,key,blob,exception);
  if (status == MagickFalse)
    return(-1);
  return(id++);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S e t M o n i t o r H a n d l e r                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetMonitorHandler() sets the monitor handler to the specified method
%  and returns the previous monitor handler.
%
%  The format of the SetMonitorHandler method is:
%
%      MonitorHandler SetMonitorHandler(MonitorHandler handler)
%
%  A description of each parameter follows:
%
%    o handler: Specifies a pointer to a method to handle monitors.
%
*/

/* Returns the currently installed progress-monitor handler; monitor_handler
   is presumably a file-scope variable defined earlier in this file. */
MagickExport MonitorHandler GetMonitorHandler(void)
{
  return(monitor_handler);
}

MagickExport MonitorHandler SetMonitorHandler(MonitorHandler handler)
{
  MonitorHandler
    previous_handler;

  previous_handler=monitor_handler;
  monitor_handler=handler;
  return(previous_handler);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S h i f t I m a g e L i s t                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ShiftImageList() removes an image from the beginning of the list.
%
%  Deprecated, replace with:
%
%    RemoveFirstImageFromList(images);
%
%  The format of the ShiftImageList method is:
%
%      Image *ShiftImageList(Image **images)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
*/
MagickExport Image *ShiftImageList(Image **images)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  return(RemoveFirstImageFromList(images));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   S i z e B l o b                                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SizeBlob() returns the current length of the image file or blob.
%
%  Deprecated, replace with:
%
%    GetBlobSize(image);
%
%  The format of the SizeBlob method is:
%
%      off_t SizeBlob(Image *image)
%
%  A description of each parameter follows:
%
%    o size:  Method SizeBlob returns the current length of the image file
%      or blob.
%
%    o image: the image.
%
*/
MagickExport MagickOffsetType SizeBlob(Image *image)
{
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.3");
  return((MagickOffsetType) GetBlobSize(image));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S p l i c e I m a g e L i s t                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SpliceImageList() removes the images designated by offset and length from
%  the list and replaces them with the specified list.
%
%  The format of the SpliceImageList method is:
%
%      Image *SpliceImageList(Image *images,const ssize_t offset,
%        const size_t length,const Image *splices,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
%    o offset: the position within the list.
%
%    o length: the length of the image list to remove.
%
%    o splice: Replace the removed image list with this list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *SpliceImageList(Image *images,const ssize_t offset,
  const size_t length,const Image *splices,ExceptionInfo *exception)
{
  Image
    *clone;

  register ssize_t
    i;

  if (images->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  clone=CloneImageList(splices,exception);
  /* Rewind to the head of the list, then walk forward `offset` images. */
  while (GetPreviousImageInList(images) != (Image *) NULL)
    images=GetPreviousImageInList(images);
  for (i=0; i < offset; i++)
  {
    if (GetNextImageInList(images) == (Image *) NULL)
      return((Image *) NULL);
    images=GetNextImageInList(images);
  }
  (void) SpliceImageIntoList(&images,length,clone);
  return(images);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   s R G B C o m p a n d o r                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  sRGBCompandor() adds the gamma function to a sRGB pixel.
%
%  The format of the sRGBCompandor method is:
%
%      MagickRealType sRGBCompandor(const MagickRealType pixel)
%
%  A description of each parameter follows:
%
%    o pixel: the pixel.
%
*/
MagickExport MagickRealType sRGBCompandor(const MagickRealType pixel)
{
  /* Standard sRGB encoding: linear segment below the cutoff, 1/2.4 power
     curve above it. */
  if (pixel <= (0.0031306684425005883*QuantumRange))
    return(12.92*pixel);
  return(QuantumRange*(1.055*pow(QuantumScale*pixel,1.0/2.4)-0.055));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S t r i p                                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  Strip() strips any whitespace or quotes from the beginning and end of a
%  string of characters.
%
%  The format of the Strip method is:
%
%      void Strip(char *message)
%
%  A description of each parameter follows:
%
%    o message: Specifies an array of characters.
%
*/
MagickExport void Strip(char *message)
{
  register char
    *p,
    *q;

  assert(message != (char *) NULL);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (*message == '\0')
    return;
  /* NOTE(review): a one-character string is returned untouched, even if that
     character is whitespace or a quote -- long-standing quirk, kept as-is. */
  if (strlen(message) == 1)
    return;
  p=message;
  while (isspace((int) ((unsigned char) *p)) != 0)
    p++;
  if ((*p == '\'') || (*p == '"'))
    p++;
  q=message+strlen(message)-1;
  while ((isspace((int) ((unsigned char) *q)) != 0) && (q > p))
    q--;
  if (q > p)
    if ((*q == '\'') || (*q == '"'))
      q--;
  /* Shift the trimmed span [p,q] to the front in place. */
  (void) CopyMagickMemory(message,p,(size_t) (q-p+1));
  message[q-p+1]='\0';
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S y n c C a c h e V i e w                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncCacheView() saves the cache view pixels to the in-memory or disk
%  cache.  It returns MagickTrue if the pixel region is synced, otherwise
%  MagickFalse.
%
%  Deprecated, replace with:
%
%    SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view));
%
%  The format of the SyncCacheView method is:
%
%      MagickBooleanType SyncCacheView(CacheView *cache_view)
%
%  A description of each parameter follows:
%
%    o cache_view: the cache view.
%
*/
MagickExport MagickBooleanType SyncCacheView(CacheView *cache_view)
{
  MagickBooleanType
    status;

  status=SyncCacheViewAuthenticPixels(cache_view,
    GetCacheViewException(cache_view));
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   S y n c C a c h e V i e w P i x e l s                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SyncCacheViewPixels() saves the cache view pixels to the in-memory
%  or disk cache.  It returns MagickTrue if the pixel region is flushed,
%  otherwise MagickFalse.
% % Deprecated, replace with: % % SyncCacheViewAuthenticPixels(cache_view,GetCacheViewException(cache_view)); % % The format of the SyncCacheViewPixels method is: % % MagickBooleanType SyncCacheViewPixels(CacheView *cache_view) % % A description of each parameter follows: % % o cache_view: the cache view. % % o exception: return any errors or warnings in this structure. % */ MagickExport MagickBooleanType SyncCacheViewPixels(CacheView *cache_view) { MagickBooleanType status; status=SyncCacheViewAuthenticPixels(cache_view, GetCacheViewException(cache_view)); return(status); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y n c I m a g e P i x e l s % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SyncImagePixels() saves the image pixels to the in-memory or disk cache. % The method returns MagickTrue if the pixel region is synced, otherwise % MagickFalse. % % Deprecated, replace with: % % SyncAuthenticPixels(image,&image->exception); % % The format of the SyncImagePixels() method is: % % MagickBooleanType SyncImagePixels(Image *image) % % A description of each parameter follows: % % o image: the image. % */ MagickExport MagickBooleanType SyncImagePixels(Image *image) { return(SyncAuthenticPixels(image,&image->exception)); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % S y s t e m C o m m a n d % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % SystemCommand() executes the specified command and waits until it % terminates. The returned value is the exit status of the command. 
%
%  The format of the SystemCommand method is:
%
%      int SystemCommand(const MagickBooleanType asynchronous,
%        const MagickBooleanType verbose,const char *command,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o asynchronous: a value other than 0 executes the parent program
%      concurrently with the new child process.
%
%    o verbose: a value other than 0 prints the executed command before it is
%      invoked.
%
%    o command: this string is the command to execute.
%
%    o exception: return any errors here.
%
*/
MagickExport int SystemCommand(const MagickBooleanType asynchronous,
  const MagickBooleanType verbose,const char *command,ExceptionInfo *exception)
{
  int
    status;

  status=ExternalDelegateCommand(asynchronous,verbose,command,(char *) NULL,
    exception);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T e m p o r a r y F i l e n a m e                                         %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TemporaryFilename() replaces the contents of path by a unique path name.
%
%  The format of the TemporaryFilename method is:
%
%      void TemporaryFilename(char *path)
%
%  A description of each parameter follows.
%
%   o  path:  Specifies a pointer to an array of characters.  The unique path
%      name is returned in this array.
%
*/
MagickExport void TemporaryFilename(char *path)
{
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  /* NOTE(review): the unique name is acquired and then immediately released,
     so another process may claim the same path before the caller uses it
     (TOCTOU) -- inherent to this deprecated interface. */
  (void) AcquireUniqueFilename(path);
  (void) RelinquishUniqueFileResource(path);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T h r e s h o l d I m a g e                                               %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThresholdImage() changes the value of individual pixels based on
%  the intensity of each pixel compared to threshold.  The result is a
%  high-contrast, two color image.
%
%  The format of the ThresholdImage method is:
%
%      unsigned int ThresholdImage(Image *image,const double threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: Define the threshold value
%
*/
MagickExport unsigned int ThresholdImage(Image *image,const double threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  IndexPacket
    index;

  ssize_t
    y;

  /*
    Threshold image: map each pixel to colormap entry 0 or 1 depending on
    whether its intensity exceeds the threshold.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.7");
  if (!AcquireImageColormap(image,2))
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      "UnableToThresholdImage");
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      index=(IndexPacket) (GetPixelIntensity(image,q) <= threshold ? 0 : 1);
      SetPixelIndex(indexes+x,index);
      SetPixelRGBO(q,image->colormap+(ssize_t) index);
      q++;
    }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  /* NOTE(review): MagickTrue is returned even if a row failed to sync above;
     errors are reported only via image->exception. */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T h r e s h o l d I m a g e C h a n n e l                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ThresholdImageChannel() changes the value of individual pixels based on
%  the intensity of each pixel channel.  The result is a high-contrast image.
%
%  The format of the ThresholdImageChannel method is:
%
%      unsigned int ThresholdImageChannel(Image *image,const char *threshold)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold values.
%
*/
MagickExport unsigned int ThresholdImageChannel(Image *image,
  const char *threshold)
{
#define ThresholdImageTag  "Threshold/Image"

  MagickPixelPacket
    pixel;

  GeometryInfo
    geometry_info;

  IndexPacket
    index;

  ssize_t
    y;

  unsigned int
    flags;

  /*
    Threshold image: parse per-channel thresholds from the geometry string,
    then binarize each channel (or the intensity, for gray thresholds).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (threshold == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass) == MagickFalse)
    return(MagickFalse);
  GetMagickPixelPacket(image,&pixel);
  flags=ParseGeometry(threshold,&geometry_info);
  /* Missing channel values default to the red (first) threshold. */
  pixel.red=geometry_info.rho;
  if (flags & SigmaValue)
    pixel.green=geometry_info.sigma;
  else
    pixel.green=pixel.red;
  if (flags & XiValue)
    pixel.blue=geometry_info.xi;
  else
    pixel.blue=pixel.red;
  if (flags & PsiValue)
    pixel.opacity=geometry_info.psi;
  else
    pixel.opacity=(MagickRealType) OpaqueOpacity;
  if (flags & PercentValue)
    {
      /* Percent thresholds are scaled into the quantum range. */
      pixel.red*=QuantumRange/100.0f;
      pixel.green*=QuantumRange/100.0f;
      pixel.blue*=QuantumRange/100.0f;
      pixel.opacity*=QuantumRange/100.0f;
    }
  if (!(flags & SigmaValue))
    {
      /* Single-value threshold: build a 2-entry colormap; a zero threshold
         requests an automatically computed (dynamic) one. */
      if (!AcquireImageColormap(image,2))
        ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
          "UnableToThresholdImage");
      if (pixel.red == 0)
        (void) GetImageDynamicThreshold(image,2.0,2.0,&pixel,
          &image->exception);
    }
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register IndexPacket
      *magick_restrict indexes;

    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    indexes=GetAuthenticIndexQueue(image);
    if (IsMagickGray(&pixel) != MagickFalse)
      /* Gray threshold: binarize by intensity into the 2-entry colormap. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        index=(IndexPacket) (GetPixelIntensity(image,q) <= pixel.red ? 0 : 1);
        SetPixelIndex(indexes+x,index);
        SetPixelRed(q,image->colormap[(ssize_t) index].red);
        SetPixelGreen(q,image->colormap[(ssize_t) index].green);
        SetPixelBlue(q,image->colormap[(ssize_t) index].blue);
        q++;
      }
    else
      /* Per-channel threshold: binarize each channel independently. */
      for (x=0; x < (ssize_t) image->columns; x++)
      {
        SetPixelRed(q,(MagickRealType) q->red <= pixel.red ? 0 : QuantumRange);
        SetPixelGreen(q,(MagickRealType) q->green <= pixel.green ? 0 :
          QuantumRange);
        SetPixelBlue(q,(MagickRealType) q->blue <= pixel.blue ? 0 :
          QuantumRange);
        SetPixelOpacity(q,(MagickRealType) q->opacity <= pixel.opacity ? 0 :
          QuantumRange);
        q++;
      }
    if (!SyncAuthenticPixels(image,&image->exception))
      break;
  }
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
+   T r a n s f o r m C o l o r s p a c e                                     %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformColorspace() converts the image to a specified colorspace.
%  If the image is already in the requested colorspace, no work is performed.
%  Note that the current colorspace is stored in the image colorspace member.
%  The transformation matrices are not necessarily the standard ones: the
%  weights are rescaled to normalize the range of the transformed values to
%  be [0..QuantumRange].
%
%  Deprecated, replace with:
%
%    TransformImageColorspace(image,colorspace);
%
%  The format of the TransformColorspace method is:
%
%      unsigned int (void) TransformColorspace(Image *image,
%        const ColorspaceType colorspace)
%
%  A description of each parameter follows:
%
%    o image: the image to transform
%
%    o colorspace: the desired colorspace.
%
*/
MagickExport unsigned int TransformColorspace(Image *image,
  const ColorspaceType colorspace)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.6");
  return(TransformImageColorspace(image,colorspace));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s f o r m H S L                                                   %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransformHSL() converts a (red, green, blue) to a (hue, saturation,
%  lightness) triple.
%
%  The format of the TransformHSL method is:
%
%      void TransformHSL(const Quantum red,const Quantum green,
%        const Quantum blue,double *hue,double *saturation,double *lightness)
%
%  A description of each parameter follows:
%
%    o red, green, blue: A Quantum value representing the red, green, and
%      blue component of a pixel..
%
%    o hue, saturation, lightness: A pointer to a double value representing a
%      component of the HSL color space.
%
*/
MagickExport void TransformHSL(const Quantum red,const Quantum green,
  const Quantum blue,double *hue,double *saturation,double *lightness)
{
  MagickRealType
    b,
    delta,
    g,
    max,
    min,
    r;

  /*
    Convert RGB to HSL colorspace; outputs are normalized to [0,1].
  */
  assert(hue != (double *) NULL);
  assert(saturation != (double *) NULL);
  assert(lightness != (double *) NULL);
  r=QuantumScale*red;
  g=QuantumScale*green;
  b=QuantumScale*blue;
  max=MagickMax(r,MagickMax(g,b));
  min=MagickMin(r,MagickMin(g,b));
  *hue=0.0;
  *saturation=0.0;
  *lightness=(double) ((min+max)/2.0);
  delta=max-min;
  /* Achromatic (gray) pixel: hue and saturation stay 0. */
  if (delta == 0.0)
    return;
  *saturation=(double) (delta/((*lightness < 0.5) ? (min+max) :
    (2.0-max-min)));
  /* Hue sector is chosen by whichever channel is the maximum. */
  if (r == max)
    *hue=(double) (g == min ? 5.0+(max-b)/delta : 1.0-(max-g)/delta);
  else
    if (g == max)
      *hue=(double) (b == min ? 1.0+(max-r)/delta : 3.0-(max-b)/delta);
    else
      *hue=(double) (r == min ? 3.0+(max-g)/delta : 5.0-(max-r)/delta);
  *hue/=6.0;
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s l a t e T e x t                                                 %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TranslateText() replaces any embedded formatting characters with the
%  appropriate image attribute and returns the translated text.
%
%  Deprecated, replace with:
%
%    InterpretImageProperties(image_info,image,embed_text);
%
%  The format of the TranslateText method is:
%
%      char *TranslateText(const ImageInfo *image_info,Image *image,
%        const char *embed_text)
%
%  A description of each parameter follows:
%
%    o image_info: the image info.
%
%    o image: the image.
%
%    o embed_text: the address of a character string containing the embedded
%      formatting characters.
%
*/
MagickExport char *TranslateText(const ImageInfo *image_info,Image *image,
  const char *embed_text)
{
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.2.6");
  return(InterpretImageProperties(image_info,image,embed_text));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   T r a n s p a r e n t I m a g e                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransparentImage() changes the opacity value associated with any pixel
%  that matches color to the value defined by opacity.
%
%  By default color must match a particular pixel color exactly.  However,
%  in many cases two colors may differ by a small amount.  Fuzz defines
%  how much tolerance is acceptable to consider two colors as the same.
%  For example, set fuzz to 10 and the color red at intensities of 100 and
%  102 respectively are now interpreted as the same color.
%
%  The format of the TransparentImage method is:
%
%      MagickBooleanType TransparentImage(Image *image,
%        const PixelPacket target,const Quantum opacity)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o target: the RGB value of the target color.
%
%    o opacity: the replacement opacity value.
%
*/
MagickExport MagickBooleanType TransparentImage(Image *image,
  const PixelPacket target,const Quantum opacity)
{
#define TransparentImageTag  "Transparent/Image"

  MagickBooleanType
    proceed;

  ssize_t
    y;

  /*
    Make image color transparent: rewrite the opacity of every pixel that
    matches the target color (within the image's fuzz tolerance).
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v6.1.0");
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->matte == MagickFalse)
    (void) SetImageAlphaChannel(image,OpaqueAlphaChannel);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register PixelPacket
      *magick_restrict q;

    q=GetAuthenticPixels(image,0,y,image->columns,1,&image->exception);
    if (q == (PixelPacket *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      if (IsColorSimilar(image,q,&target) != MagickFalse)
        q->opacity=opacity;
      q++;
    }
    if (SyncAuthenticPixels(image,&image->exception) == MagickFalse)
      break;
    proceed=SetImageProgress(image,TransparentImageTag,(MagickOffsetType) y,
      image->rows);
    if (proceed == MagickFalse)
      break;
  }
  /* NOTE(review): MagickTrue is returned even when the loop aborts early;
     errors are reported only via image->exception. */
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%   U n s h i f t I m a g e L i s t                                           %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UnshiftImageList() adds the image to the beginning of the list.
%
%  Deprecated, replace with:
%
%    PrependImageToList(images,CloneImageList(image,exception));
%
%  The format of the UnshiftImageList method is:
%
%      unsigned int UnshiftImageList(Image **images,const Image *image,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o images: the image list.
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport unsigned int UnshiftImageList(Image **images,const Image *image,
  ExceptionInfo *exception)
{
  /*
    Deprecated thin wrapper: clone `image' (and its list) and prepend the
    clone to *images.  Always reports success; clone errors land in
    `exception'.
  */
  (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.5.2");
  PrependImageToList(images,CloneImageList(image,exception));
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
+   V a l i d a t e C o l o r m a p I n d e x                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ValidateColormapIndex() validates the colormap index.  If the index does
%  not range from 0 to the number of colors in the colormap an exception
%  issued and 0 is returned.
%
%  Deprecated, replace with:
%
%    ConstrainColormapIndex(image,index);
%
%  The format of the ValidateColormapIndex method is:
%
%      IndexPacket ValidateColormapIndex(Image *image,const size_t index)
%
%  A description of each parameter follows:
%
%    o index: Method ValidateColormapIndex returns colormap index if it is
%      valid other an exception issued and 0 is returned.
%
%    o image: the image.
%
%    o index: This integer is the colormap index.
%
*/
MagickExport IndexPacket ValidateColormapIndex(Image *image,
  const size_t index)
{
  /*
    Deprecated thin wrapper over ConstrainColormapIndex().
  */
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(DeprecateEvent,GetMagickModule(),"last use: v5.4.4");
  return(ConstrainColormapIndex(image,index));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   Z o o m I m a g e                                                         %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ZoomImage() creates a new image that is a scaled size of an existing one.
%  It allocates the memory necessary for the new Image structure and returns a
%  pointer to the new image.  The Point filter gives fast pixel replication,
%  Triangle is equivalent to bi-linear interpolation, and Mitchell gives
%  slower, very high-quality results.  See Graphic Gems III for details on
%  this algorithm.
%
%  The filter member of the Image structure specifies which image filter to
%  use. Blur specifies the blur factor where > 1 is blurry, < 1 is sharp.
%
%  The format of the ZoomImage method is:
%
%      Image *ZoomImage(const Image *image,const size_t columns,
%        const size_t rows,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o columns: An integer that specifies the number of columns in the zoom
%      image.
%
%    o rows: An integer that specifies the number of rows in the scaled
%      image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *ZoomImage(const Image *image,const size_t columns,
  const size_t rows,ExceptionInfo *exception)
{
  Image
    *zoom_image;

  assert(image != (const Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  /*
    Deprecated: delegate to ResizeImage() using the filter and blur factor
    stored on the source image.
  */
  zoom_image=ResizeImage(image,columns,rows,image->filter,image->blur,
    exception);
  return(zoom_image);
}
#endif
im2col.c
#include <stdlib.h> #include "im2col.h" void im2col(float* x,float* w,int RR,int W,int K,int B,int A,float*output){ float* tmp2 = (float*) calloc(1,(W) * (RR) * sizeof (float)); for (int H10 = 0; H10 < W; H10++) { for (int H11 = 0; H11 < RR; H11++) { if (H10 + H11 < K) { tmp2[(RR) * (H10) + H11] = x[H10 + H11]; } } } float* x1 = tmp2; #pragma omp parallel for for (int H13 = 0; H13 < K; H13++) { for (int H14 = 0; H14 < W; H14++) { for (int H15 = 0; H15 < RR; H15++) { float tmp3 = 0; float tmp4 = 0; tmp4 = w[(((B)) * (H13)) + H15]; float tmp5 = 0; tmp5 = x1[(((RR)) * (H14)) + H15]; tmp3 = tmp4 * tmp5; output[(W) * (H13) + H14] = output[(W) * (H13) + H14] + tmp3; } } } free(tmp2); }
Ohara_Rudy_2011.c
#include <stdlib.h>
#include "Ohara_Rudy_2011.h"

/* Cell phenotype selector: 0 = endocardial, 1 = epicardial, 2 = mid-myocardial
   (inferred from the celltype==1 / celltype==2 scalings below -- TODO confirm
   against the model header). */
int celltype = 0;

/* Report the model's initial transmembrane voltage and/or the number of
   state variables, as requested by the flags. */
GET_CELL_MODEL_DATA(init_cell_model_data) {
    if(get_initial_v)
        cell_model->initial_v = INITIAL_V;
    if(get_neq)
        cell_model->number_of_ode_equations = NEQ;
}

/* Load the 41 resting-state initial conditions into the state vector sv. */
SET_ODE_INITIAL_CONDITIONS_CPU(set_model_initial_conditions_cpu) {
    sv[0] = INITIAL_V; //v
    sv[1] = 7;         //nai
    sv[2] = 7;         //nass
    sv[3] = 145;       //ki
    sv[4] = 145;       //kss
    sv[5] = 1.0e-4;    //cai
    sv[6] = 1.0e-4;    //cass
    sv[7] = 1.2;       //cansr
    sv[8] = 1.2;       //cajsr
    sv[9] = 0;         //m
    sv[10] = 1;        //hf
    sv[11] = 1;        //hs
    sv[12] = 1;        //j
    sv[13] = 1;        //hsp
    sv[14] = 1;        //jp
    sv[15] = 0;        //mL
    sv[16] = 1;        //hL
    sv[17] = 1;        //hLp
    sv[18] = 0;        //a
    sv[19] = 1;        //iF
    sv[20] = 1;        //iS
    sv[21] = 0;        //ap
    sv[22] = 1;        //iFp
    sv[23] = 1;        //iSp
    sv[24] = 0;        //d
    sv[25] = 1;        //ff
    sv[26] = 1;        //fs
    sv[27] = 1;        //fcaf
    sv[28] = 1;        //fcas
    sv[29] = 1;        //jca
    sv[30] = 0;        //nca
    sv[31] = 1;        //ffp
    sv[32] = 1;        //fcafp
    sv[33] = 0;        //xrf
    sv[34] = 0;        //xrs
    sv[35] = 0;        //xs1
    sv[36] = 0;        //xs2
    sv[37] = 1;        //xk1
    sv[38] = 0;        //Jrelnp
    sv[39] = 0;        //Jrelp
    sv[40] = 0;        //CaMKt
}

/* Advance every requested cell by num_steps explicit steps of size dt.
   Cells are independent, so the outer loop is parallelized. */
SOLVE_MODEL_ODES_CPU(solve_model_odes_cpu) {
    uint32_t sv_id;
    int i;

    #pragma omp parallel for private(sv_id)
    for (i = 0; i < num_cells_to_solve; i++) {
        /* Map loop position to the cell's slot in sv. */
        if(cells_to_solve)
            sv_id = cells_to_solve[i];
        else
            sv_id = (uint32_t )i;

        /* NOTE(review): stim_currents is indexed by loop position i, not by
           sv_id -- confirm that callers pack stimuli in cells_to_solve order
           when cells_to_solve is non-NULL. */
        for (int j = 0; j < num_steps; ++j) {
            solve_model_ode_cpu(dt, sv + (sv_id * NEQ), stim_currents[i]);
        }
    }
}

/* Take one explicit time step for a single cell: copy the state, let
   RHS_cpu produce the updated state in rDY, and write it back. */
void solve_model_ode_cpu(real dt, real *sv, real stim_current) {
    real rY[NEQ], rDY[NEQ];

    for(int i = 0; i < NEQ; i++)
        rY[i] = sv[i];

    RHS_cpu(rY, rDY, stim_current, dt);

    for(int i = 0; i < NEQ; i++)
        sv[i] = rDY[i];
}

/* One explicit step of the O'Hara-Rudy 2011 human ventricular model.
 *
 * NOTE: despite the name, rDY_ does NOT receive time derivatives -- each
 * slot receives the UPDATED state after one step of size dt (see the
 * in-place updates below, e.g. v += -dt*(...) and the gate updates of the
 * form g = gss - (gss - g)*expf(-dt/tg), a closed-form exponential update;
 * concentrations use forward Euler).
 *
 * sv           - current state (41 entries, same layout as the initial
 *                conditions above).
 * rDY_         - receives the updated state.
 * stim_current - external stimulus added to the total membrane current.
 * dt           - step size.
 */
void RHS_cpu(const real *sv, real *rDY_, real stim_current,real dt) {

    // State variables
    real v = sv[0];
    real nai = sv[1];
    real nass = sv[2];
    real ki = sv[3];
    real kss = sv[4];
    real cai = sv[5];
    real cass = sv[6];
    real cansr = sv[7];
    real cajsr = sv[8];
    real m = sv[9];
    real hf = sv[10];
    real hs = sv[11];
    real j = sv[12];
    real hsp = sv[13];
    real jp = sv[14];
    real mL = sv[15];
    real hL = sv[16];
    real hLp = sv[17];
    real a = sv[18];
    real iF = sv[19];
    real iS = sv[20];
    real ap = sv[21];
    real iFp = sv[22];
    real iSp = sv[23];
    real d = sv[24];
    real ff = sv[25];
    real fs = sv[26];
    real fcaf = sv[27];
    real fcas = sv[28];
    real jca = sv[29];
    real nca = sv[30];
    real ffp = sv[31];
    real fcafp = sv[32];
    real xrf = sv[33];
    real xrs = sv[34];
    real xs1 = sv[35];
    real xs2 = sv[36];
    real xk1 = sv[37];
    real Jrelnp = sv[38];
    real Jrelp = sv[39];
    real CaMKt = sv[40];

    //constants
    real const nao=140.0;//extracellular sodium in mM
    real const cao=1.8;//extracellular calcium in mM
    real const ko=5.4;//extracellular potassium in mM

    //buffer parameters
    real const BSRmax=0.047;
    real const KmBSR=0.00087;
    real const BSLmax=1.124;
    real const KmBSL=0.0087;
    real const cmdnmax=0.05;
    real const kmcmdn=0.00238;
    real const trpnmax=0.07;
    real const kmtrpn=0.0005;
    real const csqnmax=10.0f;
    real const kmcsqn=0.8;

    //CaMK parameters
    real const aCaMK=0.05;
    real const bCaMK=0.00068;
    real const CaMKo=0.05;
    real const KmCaM=0.0015;
    real const KmCaMK=0.15;

    //physical constants
    real const R=8314.0;
    real const T=310.0;
    real const F=96485.0;

    //cell geometry
    real const L=0.01;
    real const rad=0.0011;
    real const vcell=1000.0f*3.14f*rad*rad*L;
    real const Ageo=2.0f*3.14f*rad*rad+2.0f*3.14f*rad*L;
    real const Acap=2*Ageo;
    real const vmyo=0.68f*vcell;
    // real const vmito=0.26*vcell;
    // real const vsr=0.06*vcell;
    real const vnsr=0.0552f*vcell;
    real const vjsr=0.0048f*vcell;
    real const vss=0.02f*vcell;

    // Reversal potentials (Nernst)
    real ENa=(R*T/F)*logf(nao/nai);
    real EK=(R*T/F)*logf(ko/ki);
    real EKs=(R*T/F)*logf((ko+0.01833f*nao)/(ki+0.01833f*nai));

    // CaMK activity (bound + trapped fractions)
    real CaMKb=CaMKo*(1.0f-CaMKt)/(1.0f+KmCaM/cass);
    real CaMKa=CaMKb+CaMKt;

    real vffrt=v*F*F/(R*T);
    real vfrt=v*F/(R*T);

    // INa gating (m, h fast/slow, j, and phosphorylated variants)
    real mss=1.0f/(1.0f+expf((-(v+39.57f))/9.871f));
    real tm=1.0f/(6.765f*expf((v+11.64f)/34.77f)+8.552f*expf(-(v+77.42f)/5.955f));
    m=mss-(mss-m)*expf(-dt/tm);
    real hss=1.0f/(1+expf((v+82.90f)/6.086f));
    real thf=1.0f/(1.432e-5f*expf(-(v+1.196f)/6.285f)+6.149f*expf((v+0.5096f)/20.27f));
    real ths=1.0f/(0.009794f*expf(-(v+17.95f)/28.05f)+0.3343f*expf((v+5.730f)/56.66f));
    real Ahf=0.99;
    real Ahs=1.0f-Ahf;
    hf=hss-(hss-hf)*expf(-dt/thf);
    hs=hss-(hss-hs)*expf(-dt/ths);
    real h=Ahf*hf+Ahs*hs;
    real jss=hss;
    real tj=2.038f+1.0f/(0.02136f*expf(-(v+100.6f)/8.281f)+0.3052f*expf((v+0.9941f)/38.45f));
    j=jss-(jss-j)*expf(-dt/tj);
    real hssp=1.0f/(1.0f+expf((v+89.1f)/6.086f));
    real thsp=3.0f*ths;
    hsp=hssp-(hssp-hsp)*expf(-dt/thsp);
    real hp=Ahf*hf+Ahs*hsp;
    real tjp=1.46f*tj;
    jp=jss-(jss-jp)*expf(-dt/tjp);

    // INa: fast sodium current (CaMK-phosphorylated fraction fINap)
    real GNa=75;
    real fINap=(1.0f/(1.0f+KmCaMK/CaMKa));
    real INa=GNa*(v-ENa)*m*m*m*((1.0f-fINap)*h*j+fINap*hp*jp);

    // INaL: late sodium current
    real mLss=1.0f/(1.0f+expf((-(v+42.85f))/5.264f));
    real tmL=tm;
    mL=mLss-(mLss-mL)*expf(-dt/tmL);
    real hLss=1.0f/(1.0f+expf((v+87.61f)/7.488f));
    real thL=200.0;
    hL=hLss-(hLss-hL)*expf(-dt/thL);
    real hLssp=1.0f/(1.0f+expf((v+93.81f)/7.488f));
    real thLp=3.0f*thL;
    hLp=hLssp-(hLssp-hLp)*expf(-dt/thLp);
    real GNaL=0.0075;
    if (celltype==1) { GNaL*=0.6; }
    real fINaLp=(1.0f/(1.0f+KmCaMK/CaMKa));
    real INaL=GNaL*(v-ENa)*mL*((1.0f-fINaLp)*hL+fINaLp*hLp);

    // Ito: transient outward potassium current
    real ass=1.0f/(1.0f+expf((-(v-14.34f))/14.82f));
    real ta=1.0515f/(1.0f/(1.2089f*(1.0f+expf(-(v-18.4099f)/29.3814f)))+3.5f/(1.0f+expf((v+100.0f)/29.3814f)));
    a=ass-(ass-a)*expf(-dt/ta);
    real iss=1.0f/(1.0f+expf((v+43.94f)/5.711f));
    real delta_epi;
    if (celltype==1) {
        delta_epi=1.0f-(0.95f/(1.0f+expf((v+70.0f)/5.0f)));
    } else {
        delta_epi=1.0f;
    }
    real tiF=4.562f+1.0f/(0.3933f*expf((-(v+100.0f))/100.0f)+0.08004f*expf((v+50.0f)/16.59f));
    real tiS=23.62f+1.0f/(0.001416f*expf((-(v+96.52f))/59.05f)+1.780e-8f*expf((v+114.1f)/8.079f));
    tiF*=delta_epi;
    tiS*=delta_epi;
    real AiF=1.0f/(1.0f+expf((v-213.6f)/151.2f));
    real AiS=1.0f-AiF;
    iF=iss-(iss-iF)*expf(-dt/tiF);
    iS=iss-(iss-iS)*expf(-dt/tiS);
    real i=AiF*iF+AiS*iS;
    real assp=1.0f/(1.0f+expf((-(v-24.34f))/14.82f));
    ap=assp-(assp-ap)*expf(-dt/ta);
    real dti_develop=1.354f+1.0e-4f/(expf((v-167.4f)/15.89f)+expf(-(v-12.23f)/0.2154f));
    real dti_recover=1.0f-0.5f/(1.0f+expf((v+70.0f)/20.0f));
    real tiFp=dti_develop*dti_recover*tiF;
    real tiSp=dti_develop*dti_recover*tiS;
    iFp=iss-(iss-iFp)*expf(-dt/tiFp);
    iSp=iss-(iss-iSp)*expf(-dt/tiSp);
    real ip=AiF*iFp+AiS*iSp;
    real Gto=0.02;
    if (celltype==1) { Gto*=4.0; }
    if (celltype==2) { Gto*=4.0; }
    real fItop=(1.0f/(1.0f+KmCaMK/CaMKa));
    real Ito = Gto*(v-EK)*((1.0f-fItop)*a*i+fItop*ap*ip);

    // ICaL / ICaNa / ICaK: L-type calcium channel gating and fluxes
    real dss=1.0f/(1.0f+expf((-(v+3.940f))/4.230f));
    real td=0.6f+1.0f/(expf(-0.05f*(v+6.0f))+expf(0.09f*(v+14.0f)));
    d=dss-(dss-d)*expf(-dt/td);
    real fss=1.0f/(1.0f+expf((v+19.58f)/3.696f));
    real tff=7.0f+1.0f/(0.0045f*expf(-(v+20.0f)/10.0f)+0.0045f*expf((v+20.0f)/10.0f));
    real tfs=1000.0f+1.0f/(0.000035f*expf(-(v+5.0f)/4.0f)+0.000035f*expf((v+5.0f)/6.0f));
    real Aff=0.6;
    real Afs=1.0f-Aff;
    ff=fss-(fss-ff)*expf(-dt/tff);
    fs=fss-(fss-fs)*expf(-dt/tfs);
    real f=Aff*ff+Afs*fs;
    real fcass=fss;
    real tfcaf=7.0f+1.0f/(0.04f*expf(-(v-4.0f)/7.0f)+0.04f*expf((v-4.0f)/7.0f));
    real tfcas=100.0f+1.0f/(0.00012f*expf(-v/3.0f)+0.00012f*expf(v/7.0f));
    real Afcaf=0.3f+0.6f/(1.0f+expf((v-10.0f)/10.0f));
    real Afcas=1.0f-Afcaf;
    fcaf=fcass-(fcass-fcaf)*expf(-dt/tfcaf);
    fcas=fcass-(fcass-fcas)*expf(-dt/tfcas);
    real fca=Afcaf*fcaf+Afcas*fcas;
    real tjca=75.0;
    jca=fcass-(fcass-jca)*expf(-dt/tjca);
    real tffp=2.5f*tff;
    ffp=fss-(fss-ffp)*expf(-dt/tffp);
    real fp=Aff*ffp+Afs*fs;
    real tfcafp=2.5f*tfcaf;
    fcafp=fcass-(fcass-fcafp)*expf(-dt/tfcafp);
    real fcap=Afcaf*fcafp+Afcas*fcas;
    real Kmn=0.002;
    real k2n=1000.0;
    real km2n=jca*1.0f;
    real anca=1.0f/(k2n/km2n+powf(1.0f+Kmn/cass,4.0));
    nca=anca*k2n/km2n-(anca*k2n/km2n-nca)*expf(-km2n*dt);
    // GHK-style driving-force terms
    real PhiCaL=4.0f*vffrt*(cass*expf(2.0f*vfrt)-0.341f*cao)/(expf(2.0f*vfrt)-1.0f);
    real PhiCaNa=1.0f*vffrt*(0.75f*nass*expf(1.0f*vfrt)-0.75f*nao)/(expf(1.0f*vfrt)-1.0f);
    real PhiCaK=1.0f*vffrt*(0.75f*kss*expf(1.0f*vfrt)-0.75f*ko)/(expf(1.0f*vfrt)-1.0f);
    real zca=2.0f;
    real PCa=0.0001;
    if (celltype==1) { PCa*=1.2; }
    if (celltype==2) { PCa*=2.5; }
    real PCap=1.1f*PCa;
    real PCaNa=0.00125f*PCa;
    real PCaK=3.574e-4f*PCa;
    real PCaNap=0.00125f*PCap;
    real PCaKp=3.574e-4f*PCap;
    real fICaLp=(1.0f/(1.0f+KmCaMK/CaMKa));
    real ICaL=(1.0f-fICaLp)*PCa*PhiCaL*d*(f*(1.0f-nca)+jca*fca*nca)+fICaLp*PCap*PhiCaL*d*(fp*(1.0f-nca)+jca*fcap*nca);
    real ICaNa=(1.0f-fICaLp)*PCaNa*PhiCaNa*d*(f*(1.0f-nca)+jca*fca*nca)+fICaLp*PCaNap*PhiCaNa*d*(fp*(1.0f-nca)+jca*fcap*nca);
    real ICaK=(1.0f-fICaLp)*PCaK*PhiCaK*d*(f*(1.0f-nca)+jca*fca*nca)+fICaLp*PCaKp*PhiCaK*d*(fp*(1.0f-nca)+jca*fcap*nca);

    // IKr: rapid delayed rectifier potassium current
    real xrss=1.0f/(1.0f+expf((-(v+8.337f))/6.789f));
    real txrf=12.98f+1.0f/(0.3652f*expf((v-31.66f)/3.869f)+4.123e-5f*expf((-(v-47.78f))/20.38f));
    real txrs=1.865f+1.0f/(0.06629f*expf((v-34.70f)/7.355f)+1.128e-5f*expf((-(v-29.74f))/25.94f));
    real Axrf=1.0f/(1.0f+expf((v+54.81f)/38.21f));
    real Axrs=1.0f-Axrf;
    xrf=xrss-(xrss-xrf)*expf(-dt/txrf);
    xrs=xrss-(xrss-xrs)*expf(-dt/txrs);
    real xr=Axrf*xrf+Axrs*xrs;
    real rkr=1.0f/(1.0f+expf((v+55.0f)/75.0f))*1.0f/(1.0f+expf((v-10.0f)/30.0f));
    real GKr=0.046;
    if (celltype==1) { GKr*=1.3; }
    if (celltype==2) { GKr*=0.8; }
    real IKr = GKr*sqrtf(ko/5.4f)*xr*rkr*(v-EK);

    // IKs: slow delayed rectifier potassium current
    real xs1ss=1.0f/(1.0f+expf((-(v+11.60f))/8.932f));
    real txs1=817.3f+1.0f/(2.326e-4f*expf((v+48.28f)/17.80f)+0.001292f*expf((-(v+210.0f))/230.0f));
    xs1=xs1ss-(xs1ss-xs1)*expf(-dt/txs1);
    real xs2ss=xs1ss;
    real txs2=1.0f/(0.01f*expf((v-50.0f)/20.0f)+0.0193f*expf((-(v+66.54f))/31.0f));
    xs2=xs2ss-(xs2ss-xs2)*expf(-dt/txs2);
    real KsCa=1.0f+0.6f/(1.0f+powf(3.8e-5f/cai,1.4f));
    real GKs=0.0034;
    if (celltype==1) { GKs*=1.4; }
    real IKs=GKs*KsCa*xs1*xs2*(v-EKs);

    // IK1: inward rectifier potassium current
    real xk1ss=1.0f/(1.0f+expf(-(v+2.5538f*ko+144.59f)/(1.5692f*ko+3.8115f)));
    real txk1=122.2f/(expf((-(v+127.2f))/20.36f)+expf((v+236.8f)/69.33f));
    xk1=xk1ss-(xk1ss-xk1)*expf(-dt/txk1);
    real rk1=1.0f/(1.0f+expf((v+105.8f-2.6f*ko)/9.493f));
    real GK1=0.1908;
    if (celltype==1) { GK1*=1.2; }
    if (celltype==2) { GK1*=1.3; }
    real IK1=GK1*sqrtf(ko)*rk1*xk1*(v-EK);

    // INaCa: sodium/calcium exchanger, myoplasmic component
    real kna1=15.0;
    real kna2=5.0;
    real kna3=88.12;
    real kasymm=12.5;
    real wna=6.0e4;
    real wca=6.0e4;
    real wnaca=5.0e3;
    real kcaon=1.5e6;
    real kcaoff=5.0e3;
    real qna=0.5224;
    real qca=0.1670;
    real hca=expf((qca*v*F)/(R*T));
    real hna=expf((qna*v*F)/(R*T));
    real h1=1+nai/kna3*(1+hna);
    real h2=(nai*hna)/(kna3*h1);
    real h3=1.0f/h1;
    real h4=1.0f+nai/kna1*(1+nai/kna2);
    real h5=nai*nai/(h4*kna1*kna2);
    real h6=1.0f/h4;
    real h7=1.0f+nao/kna3*(1.0f+1.0f/hna);
    real h8=nao/(kna3*hna*h7);
    real h9=1.0f/h7;
    real h10=kasymm+1.0f+nao/kna1*(1.0f+nao/kna2);
    real h11=nao*nao/(h10*kna1*kna2);
    real h12=1.0f/h10;
    real k1=h12*cao*kcaon;
    real k2=kcaoff;
    real k3p=h9*wca;
    real k3pp=h8*wnaca;
    real k3=k3p+k3pp;
    real k4p=h3*wca/hca;
    real k4pp=h2*wnaca;
    real k4=k4p+k4pp;
    real k5=kcaoff;
    real k6=h6*cai*kcaon;
    real k7=h5*h2*wna;
    real k8=h8*h11*wna;
    real x1=k2*k4*(k7+k6)+k5*k7*(k2+k3);
    real x2=k1*k7*(k4+k5)+k4*k6*(k1+k8);
    real x3=k1*k3*(k7+k6)+k8*k6*(k2+k3);
    real x4=k2*k8*(k4+k5)+k3*k5*(k1+k8);
    real E1=x1/(x1+x2+x3+x4);
    real E2=x2/(x1+x2+x3+x4);
    real E3=x3/(x1+x2+x3+x4);
    real E4=x4/(x1+x2+x3+x4);
    real KmCaAct=150.0e-6;
    real allo=1.0f/(1.0f+powf(KmCaAct/cai,2.0f));
    real zna=1.0f;
    real JncxNa=3.0f*(E4*k7-E1*k8)+E3*k4pp-E2*k3pp;
    real JncxCa=E2*k2-E1*k1;
    real Gncx=0.0008;
    if (celltype==1) { Gncx*=1.1; }
    if (celltype==2) { Gncx*=1.4; }
    real INaCa_i=0.8f*Gncx*allo*(zna*JncxNa+zca*JncxCa);

    // INaCa: subspace component (same rate scheme, nass/cass instead of
    // nai/cai; the h*/k*/x*/E* temporaries are deliberately reused)
    h1=1+nass/kna3*(1+hna);
    h2=(nass*hna)/(kna3*h1);
    h3=1.0f/h1;
    h4=1.0f+nass/kna1*(1+nass/kna2);
    h5=nass*nass/(h4*kna1*kna2);
    h6=1.0f/h4;
    h7=1.0f+nao/kna3*(1.0f+1.0f/hna);
    h8=nao/(kna3*hna*h7);
    h9=1.0f/h7;
    h10=kasymm+1.0f+nao/kna1*(1+nao/kna2);
    h11=nao*nao/(h10*kna1*kna2);
    h12=1.0f/h10;
    k1=h12*cao*kcaon;
    k2=kcaoff;
    k3p=h9*wca;
    k3pp=h8*wnaca;
    k3=k3p+k3pp;
    k4p=h3*wca/hca;
    k4pp=h2*wnaca;
    k4=k4p+k4pp;
    k5=kcaoff;
    k6=h6*cass*kcaon;
    k7=h5*h2*wna;
    k8=h8*h11*wna;
    x1=k2*k4*(k7+k6)+k5*k7*(k2+k3);
    x2=k1*k7*(k4+k5)+k4*k6*(k1+k8);
    x3=k1*k3*(k7+k6)+k8*k6*(k2+k3);
    x4=k2*k8*(k4+k5)+k3*k5*(k1+k8);
    E1=x1/(x1+x2+x3+x4);
    E2=x2/(x1+x2+x3+x4);
    E3=x3/(x1+x2+x3+x4);
    E4=x4/(x1+x2+x3+x4);
    KmCaAct=150.0e-6;
    allo=1.0f/(1.0f+powf(KmCaAct/cass,2.0f));
    JncxNa=3.0f*(E4*k7-E1*k8)+E3*k4pp-E2*k3pp;
    JncxCa=E2*k2-E1*k1;
    real INaCa_ss=0.2f*Gncx*allo*(zna*JncxNa+zca*JncxCa);
    real INaCa=INaCa_i+INaCa_ss;

    // INaK: sodium/potassium pump (4-state cycle; note k3p/k4p are reused
    // here with new pump-rate values)
    real k1p=949.5;
    real k1m=182.4;
    real k2p=687.2;
    real k2m=39.4;
    k3p=1899.0;
    real k3m=79300.0;
    k4p=639.0;
    real k4m=40.0;
    real Knai0=9.073;
    real Knao0=27.78;
    real delta=-0.1550f;
    real Knai=Knai0*expf((delta*v*F)/(3.0f*R*T));
    real Knao=Knao0*expf(((1.0f-delta)*v*F)/(3.0f*R*T));
    real Kki=0.5;
    real Kko=0.3582;
    real MgADP=0.05;
    real MgATP=9.8;
    real Kmgatp=1.698e-7;
    real H=1.0e-7;
    real eP=4.2;
    real Khp=1.698e-7;
    real Knap=224.0;
    real Kxkur=292.0;
    real P=eP/(1.0f+H/Khp+nai/Knap+ki/Kxkur);
    real a1=(k1p*powf(nai/Knai,3.0))/(powf(1.0f+nai/Knai,3.0)+powf(1.0f+ki/Kki,2.0f)-1.0f);
    real b1=k1m*MgADP;
    real a2=k2p;
    real b2=(k2m*powf(nao/Knao,3.0))/(powf(1.0f+nao/Knao,3.0)+powf(1.0f+ko/Kko,2.0f)-1.0f);
    real a3=(k3p*powf(ko/Kko,2.0f))/(powf(1.0f+nao/Knao,3.0)+powf(1.0f+ko/Kko,2.0f)-1.0f);
    real b3=(k3m*P*H)/(1.0f+MgATP/Kmgatp);
    real a4=(k4p*MgATP/Kmgatp)/(1.0f+MgATP/Kmgatp);
    real b4=(k4m*powf(ki/Kki,2.0f))/(powf(1.0f+nai/Knai,3.0)+powf(1.0f+ki/Kki,2.0f)-1.0f);
    x1=a4*a1*a2+b2*b4*b3+a2*b4*b3+b3*a1*a2;
    x2=b2*b1*b4+a1*a2*a3+a3*b1*b4+a2*a3*b4;
    x3=a2*a3*a4+b3*b2*b1+b2*b1*a4+a3*a4*b1;
    x4=b4*b3*b2+a3*a4*a1+b2*a4*a1+b3*b2*a1;
    E1=x1/(x1+x2+x3+x4);
    E2=x2/(x1+x2+x3+x4);
    E3=x3/(x1+x2+x3+x4);
    E4=x4/(x1+x2+x3+x4);
    real zk=1.0f;
    real JnakNa=3.0f*(E1*a3-E2*b3);
    real JnakK=2.0f*(E4*b1-E3*a1);
    real Pnak=30;
    if (celltype==1) { Pnak*=0.9; }
    if (celltype==2) { Pnak*=0.7; }
    real INaK=Pnak*(zna*JnakNa+zk*JnakK);

    // IKb, INab, ICab, IpCa: background and pump currents
    real xkb=1.0f/(1.0f+expf(-(v-14.48f)/18.34f));
    real GKb=0.003;
    if (celltype==1) { GKb*=0.6; }
    real IKb=GKb*xkb*(v-EK);
    real PNab=3.75e-10;
    real INab=PNab*vffrt*(nai*expf(vfrt)-nao)/(expf(vfrt)-1.0f);
    real PCab=2.5e-8;
    real ICab=PCab*4.0f*vffrt*(cai*expf(2.0f*vfrt)-0.341f*cao)/(expf(2.0f*vfrt)-1.0f);
    real GpCa=0.0005;
    real IpCa=GpCa*cai/(0.0005f+cai);

    //voltage() -- forward-Euler update of the membrane potential
    v+=-dt*(INa+INaL+Ito+ICaL+ICaNa+ICaK+IKr+IKs+IK1+INaCa+INaK+INab+IKb+IpCa+ICab+stim_current);

    // CaMK trapping dynamics
    CaMKb = CaMKo*(1.0f-CaMKt)/(1.0f+KmCaM/cass);
    CaMKa = CaMKb+CaMKt;
    CaMKt+=dt*(aCaMK*CaMKb*(CaMKb+CaMKt)-bCaMK*CaMKt);

    // Diffusion fluxes between subspace and myoplasm
    real JdiffNa=(nass-nai)/2.0f;
    real JdiffK=(kss-ki)/2.0f;
    real Jdiff=(cass-cai)/0.2f;

    // Jrel: SR calcium release (non-phosphorylated and phosphorylated)
    real bt=4.75;
    real a_rel=0.5f*bt;
    real Jrel_inf=a_rel*(-ICaL)/(1.0f+powf(1.5f/cajsr,8.0));
    if (celltype==2) { Jrel_inf*=1.7; }
    real tau_rel=bt/(1.0f+0.0123f/cajsr);
    if (tau_rel<0.005) { tau_rel=0.005; } // floor to keep expf(-dt/tau_rel) stable
    Jrelnp=Jrel_inf-(Jrel_inf-Jrelnp)*expf(-dt/tau_rel);
    real btp=1.25f*bt;
    real a_relp=0.5f*btp;
    real Jrel_infp=a_relp*(-ICaL)/(1.0f+powf(1.5f/cajsr,8.0f));
    if (celltype==2) { Jrel_infp*=1.7; }
    real tau_relp=btp/(1.0f+0.0123f/cajsr);
    if (tau_relp<0.005f) { tau_relp=0.005f; }
    Jrelp=Jrel_infp-(Jrel_infp-Jrelp)*expf(-dt/tau_relp);
    real fJrelp=(1.0f/(1.0f+KmCaMK/CaMKa));
    real Jrel=(1.0f-fJrelp)*Jrelnp+fJrelp*Jrelp;

    // Jup/Jleak/Jtr: SR calcium uptake, leak and inter-compartment transfer
    real Jupnp=0.004375f*cai/(cai+0.00092f);
    real Jupp=2.75f*0.004375f*cai/(cai+0.00092f-0.00017f);
    if (celltype==1) {
        Jupnp*=1.3;
        Jupp*=1.3;
    }
    real fJupp=(1.0f/(1.0f+KmCaMK/CaMKa));
    real Jleak=0.0039375f*cansr/15.0f;
    real Jup=(1.0f-fJupp)*Jupnp+fJupp*Jupp-Jleak;
    real Jtr=(cansr-cajsr)/100.0f;

    // Concentration updates (forward Euler; Bcai/Bcass/Bcajsr are
    // instantaneous buffering factors)
    nai+=dt*(-(INa+INaL+3.0*INaCa_i+3.0*INaK+INab)*Acap/(F*vmyo)+JdiffNa*vss/vmyo);
    nass+=dt*(-(ICaNa+3.0*INaCa_ss)*Acap/(F*vss)-JdiffNa);
    ki+=dt*(-(Ito+IKr+IKs+IK1+IKb+stim_current-2.0f*INaK)*Acap/(F*vmyo)+JdiffK*vss/vmyo);
    kss+=dt*(-(ICaK)*Acap/(F*vss)-JdiffK);
    real Bcai;
    if (celltype==1) {
        Bcai=1.0f/(1.0f+1.3f*cmdnmax*kmcmdn/powf(kmcmdn+cai,2.0f)+trpnmax*kmtrpn/powf(kmtrpn+cai,2.0f));
    } else {
        Bcai=1.0f/(1.0f+cmdnmax*kmcmdn/powf(kmcmdn+cai,2.0f)+trpnmax*kmtrpn/powf(kmtrpn+cai,2.0f));
    }
    cai+=dt*(Bcai*(-(IpCa+ICab-2.0f*INaCa_i)*Acap/(2.0f*F*vmyo)-Jup*vnsr/vmyo+Jdiff*vss/vmyo));
    real Bcass=1.0f/(1.0f+BSRmax*KmBSR/powf(KmBSR+cass,2.0f)+BSLmax*KmBSL/powf(KmBSL+cass,2.0f));
    cass+=dt*(Bcass*(-(ICaL-2.0f*INaCa_ss)*Acap/(2.0f*F*vss)+Jrel*vjsr/vss-Jdiff));
    cansr+=dt*(Jup-Jtr*vjsr/vnsr);
    real Bcajsr=1.0f/(1.0f+csqnmax*kmcsqn/powf(kmcsqn+cajsr,2.0f));
    cajsr+=dt*(Bcajsr*(Jtr-Jrel));

    // Write back the UPDATED state (not derivatives; see function comment)
    rDY_[0] = v;
    rDY_[1] = nai;
    rDY_[2] = nass;
    rDY_[3] = ki;
    rDY_[4] = kss;
    rDY_[5] = cai;
    rDY_[6] = cass;
    rDY_[7] = cansr;
    rDY_[8] = cajsr;
    rDY_[9] = m;
    rDY_[10] = hf;
    rDY_[11] = hs;
    rDY_[12] = j;
    rDY_[13] = hsp;
    rDY_[14] = jp;
    rDY_[15] = mL;
    rDY_[16] = hL;
    rDY_[17] = hLp;
    rDY_[18] = a;
    rDY_[19] = iF;
    rDY_[20] = iS;
    rDY_[21] = ap;
    rDY_[22] = iFp;
    rDY_[23] = iSp;
    rDY_[24] = d;
    rDY_[25] = ff;
    rDY_[26] = fs;
    rDY_[27] = fcaf;
    rDY_[28] = fcas;
    rDY_[29] = jca;
    rDY_[30] = nca;
    rDY_[31] = ffp;
    rDY_[32] = fcafp;
    rDY_[33] = xrf;
    rDY_[34] = xrs;
    rDY_[35] = xs1;
    rDY_[36] = xs2;
    rDY_[37] = xk1;
    rDY_[38] = Jrelnp;
    rDY_[39] = Jrelp;
    rDY_[40] = CaMKt;
}
calculate_signed_distance_to_3d_skin_process.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Daniel Baumgaertner // Johannes Wolf // #if !defined(KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED ) #define KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED // System includes #include <string> #include <iostream> #include <ctime> // External includes // Project includes #include "includes/define.h" #include "processes/process.h" #include "includes/model_part.h" #include "includes/deprecated_variables.h" #include "spatial_containers/octree_binary.h" #include "utilities/spatial_containers_configure.h" #include "utilities/timer.h" #include "utilities/math_utils.h" #include "utilities/geometry_utilities.h" #include "geometries/triangle_3d_3.h" #include "geometries/quadrilateral_3d_4.h" #include "utilities/body_normal_calculation_utils.h" #include "includes/kratos_flags.h" #include "utilities/binbased_fast_point_locator.h" #include "utilities/binbased_nodes_in_element_locator.h" #include "processes/calculate_distance_to_skin_process.h" #ifdef _OPENMP #include "omp.h" #endif using namespace boost::numeric::ublas; namespace Kratos { class DistanceSpatialContainersConfigure { public: class CellNodeData { double mDistance; double mCoordinates[3]; std::size_t mId; public: double& Distance(){return mDistance;} double& X() {return mCoordinates[0];} double& Y() {return mCoordinates[1];} double& Z() {return mCoordinates[2];} double& operator[](int i) {return mCoordinates[i];} std::size_t& Id(){return mId;} }; ///@name Type Definitions ///@{ enum { Dimension = 3, DIMENSION = 3, MAX_LEVEL = 12, MIN_LEVEL = 2 // this cannot be less than 2!!! 
}; typedef Point PointType; /// always the point 3D typedef std::vector<double>::iterator DistanceIteratorType; typedef ModelPart::ElementsContainerType::ContainerType ContainerType; typedef ContainerType::value_type PointerType; typedef ContainerType::iterator IteratorType; typedef ModelPart::ElementsContainerType::ContainerType ResultContainerType; typedef ResultContainerType::value_type ResultPointerType; typedef ResultContainerType::iterator ResultIteratorType; typedef Element::Pointer pointer_type; typedef CellNodeData cell_node_data_type; typedef std::vector<CellNodeData*> data_type; typedef std::vector<PointerType>::iterator PointerTypeIterator; /// Pointer definition of DistanceSpatialContainersConfigure KRATOS_CLASS_POINTER_DEFINITION(DistanceSpatialContainersConfigure); ///@} ///@name Life Cycle ///@{ /// Default constructor. DistanceSpatialContainersConfigure() {} /// Destructor. virtual ~DistanceSpatialContainersConfigure() {} ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ static data_type* AllocateData() { return new data_type(27, (CellNodeData*)NULL); } static void CopyData(data_type* source, data_type* destination) { *destination = *source; } static void DeleteData(data_type* data) { delete data; } static inline void CalculateBoundingBox(const PointerType& rObject, PointType& rLowPoint, PointType& rHighPoint) { rHighPoint = rObject->GetGeometry().GetPoint(0); rLowPoint = rObject->GetGeometry().GetPoint(0); for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i]; rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? 
rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i]; } } } static inline void GetBoundingBox(const PointerType rObject, double* rLowPoint, double* rHighPoint) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = rObject->GetGeometry().GetPoint(0)[i]; rHighPoint[i] = rObject->GetGeometry().GetPoint(0)[i]; } for (unsigned int point = 0; point<rObject->GetGeometry().PointsNumber(); point++) { for(std::size_t i = 0; i<3; i++) { rLowPoint[i] = (rLowPoint[i] > rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rLowPoint[i]; rHighPoint[i] = (rHighPoint[i] < rObject->GetGeometry().GetPoint(point)[i] ) ? rObject->GetGeometry().GetPoint(point)[i] : rHighPoint[i]; } } } static inline bool Intersection(const PointerType& rObj_1, const PointerType& rObj_2) { Element::GeometryType& geom_1 = rObj_1->GetGeometry(); Element::GeometryType& geom_2 = rObj_2->GetGeometry(); return geom_1.HasIntersection(geom_2); } static inline bool IntersectionBox(const PointerType& rObject, const PointType& rLowPoint, const PointType& rHighPoint) { return rObject->GetGeometry().HasIntersection(rLowPoint, rHighPoint); } static inline bool IsIntersected(const Element::Pointer rObject, double Tolerance, const double* rLowPoint, const double* rHighPoint) { Point low_point(rLowPoint[0] - Tolerance, rLowPoint[1] - Tolerance, rLowPoint[2] - Tolerance); Point high_point(rHighPoint[0] + Tolerance, rHighPoint[1] + Tolerance, rHighPoint[2] + Tolerance); KRATOS_THROW_ERROR(std::logic_error, "Not Implemented method", "") //return HasIntersection(rObject->GetGeometry(), low_point, high_point); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. virtual std::string Info() const { return " Spatial Containers Configure"; } /// Print information about this object. virtual void PrintInfo(std::ostream& rOStream) const {} /// Print object's data. 
virtual void PrintData(std::ostream& rOStream) const {} ///@} protected: private: /// Assignment operator. DistanceSpatialContainersConfigure& operator=(DistanceSpatialContainersConfigure const& rOther); /// Copy constructor. DistanceSpatialContainersConfigure(DistanceSpatialContainersConfigure const& rOther); }; // Class DistanceSpatialContainersConfigure ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /// Short class definition. /** Detail class definition. */ class CalculateSignedDistanceTo3DSkinProcess : public Process { public: ///@name Type Definitions ///@{ /// Pointer definition of CalculateSignedDistanceTo3DSkinProcess KRATOS_CLASS_POINTER_DEFINITION(CalculateSignedDistanceTo3DSkinProcess); typedef DistanceSpatialContainersConfigure ConfigurationType; typedef OctreeBinaryCell<ConfigurationType> CellType; typedef OctreeBinary<CellType> OctreeType; typedef ConfigurationType::cell_node_data_type CellNodeDataType; typedef Point PointType; /// always the point 3D typedef OctreeType::cell_type::object_container_type object_container_type; typedef struct{ array_1d<double,3> Coordinates; array_1d<double,3> StructElemNormal; unsigned int EdgeNode1; unsigned int EdgeNode2; }IntersectionNodeStruct; typedef struct{ std::vector<IntersectionNodeStruct> IntNodes; }TetEdgeStruct; ///@} ///@name Life Cycle ///@{ /// Constructor. CalculateSignedDistanceTo3DSkinProcess(ModelPart& rThisModelPartStruc, ModelPart& rThisModelPartFluid) : mrSkinModelPart(rThisModelPartStruc), mrBodyModelPart(rThisModelPartStruc), mrFluidModelPart(rThisModelPartFluid) { } /// Destructor. 
~CalculateSignedDistanceTo3DSkinProcess() override { } ///@} ///@name Operators ///@{ void operator()() { Execute(); } ///@} ///@name Operations ///@{ ///****************************************************************************************************************** ///****************************************************************************************************************** void Execute() override { KRATOS_TRY; GenerateOctree(); //DistanceFluidStructure(); CalculateDistanceToSkinProcess<3> distance_process(mrFluidModelPart, mrBodyModelPart); distance_process.Execute(); // ------------------------------------------------------------------ // GenerateNodes(); CalculateDistance2(); // I have to change this. Pooyan. //mrSkinModelPart.GetCommunicator().AssembleCurrentData(DISTANCE); // std::ofstream mesh_file1("octree1.post.msh"); // std::ofstream res_file("octree1.post.res"); // Timer::Start("Writing Gid conform Mesh"); // PrintGiDMesh(mesh_file1); // PrintGiDResults(res_file); // octree.PrintGiDMeshNew(mesh_file2); // Timer::Stop("Writing Gid conform Mesh"); // delete octree. TODO: Carlos // ------------------------------------------------------------------ KRATOS_CATCH(""); } ///****************************************************************************************************************** ///****************************************************************************************************************** /** * This function maps the nodal pressure values computed in the CFD analysis to the respective * structural nodes, i.e. for each structural node inside a fluid tetrahedra positive and negative * face pressure is computed by mapping between the nodal values of the tetrahedra. Afterwards * the resulting delta is applied as new nodal pressure. 
*/
/// Maps the fluid pressure field onto the structure (skin) nodes.
/// For every skin node the containing fluid element is located with the bin-based
/// point locator and the elemental pressures are interpolated to the node: split
/// (interface) elements use a discontinuous interpolation yielding separate
/// positive- and negative-side face pressures, uncut elements a standard one.
/// Skin nodes whose containing element was not found are afterwards healed by
/// iteratively averaging pressures from already-VISITED neighbour nodes, and
/// finally outlier pressures are smoothed against the neighbour average.
/// @param node_locator bin-based search structure built on the fluid mesh
void MappingPressureToStructure(BinBasedFastPointLocator<3>& node_locator)
{
    //loop over nodes and find the tetra in which it falls, than do interpolation
    Vector N;                       // shape function values at the located point
    const int max_results = 10000;  // capacity of the spatial-search result buffer
    BinBasedFastPointLocator<3>::ResultContainerType results(max_results);
    const int n_structure_nodes = mrSkinModelPart.Nodes().size();

    #pragma omp parallel for firstprivate(results,N) //MY NEW LOOP: reset the VISITED flag
    for (int i = 0; i < n_structure_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
        Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
        p_structure_node->Set(VISITED, false);
    }

    for (int i = 0; i < n_structure_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
        Node < 3 > ::Pointer p_structure_node = *(iparticle.base());

        BinBasedFastPointLocator<3>::ResultIteratorType result_begin = results.begin();
        Element::Pointer pElement;
        // locate the fluid element containing this skin node
        bool is_found = node_locator.FindPointOnMesh(p_structure_node->Coordinates(), N, pElement, result_begin, max_results);

        if (is_found == true)
        {
            array_1d<double,4> nodalPressures;
            const Vector& ElementalDistances = pElement->GetValue(ELEMENTAL_DISTANCES);
            Geometry<Node<3> >& geom = pElement->GetGeometry();

            for(unsigned int j=0; j<geom.size(); j++)
            {
                nodalPressures[j] = geom[j].FastGetSolutionStepValue(PRESSURE);
            }

            if(pElement->GetValue(SPLIT_ELEMENT)==true)
            {
                array_1d<double,4> Npos,Nneg;

                // Do mapping: shape function weights for the positive/negative side of the cut
                ComputeDiscontinuousInterpolation((*p_structure_node),pElement->GetGeometry(),ElementalDistances,Npos,Nneg);

                // Compute face pressure on both sides of the interface
                double p_positive_structure = inner_prod(nodalPressures,Npos);
                double p_negative_structure = inner_prod(nodalPressures,Nneg);

                // Assign face pressure to structure node
                p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = p_positive_structure;
                p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = p_negative_structure;
                p_structure_node->Set(VISITED);
            }
            else
            {
                // uncut element: a single continuous pressure is written to both faces
                double p = inner_prod(nodalPressures,N);
                p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = p;
                p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = p;
                p_structure_node->Set(VISITED);
            }
        }
    }

    //AND NOW WE "TREAT" the bad nodes, the ones that belong to the structural faces that by some chance did not cross the fluid elements
    //to such nodes we simply extrapolate the pressure from the neighbors
    int n_bad_nodes=0;
    for (int i = 0; i < n_structure_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
        Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
        if (p_structure_node->IsNot(VISITED))
            n_bad_nodes++;
    }
    //KRATOS_WATCH("THERE WERE THIS MANY BAD NODES ORIGINALLY")
    //KRATOS_WATCH(n_bad_nodes)

    // iteratively flood pressures from VISITED nodes to their unvisited neighbours
    while (n_bad_nodes >= 1.0)
    {
        int n_bad_nodes_backup = n_bad_nodes;

        for (int i = 0; i < n_structure_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
            Node < 3 > ::Pointer p_structure_node = *(iparticle.base());

            //here we store the number of neigbor nodes that were given the pressure in the previous loop (i.e. were found)
            if (p_structure_node->IsNot(VISITED))
            {
                int n_good_neighbors = 0;
                double pos_pres = 0.0;
                double neg_pres = 0.0;
                GlobalPointersVector< Node < 3 > >& neighours = p_structure_node->GetValue(NEIGHBOUR_NODES);

                for (GlobalPointersVector< Node < 3 > >::iterator j = neighours.begin(); j != neighours.end(); j++)
                {
                    if (j->Is(VISITED))
                    {
                        n_good_neighbors++;
                        pos_pres += j->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE);
                        neg_pres += j->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE);
                        //KRATOS_WATCH("Good neighbor found")
                    }
                }

                if (n_good_neighbors != 0)
                {
                    pos_pres /= n_good_neighbors;
                    neg_pres /= n_good_neighbors;
                    p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = pos_pres;
                    p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = neg_pres;
                    p_structure_node->Set(VISITED);
                    n_bad_nodes--;
                }
                //KRATOS_WATCH(pos_pres)
                //KRATOS_WATCH(neg_pres)
            }
        }

        if(n_bad_nodes == n_bad_nodes_backup) break; //WE BREAK THE WHILE HERE, OTHERWISE THE CODE HANGS (it was not able to remove any other node)

        /*int n_bad_nodes=0;
        for (int i = 0; i < n_structure_nodes; i++)
        {
            ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
            Node < 3 > ::Pointer p_structure_node = *(iparticle.base());
            if (p_structure_node->IsNot(VISITED))
                n_bad_nodes++;
        }
        */
        //KRATOS_WATCH(n_bad_nodes)
    }

    //THE BELOW ONE IS A "CHEAT".. THERE IS A PROBLEM OF INCORRECT PROJECTION BETWEEN THE MESHES AT SOME POINTS
    //FOR NODES WITH PRESSURE VERY DIFFERENT FROM THAT OF THE NEIGHBORS, I JUST TAKE THE NEIGHBOR PRESSURE AVERAGED
    for (int i = 0; i < n_structure_nodes; i++)
    {
        ModelPart::NodesContainerType::iterator iparticle = mrSkinModelPart.NodesBegin() + i;
        Node < 3 > ::Pointer p_structure_node = *(iparticle.base());

        double pos_pressure=p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE);
        double neg_pressure=p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE);

        GlobalPointersVector< Node < 3 > >& neighours = p_structure_node->GetValue(NEIGHBOUR_NODES);

        if (neighours.size()>=1.0)
        {
            double av_pos_pres=0.0;
            double av_neg_pres=0.0;
            for( GlobalPointersVector< Node<3> >::iterator j = neighours.begin(); j != neighours.end(); j++)
            {
                av_pos_pres+=j->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE);
                av_neg_pres+=j->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE);
            }
            av_pos_pres/=neighours.size();
            av_neg_pres/=neighours.size();

            //IF the node's pressure deviates by more than a factor 3 from the neighbour average,
            //something is bad and we reset its value to that average
            if (fabs(pos_pressure)>3.0*fabs(av_pos_pres))
            {
                p_structure_node->FastGetSolutionStepValue(POSITIVE_FACE_PRESSURE) = av_pos_pres;
                //KRATOS_WATCH("BAD NODE")
            }
            if (fabs(neg_pressure)>3.0*fabs(av_neg_pres))
            {
                p_structure_node->FastGetSolutionStepValue(NEGATIVE_FACE_PRESSURE) = av_neg_pres;
                //KRATOS_WATCH("BAD NODE")
            }
        }
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/// Computes discontinuous shape-function weights for a point inside a split tetrahedron.
/// The zero level set of the nodal distance field is approximated by the points where
/// the cut edges cross zero (3 points -> triangle, 4 points -> quadrilateral); the
/// interpolated value on that surface is attributed back to the positive-side ("father")
/// node and the negative-side node of each cut edge, producing Npos and Nneg.
/// @param pNode     point (structure node) at which to interpolate
/// @param geom      geometry of the split fluid tetrahedron
/// @param distances signed nodal distances of the tetrahedron
/// @param Npos      [out] interpolation weights attributed to positive-side nodes
/// @param Nneg      [out] interpolation weights attributed to negative-side nodes
void ComputeDiscontinuousInterpolation( const Node<3>& pNode,
        Geometry< Node<3> >& geom,
        const array_1d<double,4>& distances,
        array_1d<double,4>& Npos,
        array_1d<double,4>& Nneg)
{
    //count positives
    int n_positives = 0;
    for(unsigned int i=0; i<distances.size(); i++)
        if(distances[i]>0)
            n_positives++;

    //generate the points on the edges at the zero of the distance function
    //generate "father nodes", defined as the end nodes of the edge on which the local point is located
    std::vector< Point > edge_points;
    edge_points.reserve(4);

    array_1d<unsigned int, 4> positive_fathers, negative_fathers; //there are at most 4 cut edges
    unsigned int k=0;
    unsigned int l=0;

    for(unsigned int i=0; i<3; i++)
    {
        for(unsigned int j=i+1; j<4; j++) // go through the edges 01, 02, 03, 12, 13, 23
        {
            double di = distances[i];
            double dj = distances[j];

            if(di*dj < 0) //edge is cut
            {
                //generate point on edge by linear interpolation
                double Ni = fabs(dj) / ( fabs(di) + fabs(dj) );
                double Nj = 1.0 - Ni;
                Point edge_point(Ni * geom[i] + Nj * geom[j]);
                edge_points.push_back(edge_point);

                //store the id of the positive and negative fathers
                if(di > 0.0)
                {
                    positive_fathers[k++] = i;
                    negative_fathers[l++] = j;
                }
                else
                {
                    positive_fathers[k++] = j;
                    negative_fathers[l++] = i;
                }
            }
        }
    }

    if(edge_points.size() == 3)
    {
        //compute local shape functions (tell how to interpolate from the edge nodes)
        Vector Nlocal(3);

        //form a triangle with the edge nodes
        Triangle3D3< Point > triangle(Point::Pointer(new Point(edge_points[0])),
                                      Point::Pointer(new Point(edge_points[1])),
                                      Point::Pointer(new Point(edge_points[2])) );

        array_1d<double,3> local_coords;
        local_coords = triangle.PointLocalCoordinates(local_coords, pNode);

        for(unsigned int i=0; i<3;i++)
            Nlocal[i] = triangle.ShapeFunctionValue(i, local_coords );

        noalias(Npos) = ZeroVector(4);
        noalias(Nneg) = ZeroVector(4);
        for(unsigned int i=0; i<3; i++)
        {
            Npos[ positive_fathers[i] ] += Nlocal[i];
            Nneg[ negative_fathers[i] ] += Nlocal[i];
        }
    }

    if(edge_points.size() == 4)
    {
        //compute local shape functions (tell how to interpolate from the edge nodes)
        Vector Nlocal(4);

        //form a quadrilatera with the 4 cut nodes
        array_1d<double,3> x21 = edge_points[1] - edge_points[0];
        array_1d<double,3> x31 = edge_points[2] - edge_points[0];
        array_1d<double,3> x41 = edge_points[3] - edge_points[0];

        //define a vector oriented as x21
        array_1d<double,3> v1 = x21 / norm_2(x21);

        BoundedMatrix<double,4,3> DN_DX;
        array_1d<double,4> msN;
        double Area;
        GeometryUtils::CalculateGeometryData( geom, DN_DX, msN, Area );

        // gradient of the distance field = normal of the cut plane
        array_1d<double,3> n = prod(trans(DN_DX),distances);
        n /= norm_2(n);

        array_1d<double,3> v2;
        MathUtils<double>::CrossProduct(v2,v1,n); // v2 = v1 x n

        // sort the 4 cut points counter-clockwise in the cut plane by their angle to v1
        array_1d<double,3> angles;
        angles[0] = 0.0; //angle between x21 and v1
        angles[1] = atan2( inner_prod(x31,v2), inner_prod(x31,v1) ); //angle between x31 and v1
        angles[2] = atan2( inner_prod(x41,v2), inner_prod(x41,v1) ); //angle between x41 and v1

        double max_angle = 0.0;
        double min_angle = 0.0;
        unsigned int min_pos = 1;
        unsigned int max_pos = 1;
        for(unsigned int i=1; i<3; i++)
        {
            if(angles[i] < min_angle)
            {
                min_pos = i+1; //this is the local index of the edge point which forms the minimal angle
                min_angle = angles[i];
            }
            else if(angles[i] > max_angle)
            {
                max_pos = i+1; //this is the local index of the edge point which forms the maximal angle
                max_angle = angles[i];
            }
        }

        //find the pos of the center node
        unsigned int center_pos = 0;
        for(unsigned int i=1; i<4; i++)
        {
            if((i!= min_pos) && (i!=max_pos))
            {
                center_pos = i;
            }
        }

        //form a quadrilateral with the edge nodes
        Quadrilateral3D4< Point > quad = Quadrilateral3D4< Point >( Point::Pointer(new Point(edge_points[0])),
                                                                    Point::Pointer(new Point(edge_points[min_pos])),
                                                                    Point::Pointer(new Point(edge_points[center_pos])),
                                                                    Point::Pointer(new Point(edge_points[max_pos])) );

        array_1d<double,3> local_coords;
        local_coords = quad.PointLocalCoordinates(local_coords, pNode);

        array_1d<unsigned int, 4> indices;
        indices[0] = 0;
        indices[1] = min_pos;
        indices[2] = center_pos;
        indices[3] = max_pos;

        for(unsigned int i=0; i<4;i++)
            Nlocal[ i ] = quad.ShapeFunctionValue(i, local_coords );

        noalias(Npos) = ZeroVector(4);
        noalias(Nneg) = ZeroVector(4);
        // NOTE(review): weights are indexed as Nlocal[indices[i]] while fathers use i —
        // looks asymmetric; confirm against the 3-point branch that this mapping is intended
        for(unsigned int i=0; i<4; i++)
        {
            Npos[ positive_fathers[i] ] += Nlocal[indices[i]];
            Nneg[ negative_fathers[i] ] += Nlocal[indices[i]];
        }
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/// Assigns an elementally-averaged pressure to a single node.
/// The containing fluid element is located; for a split element the pressures of the
/// positive- and negative-distance nodes are averaged separately into
/// POSITIVE_/NEGATIVE_FACE_PRESSURE, otherwise the plain 4-node average is written to both.
/// If the node is not found inside any fluid element, nothing is written.
/// @param node_locator bin-based search structure built on the fluid mesh
/// @param node         node receiving the averaged pressures
void AveragePressureToNode(BinBasedFastPointLocator<3>& node_locator,
                           Node<3>& node)
{
    //loop over nodes and find the tetra in which it falls, than do interpolation
    Vector N;
    const int max_results = 10000;
    BinBasedFastPointLocator<3>::ResultContainerType results(max_results);
    BinBasedFastPointLocator<3>::ResultIteratorType result_begin = results.begin();

    Element::Pointer pElement;
    bool is_found = node_locator.FindPointOnMesh(node.Coordinates(), N, pElement, result_begin, max_results);

    if (is_found == true)
    {
        array_1d<double,4> nodalPressures;
        const Vector& ElementalDistances = pElement->GetValue(ELEMENTAL_DISTANCES);
        Geometry<Node<3> >& geom = pElement->GetGeometry();

        for(unsigned int i=0; i<4; i++)
            nodalPressures[i] = geom[i].GetSolutionStepValue(PRESSURE);

        if(pElement->GetValue(SPLIT_ELEMENT)==true)
        {
            // Compute average of all positive and all negative values
            double positiveAverage = 0;
            double negativeAverage = 0;
            unsigned int nPos = 0;
            unsigned int nNeg = 0;

            for(unsigned int i=0 ; i<4 ; i++)
            {
                if(ElementalDistances[i]>=0)
                {
                    positiveAverage += nodalPressures[i];
                    nPos++;
                }
                else
                {
                    negativeAverage += nodalPressures[i];
                    nNeg++;
                }
            }

            // NOTE(review): assumes a split element always has nodes on both sides
            // (nPos>0 and nNeg>0); division by zero otherwise — confirm upstream guarantee
            positiveAverage /= nPos;
            negativeAverage /= nNeg;

            // Assign Pressures
            node.GetSolutionStepValue(POSITIVE_FACE_PRESSURE,0) = positiveAverage;
            node.GetSolutionStepValue(NEGATIVE_FACE_PRESSURE,0) = negativeAverage;
        }
        else
        {
            // Compute plain average of the 4 nodal pressures
            double Average = 0;
            for(unsigned int i = 0 ; i<4 ; i++)
                Average += nodalPressures[i];
            Average /= 4;

            // Assign Pressures
            node.GetSolutionStepValue(POSITIVE_FACE_PRESSURE,0) = Average;
            node.GetSolutionStepValue(NEGATIVE_FACE_PRESSURE,0) = Average;
        }
    }
}
///****************************************************************************************************************** ///****************************************************************************************************************** void DistanceFluidStructure() { //std::cout << "Start calculating Elemental distances..." << std::endl; // Initialize Elemental distances in the domain Initialize(); // Initialize index table that defines line Edges of fluid Element BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable; SetIndexTable(TetEdgeIndexTable); // loop over all fluid Elements // this loop is parallelized using openmp #ifdef _OPENMP int number_of_threads = omp_get_max_threads(); #else int number_of_threads = 1; #endif ModelPart::ElementsContainerType& pElements = mrFluidModelPart.Elements(); DenseVector<unsigned int> Element_partition; CreatePartition(number_of_threads, pElements.size(), Element_partition); #pragma omp parallel for for (int k = 0; k < number_of_threads; k++) { ModelPart::ElementsContainerType::iterator it_begin = pElements.ptr_begin() + Element_partition[k]; ModelPart::ElementsContainerType::iterator it_end = pElements.ptr_begin() + Element_partition[k+1]; // assemble all Elements for (ModelPart::ElementIterator it = it_begin; it != it_end; ++it) { CalcElementDistances( it , TetEdgeIndexTable ); } } // Finally, each tetrahedral Element has 4 distance values. But each node belongs to // several Elements, such that it is assigned several distance values // --> now synchronize these values by finding the minimal distance and assign to each node a minimal nodal distance AssignMinimalNodalDistance(); //std::cout << "Finished calculating Elemental distances..." 
<< std::endl; } ///****************************************************************************************************************** ///****************************************************************************************************************** void Initialize() { const double initial_distance = 1.0; ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray(); // reset the node distance to 1.0 which is the maximum distance in our normalized space. int nodesSize = nodes.size(); #pragma omp parallel for firstprivate(nodesSize) for(int i = 0 ; i < nodesSize ; i++) nodes[i]->GetSolutionStepValue(DISTANCE) = initial_distance; ModelPart::ElementsContainerType::ContainerType& fluid_Elements = mrFluidModelPart.ElementsArray(); array_1d<double,4> ElementalDistances; ElementalDistances[0] = initial_distance; ElementalDistances[1] = initial_distance; ElementalDistances[2] = initial_distance; ElementalDistances[3] = initial_distance; // reset the Elemental distance to 1.0 which is the maximum distance in our normalized space. 
// also initialize the embedded velocity of the fluid Element int ElementsSize = fluid_Elements.size(); #pragma omp parallel for firstprivate(ElementsSize) for(int i = 0 ; i < ElementsSize ; i++) { fluid_Elements[i]->GetValue(ELEMENTAL_DISTANCES) = ElementalDistances; fluid_Elements[i]->GetValue(SPLIT_ELEMENT) = false; fluid_Elements[i]->GetValue(EMBEDDED_VELOCITY)=ZeroVector(3); } } ///****************************************************************************************************************** ///****************************************************************************************************************** void SetIndexTable( BoundedMatrix<unsigned int,6,2>& TetEdgeIndexTable ) { // Initialize index table to define line Edges of fluid Element TetEdgeIndexTable(0,0) = 0; TetEdgeIndexTable(0,1) = 1; TetEdgeIndexTable(1,0) = 0; TetEdgeIndexTable(1,1) = 2; TetEdgeIndexTable(2,0) = 0; TetEdgeIndexTable(2,1) = 3; TetEdgeIndexTable(3,0) = 1; TetEdgeIndexTable(3,1) = 2; TetEdgeIndexTable(4,0) = 1; TetEdgeIndexTable(4,1) = 3; TetEdgeIndexTable(5,0) = 2; TetEdgeIndexTable(5,1) = 3; } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalcElementDistances( ModelPart::ElementsContainerType::iterator& i_fluidElement, BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable ) { std::vector<OctreeType::cell_type*> leaves; std::vector<TetEdgeStruct> IntersectedTetEdges; unsigned int NumberIntersectionsOnTetCorner = 0; // Get leaves of octree intersecting with fluid Element mpOctree->GetIntersectedLeaves(*(i_fluidElement).base(),leaves); int intersection_counter = 0; // Loop over all 6 line Edges of the tetrahedra for(unsigned int i_tetEdge = 0; i_tetEdge < 6; i_tetEdge++) { IdentifyIntersectionNodes( i_fluidElement, i_tetEdge, leaves, IntersectedTetEdges, NumberIntersectionsOnTetCorner, 
TetEdgeIndexTable, intersection_counter ); } if (intersection_counter!=0) { i_fluidElement->GetValue(EMBEDDED_VELOCITY) /= intersection_counter; } if(IntersectedTetEdges.size() > 0) CalcDistanceTo3DSkin( IntersectedTetEdges , i_fluidElement , NumberIntersectionsOnTetCorner ); } ///****************************************************************************************************************** ///****************************************************************************************************************** void IdentifyIntersectionNodes( ModelPart::ElementsContainerType::iterator& i_fluidElement, unsigned int i_tetEdge, std::vector<OctreeType::cell_type*>& leaves, std::vector<TetEdgeStruct>& IntersectedTetEdges, unsigned int& NumberIntersectionsOnTetCorner, BoundedMatrix<unsigned int,6,2> TetEdgeIndexTable, int& intersection_counter ) { std::vector<unsigned int> IntersectingStructElemID; TetEdgeStruct NewTetEdge; unsigned int NumberIntersectionsOnTetCornerCurrentEdge = 0; // Get nodes of line Edge unsigned int EdgeStartIndex = TetEdgeIndexTable(i_tetEdge,0); unsigned int EdgeEndIndex = TetEdgeIndexTable(i_tetEdge,1); PointType& P1 = i_fluidElement->GetGeometry()[EdgeStartIndex]; PointType& P2 = i_fluidElement->GetGeometry()[EdgeEndIndex]; double EdgeNode1[3] = {P1.X() , P1.Y() , P1.Z()}; double EdgeNode2[3] = {P2.X() , P2.Y() , P2.Z()}; // loop over all octree cells which are intersected by the fluid Element for(unsigned int i_cell = 0 ; i_cell < leaves.size() ; i_cell++) { // Structural Element contained in one cell of the octree object_container_type* struct_elem = (leaves[i_cell]->pGetObjects()); // loop over all structural Elements within each octree cell for(object_container_type::iterator i_StructElement = struct_elem->begin(); i_StructElement != struct_elem->end(); i_StructElement++) { if( StructuralElementNotYetConsidered( (*i_StructElement)->Id() , IntersectingStructElemID ) ) { // Calculate and associate intersection point to the current fluid Element 
double IntersectionPoint[3] = {0.0 , 0.0 , 0.0}; int TetEdgeHasIntersections = IntersectionTriangleSegment( (*i_StructElement)->GetGeometry() , EdgeNode1 , EdgeNode2 , IntersectionPoint ); if( TetEdgeHasIntersections == 1 ) { IntersectionNodeStruct NewIntersectionNode; // Assign information to the intersection node NewIntersectionNode.Coordinates[0] = IntersectionPoint[0]; NewIntersectionNode.Coordinates[1] = IntersectionPoint[1]; NewIntersectionNode.Coordinates[2] = IntersectionPoint[2]; if( IsIntersectionNodeOnTetEdge( IntersectionPoint , EdgeNode1 , EdgeNode2 ) ) { if ( IsNewIntersectionNode( NewIntersectionNode , IntersectedTetEdges ) ) { // Calculate normal of the structural Element at the position of the intersection point CalculateNormal3D((*i_StructElement)->GetGeometry()[0], (*i_StructElement)->GetGeometry()[1], (*i_StructElement)->GetGeometry()[2], NewIntersectionNode.StructElemNormal); // check, how many intersection nodes are located on corner points of the tetrahedra if ( IsIntersectionOnCorner( NewIntersectionNode , EdgeNode1 , EdgeNode2) ) { NumberIntersectionsOnTetCornerCurrentEdge++; // only allow one intersection node on a tet edge if(NumberIntersectionsOnTetCornerCurrentEdge < 2) { // add the new intersection point to the list of intersection points of the fluid Element NewIntersectionNode.EdgeNode1 = EdgeStartIndex; NewIntersectionNode.EdgeNode2 = EdgeEndIndex; NewTetEdge.IntNodes.push_back(NewIntersectionNode); // if tet edge belonging to this intersection point is not already marked as "IntersectedTetEdge" --> put it into the respective container // when a second intersection node is found, then it is not necessary to push_back again if( NewTetEdge.IntNodes.size() == 1 ) IntersectedTetEdges.push_back(NewTetEdge); } // this corner intersection node is only considered once for each tet edge if(NumberIntersectionsOnTetCornerCurrentEdge==1) { NumberIntersectionsOnTetCorner++; } } else { // add the new intersection point to the list of intersection 
points of the fluid Element NewIntersectionNode.EdgeNode1 = EdgeStartIndex; NewIntersectionNode.EdgeNode2 = EdgeEndIndex; NewTetEdge.IntNodes.push_back(NewIntersectionNode); // velocity mapping structure --> fluid array_1d<double,3> emb_vel = (*i_StructElement)->GetGeometry()[0].GetSolutionStepValue(VELOCITY); emb_vel += (*i_StructElement)->GetGeometry()[1].GetSolutionStepValue(VELOCITY); emb_vel += (*i_StructElement)->GetGeometry()[2].GetSolutionStepValue(VELOCITY); i_fluidElement->GetValue(EMBEDDED_VELOCITY) += emb_vel/3; intersection_counter++; } } } } } } } // Finally put the found intersection nodes into the container if( NewTetEdge.IntNodes.size() > 0 ) { if(NumberIntersectionsOnTetCornerCurrentEdge == 0) IntersectedTetEdges.push_back(NewTetEdge); } } ///****************************************************************************************************************** ///****************************************************************************************************************** bool StructuralElementNotYetConsidered( unsigned int IDCurrentStructElem, std::vector<unsigned int>& IntersectingStructElemID ) { // check if the structural Element was already considered as intersecting Element for(unsigned int k = 0 ; k < IntersectingStructElemID.size() ; k++) { if( IDCurrentStructElem == IntersectingStructElemID[k] ) return false; } // if structural Element has not been considered in another octree, which also intersects the fluid Element // add the new object ID to the vector IntersectingStructElemID.push_back( IDCurrentStructElem ); return true; } ///****************************************************************************************************************** ///****************************************************************************************************************** bool IsIntersectionNodeOnTetEdge( double* IntersectionPoint, double* EdgeNode1, double* EdgeNode2 ) { // check, if intersection point is located on any edge of the fluid Element 
array_1d<double,3> ConnectVectTetNodeIntNode1; array_1d<double,3> ConnectVectTetNodeIntNode2; array_1d<double,3> EdgeVector; ConnectVectTetNodeIntNode1[0] = IntersectionPoint[0] - EdgeNode1[0]; ConnectVectTetNodeIntNode1[1] = IntersectionPoint[1] - EdgeNode1[1]; ConnectVectTetNodeIntNode1[2] = IntersectionPoint[2] - EdgeNode1[2]; ConnectVectTetNodeIntNode2[0] = IntersectionPoint[0] - EdgeNode2[0]; ConnectVectTetNodeIntNode2[1] = IntersectionPoint[1] - EdgeNode2[1]; ConnectVectTetNodeIntNode2[2] = IntersectionPoint[2] - EdgeNode2[2]; double LengthConnectVect1 = norm_2( ConnectVectTetNodeIntNode1 ); double LengthConnectVect2 = norm_2( ConnectVectTetNodeIntNode2 ); EdgeVector[0] = EdgeNode2[0] - EdgeNode1[0]; EdgeVector[1] = EdgeNode2[1] - EdgeNode1[1]; EdgeVector[2] = EdgeNode2[2] - EdgeNode1[2]; double MaxEdgeLength = norm_2( EdgeVector ); // if both connection vectors (corner point --> intersection point) // are smaller or equal to the edge length of tetrahedra, // then intersection point is located on the edge if( (LengthConnectVect1 <= (MaxEdgeLength)) && (LengthConnectVect2 <= (MaxEdgeLength)) ) return true; else return false; } ///****************************************************************************************************************** ///****************************************************************************************************************** bool IsNewIntersectionNode( IntersectionNodeStruct& NewIntersectionNode, std::vector<TetEdgeStruct>& IntersectedTetEdges ) { array_1d<double,3> DiffVector; double NormDiffVector = 0; unsigned int NumberIntNodes = 0; for( unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++ ) { NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size(); for( unsigned int i_IntNode = 0 ; i_IntNode < NumberIntNodes ; i_IntNode++ ) { DiffVector[0] = NewIntersectionNode.Coordinates[0] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[0]; DiffVector[1] = 
NewIntersectionNode.Coordinates[1] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[1]; DiffVector[2] = NewIntersectionNode.Coordinates[2] - IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode].Coordinates[2]; NormDiffVector = norm_2(DiffVector); if( NormDiffVector < epsilon ) return false; } } // if the new intersection node is not existing (as intersection with a corner point), then return false return true; } ///****************************************************************************************************************** ///****************************************************************************************************************** bool IsIntersectionOnCorner( IntersectionNodeStruct& NewIntersectionNode, double* EdgeNode1, double* EdgeNode2 ) { array_1d<double,3> DiffVector; double NormDiffVector; DiffVector[0] = EdgeNode1[0] - NewIntersectionNode.Coordinates[0]; DiffVector[1] = EdgeNode1[1] - NewIntersectionNode.Coordinates[1]; DiffVector[2] = EdgeNode1[2] - NewIntersectionNode.Coordinates[2]; NormDiffVector = norm_2(DiffVector); if( NormDiffVector < epsilon ) return true; DiffVector[0] = EdgeNode2[0] - NewIntersectionNode.Coordinates[0]; DiffVector[1] = EdgeNode2[1] - NewIntersectionNode.Coordinates[1]; DiffVector[2] = EdgeNode2[2] - NewIntersectionNode.Coordinates[2]; NormDiffVector = norm_2(DiffVector); if( NormDiffVector < epsilon ) return true; else return false; } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalculateNormal3D( Point& Point1, Point& Point2, Point& Point3, array_1d<double,3>& rResultNormal ) { array_1d<double,3> v1 = Point2 - Point1; array_1d<double,3> v2 = Point3 - Point1; MathUtils<double>::CrossProduct(rResultNormal,v1,v2); rResultNormal *= 0.5; } 
///******************************************************************************************************************
///******************************************************************************************************************

/// Converts the intersection nodes collected for one fluid tetrahedron into signed
/// elemental distances, dispatching on the number and type of intersections
/// (1 corner / 2 corners / 3 cut edges / more than 3 cut edges). Marks the element
/// as SPLIT_ELEMENT and stores the post-processed distances in ELEMENTAL_DISTANCES.
void CalcDistanceTo3DSkin( std::vector<TetEdgeStruct>& IntersectedTetEdges,
                           ModelPart::ElementsContainerType::iterator& i_fluid_Element,
                           unsigned int NumberIntersectionsOnTetCorner )
{
    std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure;
    array_1d<double,4> ElementalDistances;

    FillIntNodesContainer(IntersectedTetEdges,NodesOfApproximatedStructure);

    // Intersection with one corner point
    if( NodesOfApproximatedStructure.size() == 1 && NumberIntersectionsOnTetCorner == 1 )
    {
        CalcSignedDistancesToOneIntNode(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances);
        i_fluid_Element->GetValue(SPLIT_ELEMENT) = true;
    }

    // Intersection with two corner points / one tetrahedra edge
    if( NodesOfApproximatedStructure.size() == 2 && NumberIntersectionsOnTetCorner == 2 )
    {
        CalcSignedDistancesToTwoIntNodes(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances);
        i_fluid_Element->GetValue(SPLIT_ELEMENT) = true;
    }

    // Intersection with three tetrahedra edges
    if( NodesOfApproximatedStructure.size() == 3 )
    {
        CalcSignedDistancesToThreeIntNodes(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances);
        i_fluid_Element->GetValue(SPLIT_ELEMENT) = true;
    }

    // Intersection with more than three tetrahedra edges
    if( NodesOfApproximatedStructure.size() > 3 )
    {
        CalcSignedDistancesToMoreThanThreeIntNodes(i_fluid_Element,NodesOfApproximatedStructure,ElementalDistances,IntersectedTetEdges);
        i_fluid_Element->GetValue(SPLIT_ELEMENT) = true;
    }

    // Postprocessing treatment of Elemental distances
    if( i_fluid_Element->GetValue(SPLIT_ELEMENT) == true )
        AvoidZeroDistances(i_fluid_Element, ElementalDistances);

    // In case there is intersection with fluid Element: assign distances to the Element
    if( i_fluid_Element->GetValue(SPLIT_ELEMENT) == true )
        i_fluid_Element->GetValue(ELEMENTAL_DISTANCES) = ElementalDistances;
}

///******************************************************************************************************************
///******************************************************************************************************************

/// Flattens the per-edge intersection nodes of IntersectedTetEdges into a single
/// list of intersection nodes.
void FillIntNodesContainer( std::vector<TetEdgeStruct>& IntersectedTetEdges,
                            std::vector<IntersectionNodeStruct>& NodesOfApproximatedStructure )
{
    const unsigned int NumberCutEdges = IntersectedTetEdges.size();

    for(unsigned int i_TetEdge = 0 ; i_TetEdge < NumberCutEdges ; i_TetEdge++)
    {
        unsigned int NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size();

        for( unsigned int i_IntNode = 0 ; i_IntNode < NumberIntNodes ; i_IntNode++ )
        {
            NodesOfApproximatedStructure.push_back(IntersectedTetEdges[i_TetEdge].IntNodes[i_IntNode]);
        }
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/// Signed distances for the single-corner-intersection case: the structural surface
/// is approximated by the plane through the intersection node with the structural
/// element normal.
void CalcSignedDistancesToOneIntNode( ModelPart::ElementsContainerType::iterator& i_fluid_Element,
                                      std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                                      array_1d<double,4>& ElementalDistances )
{
    Geometry< Node<3> >& rFluidGeom = i_fluid_Element->GetGeometry();

    Point P1;
    P1.Coordinates() = NodesOfApproximatedStructure[0].Coordinates;

    array_1d<double,3>& Normal = NodesOfApproximatedStructure[0].StructElemNormal;

    // Compute distance values for all tet-nodes
    for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
    {
        ElementalDistances[i_TetNode] = PointDistanceToPlane(P1, Normal, rFluidGeom[i_TetNode]);
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/// Signed distances for the two-corner-intersection case: the approximating plane
/// passes through the first intersection node with the averaged structural normal,
/// re-oriented if needed to agree with the structural normal direction.
void CalcSignedDistancesToTwoIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_Element,
                                       std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                                       array_1d<double,4>& ElementalDistances )
{
    Geometry< Node<3> >& rFluidGeom = i_fluid_Element->GetGeometry();

    Point P1;
    P1.Coordinates() = NodesOfApproximatedStructure[0].Coordinates;

    // Get normal at intersections, average them and check direction of distances
    array_1d<double,3> NormalAtIntersectionNode1 = NodesOfApproximatedStructure[0].StructElemNormal;
    array_1d<double,3> NormalAtIntersectionNode2 = NodesOfApproximatedStructure[1].StructElemNormal;

    // Compute normal of surface plane
    array_1d<double,3> Normal;
    Normal[0] = 0.5*(NormalAtIntersectionNode1[0] + NormalAtIntersectionNode2[0]);
    Normal[1] = 0.5*(NormalAtIntersectionNode1[1] + NormalAtIntersectionNode2[1]);
    Normal[2] = 0.5*(NormalAtIntersectionNode1[2] + NormalAtIntersectionNode2[2]);

    // Check whether orientation of normal is in direction of the normal of the intersecting structure
    // Note: The normal of the approx. surface can be max. 90deg to every surrounding normal of the structure at the intersection nodes
    const array_1d<double,3> NormalAtOneIntersectionNode = NodesOfApproximatedStructure[0].StructElemNormal;

    bool NormalWrongOriented = false;
    if(inner_prod(NormalAtOneIntersectionNode,Normal)<0)
        NormalWrongOriented = true;

    // switch direction of normal
    if(NormalWrongOriented)
        Normal *=-1;

    // Compute distance values for all tet-nodes
    for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
    {
        ElementalDistances[i_TetNode] = PointDistanceToPlane(P1, Normal, rFluidGeom[i_TetNode]);
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/// Signed distances for the three-cut-edge case: the three intersection nodes span the
/// approximating plane directly; its normal is re-oriented against the structural normal.
void CalcSignedDistancesToThreeIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_Element,
                                         std::vector<IntersectionNodeStruct>& NodesOfApproximatedStructure,
                                         array_1d<double,4>& ElementalDistances )
{
    Geometry< Node<3> >& rFluidGeom = i_fluid_Element->GetGeometry();

    Point P1;
    Point P2;
    Point P3;

    P1.Coordinates() = NodesOfApproximatedStructure[0].Coordinates;
    P2.Coordinates() = NodesOfApproximatedStructure[1].Coordinates;
    P3.Coordinates() = NodesOfApproximatedStructure[2].Coordinates;

    array_1d<double,3> Normal;
    CalculateNormal3D(P1,P2,P3,Normal);

    // Check whether orientation of normal is in direction of the normal of the intersecting structure
    // Note: The normal of the approx. surface can be max. 90deg to every surrounding normal of the structure at the intersection nodes
    const array_1d<double,3> NormalAtOneIntersectionNode = NodesOfApproximatedStructure[0].StructElemNormal;

    bool NormalWrongOriented = false;
    if(inner_prod(NormalAtOneIntersectionNode,Normal)<0)
        NormalWrongOriented = true;

    // switch direction of normal
    if(NormalWrongOriented)
        Normal *=-1;

    // Compute distance values for all tet-nodes
    for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
    {
        ElementalDistances[i_TetNode] = PointDistanceToPlane(P1, Normal, rFluidGeom[i_TetNode] );
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/// Signed distances for the case of more than three intersection nodes: a best-fit plane
/// through the mean of the intersection nodes is computed via the eigenvector of the
/// smallest eigenvalue of the covariance matrix (total least squares). If an edge is cut
/// twice, the plane normal is replaced by a combination of the two normals at that edge.
void CalcSignedDistancesToMoreThanThreeIntNodes( ModelPart::ElementsContainerType::iterator& i_fluid_Element,
                                                 std::vector<IntersectionNodeStruct> NodesOfApproximatedStructure,
                                                 array_1d<double,4>& ElementalDistances,
                                                 std::vector<TetEdgeStruct>& IntersectedTetEdges )
{
    unsigned int numberCutEdges = NodesOfApproximatedStructure.size();

    // Compute average of the intersection nodes which is a node on the plane we look for
    // NOTE(review): relies on P_mean being zero-initialized by the Point default
    // constructor before accumulation — confirm against the Point implementation
    Point P_mean;
    for(unsigned int k=0; k<numberCutEdges; k++)
        for(unsigned int i=0; i<3; i++)
            P_mean.Coordinates()[i] += NodesOfApproximatedStructure[k].Coordinates[i];

    for(unsigned int i=0; i<3; i++)
        P_mean.Coordinates()[i] /= numberCutEdges;

    // Compute normal for the best-fitted plane
    array_1d<double,3> N_mean;

    Matrix coordinates(numberCutEdges,3);
    for(unsigned int i=0; i<numberCutEdges; i++)
        for(unsigned int j=0; j<3; j++)
            coordinates(i,j) = NodesOfApproximatedStructure[i].Coordinates[j] - P_mean[j];

    Matrix A = prod(trans(coordinates),coordinates);
    Matrix V(3,3);
    Vector lambda(3);

    // Calculate the eigenvectors V and the corresponding eigenvalues lambda
    EigenVectors(A, V, lambda);

    // Look for the minimal eigenvalue of all lambdas
    unsigned int min_pos = 0;
    double min_lambda = lambda[min_pos];
    for(unsigned int i=1;i<3; i++)
        if(min_lambda > lambda[i])
        {
            min_lambda = lambda[i];
            min_pos = i;
        }

    // the normal equals to the eigenvector which corresponds to the minimal eigenvalue
    for(unsigned int i=0;i<3; i++)
        N_mean[i] = V(min_pos,i);
    N_mean /= norm_2(N_mean);

    // Check whether orientation of normal is in direction of the normal of the intersecting structure
    // Note: The normal of the approx. surface can be max. 90deg to every surrounding normal of the structure at the intersection nodes
    array_1d<double,3> NormalAtOneIntersectionNode;
    NormalAtOneIntersectionNode = NodesOfApproximatedStructure[0].StructElemNormal;

    bool NormalWrongOriented = false;
    if(inner_prod(NormalAtOneIntersectionNode,N_mean)<0)
        NormalWrongOriented = true;

    // switch direction of normal
    if(NormalWrongOriented)
        N_mean *=-1;

    // Determine about the minimal distance by considering the distances to both triangles
    for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
    {
        ElementalDistances[i_TetNode] = PointDistanceToPlane(P_mean, N_mean, i_fluid_Element->GetGeometry()[i_TetNode] );
    }

    // #################################################
    unsigned int numberDoubleCutEdges = 0;
    unsigned int indexDoubleCutEdge = 0;

    // figure out the edges which are cut more than once
    for(unsigned int i_TetEdge = 0 ; i_TetEdge < IntersectedTetEdges.size() ; i_TetEdge++)
    {
        unsigned int NumberIntNodes = IntersectedTetEdges[i_TetEdge].IntNodes.size();
        if(NumberIntNodes == 2)
        {
            numberDoubleCutEdges++;
            indexDoubleCutEdge = i_TetEdge;
        }
    }

    if((numberDoubleCutEdges >= 1))
    {
        array_1d<double,3> normal_1 = IntersectedTetEdges[indexDoubleCutEdge].IntNodes[0].StructElemNormal;
        array_1d<double,3> normal_2 = IntersectedTetEdges[indexDoubleCutEdge].IntNodes[1].StructElemNormal;

        // normalize normals
        normal_1 /= norm_2(normal_1);
        normal_2 /= norm_2(normal_2);

        // NOTE(review): low-precision constant; M_PI would be preferable (changes angle threshold only marginally)
        const double pi = 3.1415926;

        // compute angle between normals
        double angle_n1n2 = acos( inner_prod(normal_1,normal_2) );
        // rad --> degree
        angle_n1n2 *= 180 / pi;

        // if angle between -60 and 120 degrees, take the mean of the normals,
        // otherwise the normals point to opposite sides and the difference is used
        if( (angle_n1n2 > -60) && (angle_n1n2 < 120) )
        {
            // take the mean of the normals
            N_mean = 0.5 * (normal_1 + normal_2);
        }
        else
        {
            N_mean = 0.5 * (normal_1 - normal_2);
        }

        // Based on N_mean and P_mean compute the distances to that plane
        for(unsigned int i_TetNode = 0 ; i_TetNode < 4 ; i_TetNode++)
        {
            ElementalDistances[i_TetNode] = PointDistanceToPlane(P_mean, N_mean, i_fluid_Element->GetGeometry()[i_TetNode] );
        }
    }
}

///******************************************************************************************************************
///******************************************************************************************************************

/**
 * This function calculates the distance of a 3D point to a plane spanned by a 3D triangle
 * @param planeBasePoint base point of the plane
 * @param planeNormal normal of the plane
 * @param ToPoint The point which distance is required
 * @return The distance between the point and the plane spanned by the 3D triangle
 */
double PointDistanceToPlane( Point& planeBasePoint,
                             array_1d<double, 3>& planeNormal,
                             Point& ToPoint)
{
    // calculate vector pointing from a node in the plane (e.g.
triangle point 1) to the considered node ToPoint array_1d<double,3> planeToPointVec = ToPoint - planeBasePoint; // projection of node on the plane const double sn = inner_prod(planeToPointVec,planeNormal); const double sd = inner_prod(planeNormal,planeNormal); double DistanceToPlane = sn / sqrt(sd); if( fabs(DistanceToPlane) < epsilon ) DistanceToPlane = 0; return DistanceToPlane; } ///****************************************************************************************************************** ///****************************************************************************************************************** void AssignMinimalNodalDistance() { // loop over all fluid Elements for( ModelPart::ElementIterator i_fluid_Element = mrFluidModelPart.ElementsBegin(); i_fluid_Element != mrFluidModelPart.ElementsEnd(); i_fluid_Element++) { Geometry< Node<3> >& geom = i_fluid_Element->GetGeometry(); const Vector& ElementalDistances = i_fluid_Element->GetValue(ELEMENTAL_DISTANCES); // Assign distances to the single nodes, if a smaller value is found for( unsigned int i_TetNode = 0; i_TetNode < 4; i_TetNode++ ) { double currentNodeDist = geom[i_TetNode].GetSolutionStepValue(DISTANCE); double nodeInElemDist = ElementalDistances[i_TetNode]; if( fabs( nodeInElemDist ) < fabs( currentNodeDist ) ) geom[i_TetNode].GetSolutionStepValue(DISTANCE) = nodeInElemDist; // overwrite nodal distance (which is global) } // loop i_TetNode } // loop i_fluidElement } ///****************************************************************************************************************** ///****************************************************************************************************************** /** * If structure directly passes through the corner point of a tetrahedra (leading to zero distances * in the respective node), then a small distance value (different from zero) will be stored for * that point. This is necessary since the embedded solver cannot handle zero distances. 
* @param Element current Element which was cut by the structure (flag SPLIT_ELEMENT is set to one) * @param ElementalDistances Elemental distances calculated by the intersection pattern */ void AvoidZeroDistances( ModelPart::ElementsContainerType::iterator& Element, array_1d<double,4>& ElementalDistances) { // Assign a distance limit double dist_limit = 1e-5; // bool distChangedToLimit = false; //variable to indicate that a distance value < tolerance is set to a limit distance = tolerance // // for(unsigned int i_node = 0; i_node < 4; i_node++) // { // if(fabs(ElementalDistances[i_node]) < dist_limit) // { // ElementalDistances[i_node] = dist_limit; // distChangedToLimit = true; // } // } // // // Check, if this approach changes the split-flag (might be, that Element is not cut anymore if node with zero distance gets a positive limit distance value // unsigned int numberNodesPositiveDistance = 0; // for(unsigned int i_node = 0; i_node < 4; i_node++) // { // if((ElementalDistances[i_node]) > 0) // numberNodesPositiveDistance++; // } for(unsigned int i_node = 0; i_node < 4; i_node++) { double & di = ElementalDistances[i_node]; if(fabs(di) < dist_limit) { if(di >= 0) di = dist_limit; else di = -dist_limit; } } // Element is not set // if(numberNodesPositiveDistance == 4 && distChangedToLimit == true) // Element->GetValue(SPLIT_ELEMENT) = false; } ///****************************************************************************************************************** ///****************************************************************************************************************** void GenerateSkinModelPart( ModelPart& mrNewSkinModelPart ) { unsigned int id_node = mrFluidModelPart.NumberOfNodes() + 1; unsigned int id_condition = mrFluidModelPart.NumberOfConditions() + 1; mrNewSkinModelPart.Nodes().reserve(mrFluidModelPart.Nodes().size()); mrNewSkinModelPart.Conditions().reserve(mrFluidModelPart.Elements().size()); for(ModelPart::ElementIterator i_fluid_element = 
mrFluidModelPart.ElementsBegin(); i_fluid_element != mrFluidModelPart.ElementsEnd(); i_fluid_element++) { bool is_split = i_fluid_element->Is(TO_SPLIT); if(is_split == true) { const Vector& distances = i_fluid_element->GetValue(ELEMENTAL_DISTANCES); Geometry< Node<3> >& geom = i_fluid_element->GetGeometry(); // generate the points on the edges at the zero of the distance function std::vector< Point > edge_points; edge_points.reserve(4); // loop over all 6 edges of the tetrahedra for(unsigned int i=0; i<3; i++) { for(unsigned int j=i+1; j<4; j++) // go through the edges 01, 02, 03, 12, 13, 23 { double di = distances[i]; double dj = distances[j]; if(di*dj < 0) //edge is cut { // generate point on edge by linear interpolation double Ni = fabs(dj) / ( fabs(di) + fabs(dj) ); double Nj = 1.0 - Ni; Point edge_point(Ni * geom[i] + Nj * geom[j]); edge_points.push_back(edge_point); } } } // three intersection nodes if(edge_points.size() == 3) { // ######## ADDING NEW NODE ######### Node < 3 >::Pointer pnode1 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[0].X(),edge_points[0].Y(),edge_points[0].Z()); Node < 3 >::Pointer pnode2 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[1].X(),edge_points[1].Y(),edge_points[1].Z()); Node < 3 >::Pointer pnode3 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[2].X(),edge_points[2].Y(),edge_points[2].Z()); // ######## ADDING NEW CONDITION ######### //form a triangle Triangle3D3< Node<3> > triangle(pnode1, pnode2, pnode3); Condition const& rReferenceCondition = KratosComponents<Condition>::Get("SurfaceCondition3D"); Properties::Pointer properties = mrNewSkinModelPart.rProperties()(0); Condition::Pointer p_condition = rReferenceCondition.Create(id_condition++, triangle, properties); mrNewSkinModelPart.Conditions().push_back(p_condition); } // four intersection nodes if(edge_points.size() == 4) { //form a quadrilatera with the 4 cut nodes array_1d<double,3> x21 = edge_points[1] - edge_points[0]; array_1d<double,3> 
x31 = edge_points[2] - edge_points[0]; array_1d<double,3> x41 = edge_points[3] - edge_points[0]; //define a vector oriented as x21 array_1d<double,3> v1 = x21 / norm_2(x21); BoundedMatrix<double,4,3> DN_DX; array_1d<double,4> msN; double Area; GeometryUtils::CalculateGeometryData( geom, DN_DX, msN, Area ); array_1d<double,3> n = prod(trans(DN_DX),distances); n /= norm_2(n); array_1d<double,3> v2; MathUtils<double>::CrossProduct(v2,v1,n); // v2 = v1 x n array_1d<double,3> angles; angles[0] = 0.0; //angle between x21 and v1 angles[1] = atan2( inner_prod(x31,v2), inner_prod(x31,v1) ); //angle between x31 and v1 angles[2] = atan2( inner_prod(x41,v2), inner_prod(x41,v1) ); //angle between x31 and v1 double max_angle = 0.0; double min_angle = 0.0; unsigned int min_pos = 1; unsigned int max_pos = 1; for(unsigned int i=1; i<3; i++) { if(angles[i] < min_angle) { min_pos = i+1; //this is the local index of the edge point which forms the minimal angle min_angle = angles[i]; } else if(angles[i] > max_angle) { max_pos = i+1; //this is the local index of the edge point which forms the maximal angle max_angle = angles[i]; } } //find the pos of the center node unsigned int center_pos = 0; for(unsigned int i=1; i<4; i++) { if((i!= min_pos) && (i!=max_pos)) { center_pos = i; } } // ######## ADDING NEW NODE ######### Node < 3 >::Pointer pnode1 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[0].X(),edge_points[0].Y(),edge_points[0].Z()); Node < 3 >::Pointer pnode2 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[min_pos].X(),edge_points[min_pos].Y(),edge_points[min_pos].Z()); Node < 3 >::Pointer pnode3 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[center_pos].X(),edge_points[center_pos].Y(),edge_points[center_pos].Z()); Node < 3 >::Pointer pnode4 = mrNewSkinModelPart.CreateNewNode(id_node++,edge_points[max_pos].X(),edge_points[max_pos].Y(),edge_points[max_pos].Z()); // ######## ADDING NEW CONDITION ######### //form two triangles Triangle3D3< Node<3> > 
triangle1(pnode1, pnode2, pnode3); Triangle3D3< Node<3> > triangle2(pnode1, pnode3, pnode4); Condition const& rReferenceCondition = KratosComponents<Condition>::Get("SurfaceCondition3D"); Properties::Pointer properties = mrNewSkinModelPart.rProperties()(0); Condition::Pointer p_condition1 = rReferenceCondition.Create(id_condition++, triangle1, properties); Condition::Pointer p_condition2 = rReferenceCondition.Create(id_condition++, triangle2, properties); mrNewSkinModelPart.Conditions().push_back(p_condition1); mrNewSkinModelPart.Conditions().push_back(p_condition2); } } } } ///****************************************************************************************************************** ///****************************************************************************************************************** void GenerateOctree() { Timer::Start("Generating Octree"); //std::cout << "Generating the Octree..." << std::endl; auto temp_octree = Kratos::make_shared<OctreeType>(); //OctreeType::Pointer temp_octree = OctreeType::Pointer(new OctreeType() ); mpOctree.swap(temp_octree); double low[3]; double high[3]; for (int i = 0 ; i < 3; i++) { low[i] = high[i] = mrFluidModelPart.NodesBegin()->Coordinates()[i]; } // loop over all nodes in the bounding box for(ModelPart::NodeIterator i_node = mrFluidModelPart.NodesBegin(); i_node != mrFluidModelPart.NodesEnd(); i_node++) { const array_1d<double,3>& r_coordinates = i_node->Coordinates(); for (int i = 0 ; i < 3; i++) { low[i] = r_coordinates[i] < low[i] ? r_coordinates[i] : low[i]; high[i] = r_coordinates[i] > high[i] ? r_coordinates[i] : high[i]; } } // loop over all skin nodes for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin(); i_node != mrSkinModelPart.NodesEnd(); i_node++) { const array_1d<double,3>& r_coordinates = i_node->Coordinates(); for (int i = 0 ; i < 3; i++) { low[i] = r_coordinates[i] < low[i] ? r_coordinates[i] : low[i]; high[i] = r_coordinates[i] > high[i] ? 
r_coordinates[i] : high[i]; } } mpOctree->SetBoundingBox(low,high); //mpOctree->RefineWithUniformSize(0.0625); // loop over all structure nodes for(ModelPart::NodeIterator i_node = mrSkinModelPart.NodesBegin(); i_node != mrSkinModelPart.NodesEnd(); i_node++) { double temp_point[3]; temp_point[0] = i_node->X(); temp_point[1] = i_node->Y(); temp_point[2] = i_node->Z(); mpOctree->Insert(temp_point); } //mpOctree->Constrain2To1(); // To be removed. Pooyan. // loop over all structure elements for(ModelPart::ElementIterator i_element = mrSkinModelPart.ElementsBegin(); i_element != mrSkinModelPart.ElementsEnd(); i_element++) { mpOctree->Insert(*(i_element).base()); } Timer::Stop("Generating Octree"); // KRATOS_WATCH(mpOctree); // std::cout << "######## WRITING OCTREE MESH #########" << std::endl; // std::ofstream myfile; // myfile.open ("octree.post.msh"); // mpOctree.PrintGiDMesh(myfile); // myfile.close(); //std::cout << "Generating the Octree finished" << std::endl; } ///****************************************************************************************************************** ///****************************************************************************************************************** void GenerateNodes() { Timer::Start("Generating Nodes"); std::vector<OctreeType::cell_type*> all_leaves; mpOctree->GetAllLeavesVector(all_leaves); int leaves_size = all_leaves.size(); #pragma omp parallel for for (int i = 0; i < leaves_size; i++) { *(all_leaves[i]->pGetDataPointer()) = ConfigurationType::AllocateData(); } std::size_t last_id = mrBodyModelPart.NumberOfNodes() + 1; for (std::size_t i = 0; i < all_leaves.size(); i++) { CellType* cell = all_leaves[i]; GenerateCellNode(cell, last_id); } Timer::Stop("Generating Nodes"); } ///****************************************************************************************************************** ///****************************************************************************************************************** void 
GenerateCellNode(CellType* pCell, std::size_t& LastId) { for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center { DistanceSpatialContainersConfigure::cell_node_data_type* p_node = (*(pCell->pGetData()))[i_pos]; if(p_node == 0) { (*(pCell->pGetData()))[i_pos] = new DistanceSpatialContainersConfigure::cell_node_data_type; (*(pCell->pGetData()))[i_pos]->Id() = LastId++; mOctreeNodes.push_back((*(pCell->pGetData()))[i_pos]); SetNodeInNeighbours(pCell,i_pos,(*(pCell->pGetData()))[i_pos]); } } } ///****************************************************************************************************************** ///****************************************************************************************************************** void SetNodeInNeighbours(CellType* pCell, int Position, CellNodeDataType* pNode) { CellType::key_type point_key[3]; pCell->GetKey(Position, point_key); for (std::size_t i_direction = 0; i_direction < 8; i_direction++) { CellType::key_type neighbour_key[3]; if (pCell->GetNeighbourKey(Position, i_direction, neighbour_key)) { CellType* neighbour_cell = mpOctree->pGetCell(neighbour_key); if (!neighbour_cell || (neighbour_cell == pCell)) continue; std::size_t position = neighbour_cell->GetLocalPosition(point_key); if((*neighbour_cell->pGetData())[position]) { std::cout << "ERROR!! Bad Position calculated!!!!!!!!!!! position :" << position << std::endl; continue; } (*neighbour_cell->pGetData())[position] = pNode; } } } ///****************************************************************************************************************** ///****************************************************************************************************************** void CalculateDistance2() { Timer::Start("Calculate Distances2"); ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray(); int nodes_size = nodes.size(); // // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space. 
//#pragma omp parallel for firstprivate(nodes_size) // for(int i = 0 ; i < nodes_size ; i++) // nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00; std::vector<CellType*> leaves; mpOctree->GetAllLeavesVector(leaves); //int leaves_size = leaves.size(); // for(int i = 0 ; i < leaves_size ; i++) // CalculateNotEmptyLeavesDistance(leaves[i]); #pragma omp parallel for firstprivate(nodes_size) for(int i = 0 ; i < nodes_size ; i++) { CalculateNodeDistance(*(nodes[i])); } Timer::Stop("Calculate Distances2"); } ///****************************************************************************************************************** ///****************************************************************************************************************** // void CalculateDistance3() // { // Timer::Start("Calculate Distances2"); // ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray(); // int nodes_size = nodes.size(); //// // first of all we reset the node distance to 1.00 which is the maximum distnace in our normalized space. 
//#pragma omp parallel for firstprivate(nodes_size) // for(int i = 0 ; i < nodes_size ; i++) // nodes[i]->GetSolutionStepValue(DISTANCE) = 1.00; // std::vector<CellType*> leaves; // mpOctree->GetAllLeavesVector(leaves); // int leaves_size = leaves.size(); // for(int i = 0 ; i < leaves_size ; i++) // CalculateNotEmptyLeavesDistance(leaves[i]); //#pragma omp parallel for firstprivate(nodes_size) // for(int i = 0 ; i < nodes_size ; i++) // { // CalculateNodeDistance(*(nodes[i])); // } // Timer::Stop("Calculate Distances2"); // } // void CalculateDistance4() // { // Timer::Start("Calculate Distances3"); // ModelPart::NodesContainerType::ContainerType& nodes = mrFluidModelPart.NodesArray(); // int nodes_size = nodes.size(); // std::vector<CellType*> leaves; // mpOctree->GetAllLeavesVector(leaves); // int leaves_size = leaves.size(); //#pragma omp parallel for firstprivate(nodes_size) // for(int i = 0 ; i < nodes_size ; i++) // { // CalculateNodeDistanceFromCell(*(nodes[i])); // } // Timer::Stop("Calculate Distances3"); // } void CalculateDistance() { Timer::Start("Calculate Distances"); DistanceSpatialContainersConfigure::data_type& nodes = mOctreeNodes; int nodes_size = nodes.size(); // first of all we reste the node distance to 1.00 which is the maximum distnace in our normalized space. 
#pragma omp parallel for firstprivate(nodes_size) for(int i = 0 ; i < nodes_size ; i++) nodes[i]->Distance() = 1.00; std::vector<CellType*> leaves; mpOctree->GetAllLeavesVector(leaves); int leaves_size = leaves.size(); for(int i = 0 ; i < leaves_size ; i++) CalculateNotEmptyLeavesDistance(leaves[i]); for(int i_direction = 0 ; i_direction < 1 ; i_direction++) { //#pragma omp parallel for firstprivate(nodes_size) for(int i = 0 ; i < nodes_size ; i++) { if(nodes[i]->X() < 1.00 && nodes[i]->Y() < 1.00 && nodes[i]->Z() < 1.00) // if((*nodes[i])[i_direction] == 0.00) CalculateDistance(*(nodes[i]), i_direction); } } Timer::Stop("Calculate Distances"); } void CalculateDistance(CellNodeDataType& rNode, int i_direction) { double coords[3] = {rNode.X(), rNode.Y(), rNode.Z()}; // KRATOS_WATCH_3(coords); //This function must color the positions in space defined by 'coords'. //coords is of dimension (3) normalized in (0,1)^3 space typedef Element::GeometryType triangle_type; typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type; intersections_container_type intersections; DistanceSpatialContainersConfigure::data_type nodes_array; const double epsilon = 1e-12; double distance = 1.0; // Creating the ray double ray[3] = {coords[0], coords[1], coords[2]}; mpOctree->NormalizeCoordinates(ray); ray[i_direction] = 0; // starting from the lower extreme // KRATOS_WATCH_3(ray) GetIntersectionsAndNodes(ray, i_direction, intersections, nodes_array); // KRATOS_WATCH(nodes_array.size()) for (std::size_t i_node = 0; i_node < nodes_array.size() ; i_node++) { double coord = (*nodes_array[i_node])[i_direction]; // KRATOS_WATCH(intersections.size()); int ray_color= 1; std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin(); while (i_intersection != intersections.end()) { double d = coord - i_intersection->first; if (d > epsilon) { ray_color = -ray_color; distance = d; } else if (d > -epsilon) {//interface distance = 
0.00; break; } else { if(distance > -d) distance = -d; break; } i_intersection++; } distance *= ray_color; double& node_distance = nodes_array[i_node]->Distance(); if(fabs(distance) < fabs(node_distance)) node_distance = distance; else if (distance*node_distance < 0.00) // assigning the correct sign node_distance = -node_distance; } } void CalculateNotEmptyLeavesDistance(CellType* pCell) { //typedef Element::GeometryType triangle_type; typedef OctreeType::cell_type::object_container_type object_container_type; object_container_type* objects = (pCell->pGetObjects()); // There are no intersection in empty cells if (objects->empty()) return; for (int i_pos=0; i_pos < 8; i_pos++) // position 8 is for center { double distance = 1.00; // maximum distance is 1.00 for(object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++) { CellType::key_type keys[3]; pCell->GetKey(i_pos,keys); double cell_point[3]; mpOctree->CalculateCoordinates(keys,cell_point); double d = GeometryUtils::PointDistanceToTriangle3D((*i_object)->GetGeometry()[0], (*i_object)->GetGeometry()[1], (*i_object)->GetGeometry()[2], Point(cell_point[0], cell_point[1], cell_point[2])); if(d < distance) distance = d; } double& node_distance = (*(pCell->pGetData()))[i_pos]->Distance(); if(distance < node_distance) node_distance = distance; } } void CalculateNodeDistance(Node<3>& rNode) { double coord[3] = {rNode.X(), rNode.Y(), rNode.Z()}; double distance = DistancePositionInSpace(coord); double& node_distance = rNode.GetSolutionStepValue(DISTANCE); //const double epsilon = 1.00e-12; //if(fabs(node_distance) > fabs(distance)) // node_distance = distance; /*else*/ if (distance*node_distance < 0.00) // assigning the correct sign node_distance = -node_distance; } // void CalculateNodeDistanceFromCell(Node<3>& rNode) // { // OctreeType::key_type node_key[3] = {octree->CalcKeyNormalized(rNode.X()), octree->CalcKeyNormalized(rNode.Y()), octree->CalcKeyNormalized(rNode.Z())}; // 
OctreeType::cell_type* pcell = octree->pGetCell(node_key); // object_container_type* objects = (pCell->pGetObjects()); // // We interpolate the cell distances for the node in empty cells // if (objects->empty()) // { // } // double distance = DistancePositionInSpace(coord); // double& node_distance = rNode.GetSolutionStepValue(DISTANCE); // //const double epsilon = 1.00e-12; // if(fabs(node_distance) > fabs(distance)) // node_distance = distance; // else if (distance*node_distance < 0.00) // assigning the correct sign // node_distance = -node_distance; // } double DistancePositionInSpace(double* coords) { //This function must color the positions in space defined by 'coords'. //coords is of dimension (3) normalized in (0,1)^3 space typedef Element::GeometryType triangle_type; typedef std::vector<std::pair<double, triangle_type*> > intersections_container_type; intersections_container_type intersections; const int dimension = 3; const double epsilon = 1e-12; double distances[3] = {1.0, 1.0, 1.0}; for (int i_direction = 0; i_direction < dimension; i_direction++) { // Creating the ray double ray[3] = {coords[0], coords[1], coords[2]}; mpOctree->NormalizeCoordinates(ray); ray[i_direction] = 0; // starting from the lower extreme GetIntersections(ray, i_direction, intersections); // if(intersections.size() == 1) // KRATOS_WATCH_3(ray) // KRATOS_WATCH(intersections.size()); int ray_color= 1; std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin(); while (i_intersection != intersections.end()) { double d = coords[i_direction] - i_intersection->first; if (d > epsilon) { ray_color = -ray_color; distances[i_direction] = d; // if(distances[i_direction] > d) // I think this is redundunt. Pooyan. 
// { // if(ray_color > 0.00) // distances[i_direction] = d; // else // distances[i_direction] = -d; // } } else if (d > -epsilon) {//interface distances[i_direction] = 0.00; break; } else { if(distances[i_direction] > -d) distances[i_direction] = -d; break; } i_intersection++; } distances[i_direction] *= ray_color; } // if(distances[0]*distances[1] < 0.00 || distances[2]*distances[1] < 0.00) // KRATOS_WATCH_3(distances); //#ifdef _DEBUG // std::cout << "colors : " << colors[0] << ", " << colors[1] << ", " << colors[2] << std::endl; //#endif double distance = (fabs(distances[0]) > fabs(distances[1])) ? distances[1] : distances[0]; distance = (fabs(distance) > fabs(distances[2])) ? distances[2] : distance; return distance; } void GetIntersectionsAndNodes(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections, DistanceSpatialContainersConfigure::data_type& rNodesArray) { //This function passes the ray through the model and gives the hit point to all objects in its way //ray is of dimension (3) normalized in (0,1)^3 space // direction can be 0,1,2 which are x,y and z respectively const double epsilon = 1.00e-12; // first clearing the intersections points vector intersections.clear(); //OctreeType* octree = &mOctree; OctreeType* octree = mpOctree.get(); OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])}; OctreeType::key_type cell_key[3]; // getting the entrance cell from lower extreme ray_key[direction] = 0; OctreeType::cell_type* cell = octree->pGetCell(ray_key); while (cell) { std::size_t position = cell->GetLocalPosition(ray_key); // Is this the local position!?!?!?! 
OctreeType::key_type node_key[3]; cell->GetKey(position, node_key); if((node_key[0] == ray_key[0]) && (node_key[1] == ray_key[1]) && (node_key[2] == ray_key[2])) { if(cell->pGetData()) { if(cell->pGetData()->size() > position) { CellNodeDataType* p_node = (*cell->pGetData())[position]; if(p_node) { //KRATOS_WATCH(p_node->Id()) rNodesArray.push_back(p_node); } } else KRATOS_WATCH(cell->pGetData()->size()) } } // std::cout << "."; GetCellIntersections(cell, ray, ray_key, direction, intersections); // Add the cell's middle node if existed // cell->GetKey(8, cell_key); // 8 is the central position // ray_key[direction]=cell_key[direction]; // positioning the ray in the middle of cell in its direction // position = cell->GetLocalPosition(ray_key); // if(position < 27) // principal nodes // { // if(cell->pGetData()) // { // if(cell->pGetData()->size() > position) // { // Node<3>* p_node = (*cell->pGetData())[position]; // if(p_node) // { // //KRATOS_WATCH(p_node->Id()) // rNodesArray.push_back(p_node); // } // } // else // KRATOS_WATCH(cell->pGetData()->size()) // } // } // else // { // KRATOS_WATCH(position); // KRATOS_WATCH(*cell); // } // go to the next cell if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) { ray_key[direction] = cell_key[direction]; cell = octree->pGetCell(ray_key); ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding //cell get in pGetCell is the right one. 
//#ifdef _DEBUG // Octree_Pooyan::key_type min_key[3]; // cell->GetMinKey(min_key[0],min_key[1],min_key[2]); // Octree_Pooyan::key_type tmp; // tmp= min_key[direction]; // assert(ray_key[direction]==tmp); //#endif } else cell = NULL; } // KRATOS_WATCH(rNodesArray.size()); // now eliminating the repeated objects if (!intersections.empty()) { //sort std::sort(intersections.begin(), intersections.end()); // unique std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin(); std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin(); while (++i_begin != intersections.end()) { // considering the very near points as the same points if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same *(++i_intersection) = *i_begin; } intersections.resize((++i_intersection) - intersections.begin()); } } void GetIntersections(double* ray, int direction, std::vector<std::pair<double,Element::GeometryType*> >& intersections) { //This function passes the ray through the model and gives the hit point to all objects in its way //ray is of dimension (3) normalized in (0,1)^3 space // direction can be 0,1,2 which are x,y and z respectively const double epsilon = 1.00e-12; // first clearing the intersections points vector intersections.clear(); //OctreeType* octree = &mOctree; OctreeType* octree = mpOctree.get(); OctreeType::key_type ray_key[3] = {octree->CalcKeyNormalized(ray[0]), octree->CalcKeyNormalized(ray[1]), octree->CalcKeyNormalized(ray[2])}; OctreeType::key_type cell_key[3]; // getting the entrance cell from lower extreme OctreeType::cell_type* cell = octree->pGetCell(ray_key); while (cell) { // std::cout << "."; GetCellIntersections(cell, ray, ray_key, direction, intersections); // go to the next cell if (cell->GetNeighbourKey(1 + direction * 2, cell_key)) { ray_key[direction] = cell_key[direction]; cell = octree->pGetCell(ray_key); 
ray_key[direction] -= 1 ;//the key returned by GetNeighbourKey is inside the cell (minkey +1), to ensure that the corresponding //cell get in pGetCell is the right one. //#ifdef _DEBUG // Octree_Pooyan::key_type min_key[3]; // cell->GetMinKey(min_key[0],min_key[1],min_key[2]); // Octree_Pooyan::key_type tmp; // tmp= min_key[direction]; // assert(ray_key[direction]==tmp); //#endif } else cell = NULL; } // now eliminating the repeated objects if (!intersections.empty()) { //sort std::sort(intersections.begin(), intersections.end()); // unique std::vector<std::pair<double, Element::GeometryType*> >::iterator i_begin = intersections.begin(); std::vector<std::pair<double, Element::GeometryType*> >::iterator i_intersection = intersections.begin(); while (++i_begin != intersections.end()) { // considering the very near points as the same points if (fabs(i_begin->first - i_intersection->first) > epsilon) // if the hit points are far enough they are not the same *(++i_intersection) = *i_begin; } intersections.resize((++i_intersection) - intersections.begin()); } } int GetCellIntersections(OctreeType::cell_type* cell, double* ray, OctreeType::key_type* ray_key, int direction, std::vector<std::pair<double, Element::GeometryType*> >& intersections) { //This function passes the ray through the cell and gives the hit point to all objects in its way //ray is of dimension (3) normalized in (0,1)^3 space // direction can be 0,1,2 which are x,y and z respectively //typedef Element::GeometryType triangle_type; typedef OctreeType::cell_type::object_container_type object_container_type; object_container_type* objects = (cell->pGetObjects()); // There are no intersection in empty cells if (objects->empty()) return 0; // std::cout << "X"; // calculating the two extreme of the ray segment inside the cell double ray_point1[3] = {ray[0], ray[1], ray[2]}; double ray_point2[3] = {ray[0], ray[1], ray[2]}; double normalized_coordinate; mpOctree->CalculateCoordinateNormalized(ray_key[direction], 
normalized_coordinate); ray_point1[direction] = normalized_coordinate; ray_point2[direction] = ray_point1[direction] + mpOctree->CalcSizeNormalized(cell); mpOctree->ScaleBackToOriginalCoordinate(ray_point1); mpOctree->ScaleBackToOriginalCoordinate(ray_point2); for (object_container_type::iterator i_object = objects->begin(); i_object != objects->end(); i_object++) { double intersection[3]={0.00,0.00,0.00}; int is_intersected = IntersectionTriangleSegment((*i_object)->GetGeometry(), ray_point1, ray_point2, intersection); // This intersection has to be optimized for axis aligned rays if (is_intersected == 1) // There is an intersection but not coplanar intersections.push_back(std::pair<double, Element::GeometryType*>(intersection[direction], &((*i_object)->GetGeometry()))); //else if(is_intersected == 2) // coplanar case } return 0; } int IntersectionTriangleSegment(Element::GeometryType& rGeometry, double* RayPoint1, double* RayPoint2, double* IntersectionPoint) { // This is the adaption of the implemnetation provided in: // http://www.softsurfer.com/Archive/algorithm_0105/algorithm_0105.htm#intersect_RayTriangle() const double epsilon = 1.00e-12; array_1d<double,3> u, v, n; // triangle vectors array_1d<double,3> dir, w0, w; // ray vectors double r, a, b; // params to calc ray-plane intersect // get triangle edge vectors and plane normal u = rGeometry[1] - rGeometry[0]; v = rGeometry[2] - rGeometry[0]; MathUtils<double>::CrossProduct(n, u, v); // cross product if (norm_2(n) == 0) // triangle is degenerate return -1; // do not deal with this case double triangle_origin_distance = -inner_prod(n, rGeometry[0]); Point ray_point_1, ray_point_2; for(int i = 0 ; i < 3 ; i++) { dir[i] = RayPoint2[i] - RayPoint1[i]; // ray direction vector w0[i] = RayPoint1[i] - rGeometry[0][i]; ray_point_1[i] = RayPoint1[i]; ray_point_2[i] = RayPoint2[i]; } double sign_distance_1 = inner_prod(n, ray_point_1) + triangle_origin_distance; double sign_distance_2 = inner_prod(n, ray_point_2) + 
triangle_origin_distance; if (sign_distance_1*sign_distance_2 > epsilon) // segment line point on the same side of plane return 0; a = -inner_prod(n,w0); b = inner_prod(n,dir); if (fabs(b) < epsilon) { // ray is parallel to triangle plane if (a == 0) // ray lies in triangle plane return 2; else return 0; // ray disjoint from plane } // get intersect point of ray with triangle plane r = a / b; if (r < 0.0) // ray goes away from triangle return 0; // => no intersect // for a segment, also test if (r > 1.0) => no intersect for(int i = 0 ; i < 3 ; i++) IntersectionPoint[i] = RayPoint1[i] + r * dir[i]; // intersect point of ray and plane // is I inside T? double uu, uv, vv, wu, wv, D; uu = inner_prod(u,u); uv = inner_prod(u,v); vv = inner_prod(v,v); for(int i = 0 ; i < 3 ; i++) w[i] = IntersectionPoint[i] - rGeometry[0][i]; wu = inner_prod(w,u); wv = inner_prod(w,v); D = uv * uv - uu * vv; // get and test parametric coords double s, t; s = (uv * wv - vv * wu) / D; if (s < 0.0 - epsilon || s > 1.0 + epsilon) // I is outside T return 0; t = (uv * wu - uu * wv) / D; if (t < 0.0 - epsilon || (s + t) > 1.0 + epsilon) // I is outside T return 0; return 1; // I is in T } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Input and output ///@{ /// Turn back information as a string. std::string Info() const override { return "CalculateSignedDistanceTo3DSkinProcess"; } /// Print information about this object. void PrintInfo(std::ostream& rOStream) const override { rOStream << "CalculateSignedDistanceTo3DSkinProcess"; } /// Print object's data. 
void PrintData(std::ostream& rOStream) const override { } void PrintGiDMesh(std::ostream & rOStream) const { std::vector<CellType*> leaves; mpOctree->GetAllLeavesVector(leaves); std::cout << "writing " << leaves.size() << " leaves" << std::endl; rOStream << "MESH \"leaves\" dimension 3 ElemType Hexahedra Nnode 8" << std::endl; rOStream << "# color 96 96 96" << std::endl; rOStream << "Coordinates" << std::endl; rOStream << "# node number coordinate_x coordinate_y coordinate_z " << std::endl; for(DistanceSpatialContainersConfigure::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++) { rOStream << (*i_node)->Id() << " " << (*i_node)->X() << " " << (*i_node)->Y() << " " << (*i_node)->Z() << std::endl; //mpOctree->Insert(temp_point); } std::cout << "Nodes written..." << std::endl; rOStream << "end coordinates" << std::endl; rOStream << "Elements" << std::endl; rOStream << "# Element node_1 node_2 node_3 material_number" << std::endl; for (std::size_t i = 0; i < leaves.size(); i++) { if ((leaves[i]->pGetData())) { DistanceSpatialContainersConfigure::data_type& nodes = (*(leaves[i]->pGetData())); rOStream << i + 1; for(int j = 0 ; j < 8 ; j++) rOStream << " " << nodes[j]->Id(); rOStream << std::endl; } } rOStream << "end Elements" << std::endl; } void PrintGiDResults(std::ostream & rOStream) const { std::vector<CellType*> leaves; mpOctree->GetAllLeavesVector(leaves); rOStream << "GiD Post Results File 1.0" << std::endl << std::endl; rOStream << "Result \"Distance\" \"Kratos\" 1 Scalar OnNodes" << std::endl; rOStream << "Values" << std::endl; for(DistanceSpatialContainersConfigure::data_type::const_iterator i_node = mOctreeNodes.begin() ; i_node != mOctreeNodes.end() ; i_node++) { rOStream << (*i_node)->Id() << " " << (*i_node)->Distance() << std::endl; } rOStream << "End Values" << std::endl; } ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member 
Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ ModelPart& mrSkinModelPart; ModelPart& mrBodyModelPart; ModelPart& mrFluidModelPart; DistanceSpatialContainersConfigure::data_type mOctreeNodes; Kratos::shared_ptr<OctreeType> mpOctree; static const double epsilon; /** * @} */ /** * calculates the eigenvectors and eigenvalues of given symmetric matrix A. * The eigenvectors and eigenvalues are calculated using the iterative * Gauss-Seidel-method * @param A the given symmetric matrix the eigenvectors are to be calculated. * :WARNING: Matrix A will be overwritten and has to be symmetric * @param V the result matrix (will be overwritten with the eigenvectors) * @param zero_tolerance the largest value considered to be zero */ static inline void EigenVectors(const Matrix& A, Matrix& vectors, Vector& lambda, double zero_tolerance =1e-9, int max_iterations = 10) { Matrix Help= A; for(int i=0; i<3; i++) for(int j=0; j<3; j++) Help(i,j)= Help(i,j); vectors.resize(Help.size1(),Help.size2(),false); lambda.resize(Help.size1(),false); Matrix HelpDummy(Help.size1(),Help.size2()); bool is_converged = false; Matrix unity=ZeroMatrix(Help.size1(),Help.size2()); for(unsigned int i=0; i< Help.size1(); i++) unity(i,i)= 1.0; Matrix V= unity; Matrix VDummy(Help.size1(),Help.size2()); Matrix Rotation(Help.size1(),Help.size2()); for(int iterations=0; iterations<max_iterations; iterations++) { is_converged= true; double a= 0.0; unsigned int index1= 0; unsigned int index2= 1; for(unsigned int i=0; i< Help.size1(); i++) { for(unsigned int j=(i+1); j< Help.size2(); j++) { if((fabs(Help(i,j)) > a ) && (fabs(Help(i,j)) > zero_tolerance)) { a= fabs(Help(i,j)); index1= i; index2= j; is_converged= false; } } } // KRATOS_WATCH(Help); 
if(is_converged) break; //Calculation of Rotationangle double gamma= (Help(index2,index2)-Help(index1,index1))/(2*Help(index1,index2)); double u=1.0; if(fabs(gamma) > zero_tolerance && fabs(gamma)< (1/zero_tolerance)) { u= gamma/fabs(gamma)*1.0/(fabs(gamma)+sqrt(1.0+gamma*gamma)); } else { if (fabs(gamma)>= (1.0/zero_tolerance)) u= 0.5/gamma; } double c= 1.0/(sqrt(1.0+u*u)); double s= c*u; double teta= s/(1.0+c); //Ratotion of the Matrix HelpDummy= Help; HelpDummy(index2,index2)= Help(index2,index2)+u*Help(index1,index2); HelpDummy(index1,index1)= Help(index1,index1)-u*Help(index1,index2); HelpDummy(index1,index2)= 0.0; HelpDummy(index2,index1)= 0.0; for(unsigned int i=0; i<Help.size1(); i++) { if((i!= index1) && (i!= index2)) { HelpDummy(index2,i)=Help(index2,i)+s*(Help(index1,i)- teta*Help(index2,i)); HelpDummy(i,index2)=Help(index2,i)+s*(Help(index1,i)- teta*Help(index2,i)); HelpDummy(index1,i)=Help(index1,i)-s*(Help(index2,i)+ teta*Help(index1,i)); HelpDummy(i,index1)=Help(index1,i)-s*(Help(index2,i)+ teta*Help(index1,i)); } } Help= HelpDummy; //Calculation of the eigenvectors V Rotation =unity; Rotation(index2,index1)=-s; Rotation(index1,index2)=s; Rotation(index1,index1)=c; Rotation(index2,index2)=c; // Help=ZeroMatrix(A.size1(),A.size1()); VDummy = ZeroMatrix(Help.size1(), Help.size2()); for(unsigned int i=0; i< Help.size1(); i++) { for(unsigned int j=0; j< Help.size1(); j++) { for(unsigned int k=0; k< Help.size1(); k++) { VDummy(i,j) += V(i,k)*Rotation(k,j); } } } V= VDummy; } if(!(is_converged)) { std::cout<<"########################################################"<<std::endl; std::cout<<"Max_Iterations exceed in Jacobi-Seidel-Iteration (eigenvectors)"<<std::endl; std::cout<<"########################################################"<<std::endl; } for(unsigned int i=0; i< Help.size1(); i++) { for(unsigned int j=0; j< Help.size1(); j++) { vectors(i,j)= V(j,i); } } for(unsigned int i=0; i<Help.size1(); i++) lambda(i)= Help(i,i); return; } inline void 
// Splits the index range [0, number_of_rows) into contiguous, near-equal
// chunks, one per thread, returned as boundary offsets: thread t owns rows
// [partitions[t], partitions[t+1]).  Because partitions[number_of_threads]
// is pinned to number_of_rows, the last chunk absorbs the remainder of the
// integer division.
// NOTE(review): divides by number_of_threads -- assumes callers pass a
// value >= 1 (TODO confirm; a 0 would divide by zero).
CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int>& partitions)
{
    partitions.resize(number_of_threads + 1);
    int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + partition_size;
}

///@}
///@name Private Operators
///@{

///@}
///@name Private Operations
///@{

///@}
///@name Private Access
///@{

///@}
///@name Private Inquiry
///@{

///@}
///@name Un accessible methods
///@{

/// Assignment operator.
CalculateSignedDistanceTo3DSkinProcess& operator=(CalculateSignedDistanceTo3DSkinProcess const& rOther);

/// Copy constructor.
//CalculateSignedDistanceTo3DSkinProcess(CalculateSignedDistanceTo3DSkinProcess const& rOther);

///@}

}; // Class CalculateSignedDistanceTo3DSkinProcess

///@}

///@name Type Definitions
///@{

///@}
///@name Input and output
///@{

/// input stream function
inline std::istream& operator >> (std::istream& rIStream,
                                  CalculateSignedDistanceTo3DSkinProcess& rThis);

/// output stream function
inline std::ostream& operator << (std::ostream& rOStream,
                                  const CalculateSignedDistanceTo3DSkinProcess& rThis)
{
    rThis.PrintInfo(rOStream);
    rOStream << std::endl;
    rThis.PrintData(rOStream);

    return rOStream;
}
///@}

// Out-of-class definition of the static tolerance declared in the class body.
const double CalculateSignedDistanceTo3DSkinProcess::epsilon = 1e-18;

} // namespace Kratos.

#endif // KRATOS_CALCULATE_DISTANCE_PROCESS_H_INCLUDED defined
omp_threadprivate_for.c
// RUN: %libomp-compile-and-run // REQUIRES: !(abt && (clang || gcc)) #include "omp_testsuite.h" #include <stdlib.h> #include <stdio.h> static int i; #pragma omp threadprivate(i) int test_omp_threadprivate_for() { int known_sum; int sum; known_sum = (LOOPCOUNT * (LOOPCOUNT + 1)) / 2; sum = 0; #pragma omp parallel { int sum0 = 0, i0; #pragma omp for for (i0 = 1; i0 <= LOOPCOUNT; i0++) { i = i0; sum0 = sum0 + i; } #pragma omp critical { sum = sum + sum0; } } /* end of parallel */ if (known_sum != sum ) { fprintf(stderr, " known_sum = %d, sum = %d\n", known_sum, sum); } return (known_sum == sum); } /* end of check_threadprivate*/ int main() { int i; int num_failed=0; for(i = 0; i < REPETITIONS; i++) { if(!test_omp_threadprivate_for()) { num_failed++; } } return num_failed; }
openmp_demo.c
//------------------------------------------------------------------------------ // GraphBLAS/Demo/Program/openmp_demo: example of user multithreading //------------------------------------------------------------------------------ // This demo uses OpenMP, and should work if GraphBLAS is compiled to // use either OpenMP or pthreads to synchronize multiple user threadds. // If OpenMP is not available, this program will work fine without it, in a // single user thread, regardless of the thread mechanism used by GraphBLAS. #include "GraphBLAS.h" #ifdef _OPENMP #include <omp.h> #endif #if defined __INTEL_COMPILER #pragma warning (disable: 58 167 144 177 181 186 188 589 593 869 981 1418 1419 1572 1599 2259 2282 2557 2547 3280 ) #elif defined __GNUC__ #pragma GCC diagnostic ignored "-Wunknown-pragmas" #pragma GCC diagnostic ignored "-Wunused-parameter" #pragma GCC diagnostic ignored "-Wincompatible-pointer-types" #endif #define NTHREADS 8 #define NTRIALS 10 #define N 6 #define OK(method) \ { \ GrB_Info info = method ; \ if (! 
(info == GrB_SUCCESS || info == GrB_NO_VALUE)) \ { \ printf ("Failure (id: %d, info: %d): %s\n", \ id, info, GrB_error ( )) ; \ /* return to caller (do not use inside critical section) */ \ return (0) ; \ } \ } //------------------------------------------------------------------------------ // worker //------------------------------------------------------------------------------ int worker (GrB_Matrix *Ahandle, int id) { printf ("\n================= worker %d starts:\n", id) ; fprintf (stderr, "worker %d\n", id) ; OK (GrB_Matrix_new (Ahandle, GrB_FP64, N, N)) ; GrB_Matrix A = *Ahandle ; // worker generates an intentional error message GrB_Matrix_setElement (A, 42, 1000+id, 1000+id) ; // print the intentional error generated when the worker started #pragma omp critical { // critical section printf ("\n----------------- worker %d intentional error:\n", id) ; printf ("%s\n", GrB_error ( )) ; } for (int hammer_hard = 0 ; hammer_hard < NTRIALS ; hammer_hard++) { for (int i = 0 ; i < N ; i++) { for (int j = 0 ; j < N ; j++) { double x = (i+1)*100000 + (j+1)*1000 + id ; OK (GrB_Matrix_setElement (A, x, i, j)) ; } } // force completion GrB_Index nvals ; OK (GrB_Matrix_nvals (&nvals, A)) ; } // Printing is done in a critical section, just so it is not overly // jumbled. Each matrix and error will print in a single body of text, // but the order of the matrices and errors printed will be out of order // because the critical section does not enforce the order that the // threads enter. GrB_Info info2 ; #pragma omp critical { // critical section printf ("\n----------------- worker %d is done:\n", id) ; info2 = GxB_print (A, GxB_SHORT) ; } OK (info2) ; // worker generates an intentional error message GrB_Matrix_setElement (A, 42, 1000+id, 1000+id) ; // print the intentional error generated when the worker started // It should be unchanged. 
#pragma omp critical { // critical section printf ("\n----------------- worker %d error should be same:\n", id) ; printf ("%s\n", GrB_error ( )) ; } return (0) ; } //------------------------------------------------------------------------------ // openmp_demo main program //------------------------------------------------------------------------------ int main (int argc, char **argv) { fprintf (stderr, "Demo: %s:\n", argv [0]) ; printf ("Demo: %s:\n", argv [0]) ; // initialize the mutex int id = -1 ; // start GraphBLAS OK (GrB_init (GrB_NONBLOCKING)) ; // Determine which user-threading model is being used. GxB_Thread_Model thread_safety ; GxB_get (GxB_THREAD_SAFETY, &thread_safety) ; printf ("GraphBLAS is using ") ; switch (thread_safety) { case GxB_THREAD_POSIX : printf ("a POSIX pthread mutex\n") ; break ; case GxB_THREAD_WINDOWS : printf ("a Windows CriticalSection\n") ; break ; case GxB_THREAD_ANSI : printf ("an ANSI C11 mtx_lock\n") ; break ; case GxB_THREAD_OPENMP : printf ("an OpenMP critical section\n") ; break ; default : // GxB_THREAD_NONE #ifdef _OPENMP printf ("(nothing! 
This will fail!)\n") ; #else printf ("nothing (OK since user program is single-threaded)\n") ; #endif break ; } printf ("to synchronize user threads.\n") ; #ifdef _OPENMP printf ("User threads in this program are OpenMP threads.\n") ; #else printf ("This user program is single threaded.\n") ; #endif GrB_Matrix Aarray [NTHREADS] ; // create the threads #pragma omp parallel for num_threads(NTHREADS) for (int id = 0 ; id < NTHREADS ; id++) { worker (&Aarray [id], id) ; } // the master thread prints them again, and frees them for (int id = 0 ; id < NTHREADS ; id++) { GrB_Matrix A = Aarray [id] ; printf ("\n---- Master prints matrix %d\n", id) ; OK (GxB_print (A, GxB_SHORT)) ; GrB_free (&A) ; } // print an error message printf ("\n\n---- Master thread prints an error message:\n") ; GrB_Matrix_new (NULL, GrB_FP64, 1, 1) ; printf ("master %d : Error: %s\n", id, GrB_error ( )) ; // finish GraphBLAS GrB_finalize ( ) ; // finish OpenMP exit (0) ; }
schedule-clauseg.c
#include <stdio.h>
#include <stdlib.h>
#ifdef _OPENMP
#include <omp.h>
#else
#define omp_get_thread_num() 0
#endif

/* Maximum number of iterations the fixed-size work array can hold. */
enum { MAX_ITER = 20 };

/*
 * Demo of schedule(guided, chunk) combined with firstprivate/lastprivate.
 *   argv[1]: number of iterations (clamped to [0, MAX_ITER])
 *   argv[2]: chunk size for the guided schedule (must be >= 1)
 * Prints each partial sum with the executing thread id, then the value of
 * 'suma' copied out of the loop by lastprivate.
 *
 * Fixes vs. previous version: implicit-int 'main' (invalid since C99, a
 * hard error in C23), missing return value, unvalidated chunk size fed to
 * schedule(guided,chunk) (the OpenMP spec requires chunk >= 1), and a VLA
 * sized by a variable that was later reassigned from user input.
 */
int main(int argc, char **argv)
{
    int i, n, chunk, suma = 0;
    int a[MAX_ITER];

    if (argc < 3) {
        fprintf(stderr, "\nFalta iteraciones y/o chunk \n");
        exit(-1); /* kept as -1 for compatibility with existing scripts */
    }

    n = atoi(argv[1]);
    if (n > MAX_ITER) n = MAX_ITER; /* stay within the bounds of a[] */
    if (n < 0) n = 0;               /* negative input: run no iterations */

    chunk = atoi(argv[2]);
    if (chunk < 1) {
        /* schedule(guided, chunk) requires a positive chunk size */
        fprintf(stderr, "\nEl chunk debe ser >= 1\n");
        exit(-1);
    }

    for (i = 0; i < n; i++) a[i] = i;

    /* 'suma' enters each thread initialized from the original value, 0
       (firstprivate); the copy from the sequentially-last iteration is
       written back after the loop (lastprivate). */
    #pragma omp parallel for firstprivate(suma) \
            lastprivate(suma) schedule(guided,chunk)
    for (i = 0; i < n; i++) {
        suma = suma + a[i];
        printf(" thread %d suma a[%d]=%d suma=%d \n",
               omp_get_thread_num(), i, a[i], suma);
    }

    printf("Fuera de 'parallel for' suma=%d\n", suma);
    return 0;
}
stencil.c
/* Copyright (c) 2013, Intel Corporation Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of Intel Corporation nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ /******************************************************************* NAME: Stencil PURPOSE: This program tests the efficiency with which a space-invariant, linear, symmetric filter (stencil) can be applied to a square grid or image. USAGE: The program takes as input the number of threads, the linear dimension of the grid, and the number of iterations on the grid <progname> <# threads> <iterations> <grid size> The output consists of diagnostics to make sure the algorithm worked, and of timing statistics. 
FUNCTIONS CALLED: Other than OpenMP or standard C functions, the following functions are used in this program: wtime() bail_out() HISTORY: - Written by Rob Van der Wijngaart, November 2006. - RvdW: Removed unrolling pragmas for clarity; added constant to array "in" at end of each iteration to force refreshing of neighbor data in parallel versions; August 2013 *******************************************************************/ #include <par-res-kern_general.h> #include <par-res-kern_omp.h> #ifndef RADIUS #define RADIUS 2 #endif #ifdef DOUBLE #define DTYPE double #define EPSILON 1.e-8 #define COEFX 1.0 #define COEFY 1.0 #define FSTR "%lf" #else #define DTYPE float #define EPSILON 0.0001f #define COEFX 1.0f #define COEFY 1.0f #define FSTR "%f" #endif /* define shorthand for indexing a multi-dimensional array */ #define IN(i,j) in[i+(j)*(n)] #define OUT(i,j) out[i+(j)*(n)] #define WEIGHT(ii,jj) weight[ii+RADIUS][jj+RADIUS] int main(int argc, char ** argv) { int n; /* linear grid dimension */ int i, j, ii, jj, it, jt, iter; /* dummies */ DTYPE norm, /* L1 norm of solution */ reference_norm; DTYPE f_active_points; /* interior of grid with respect to stencil */ DTYPE flops; /* floating point ops per iteration */ int iterations; /* number of times to run the algorithm */ double stencil_time, /* timing parameters */ avgtime; int stencil_size; /* number of points in stencil */ int nthread_input, /* thread parameters */ nthread; DTYPE * RESTRICT in; /* input grid values */ DTYPE * RESTRICT out; /* output grid values */ long total_length; /* total required length to store grid values */ int num_error=0; /* flag that signals that requested and obtained numbers of threads are the same */ DTYPE weight[2*RADIUS+1][2*RADIUS+1]; /* weights of points in the stencil */ /******************************************************************************* ** process and test input parameters ********************************************************************************/ if (argc != 4){ 
printf("Usage: %s <# threads> <# iterations> <array dimension>\n", *argv); return(EXIT_FAILURE); } /* Take number of threads to request from command line */ nthread_input = atoi(*++argv); if ((nthread_input < 1) || (nthread_input > MAX_THREADS)) { printf("ERROR: Invalid number of threads: %d\n", nthread_input); exit(EXIT_FAILURE); } omp_set_num_threads(nthread_input); iterations = atoi(*++argv); if (iterations < 1){ printf("ERROR: iterations must be >= 1 : %d \n",iterations); exit(EXIT_FAILURE); } n = atoi(*++argv); if (n < 1){ printf("ERROR: grid dimension must be positive: %d\n", n); exit(EXIT_FAILURE); } if (RADIUS < 1) { printf("ERROR: Stencil radius %d should be positive\n", RADIUS); exit(EXIT_FAILURE); } if (2*RADIUS +1 > n) { printf("ERROR: Stencil radius %d exceeds grid size %d\n", RADIUS, n); exit(EXIT_FAILURE); } /* make sure the vector space can be represented */ total_length = n*n*sizeof(DTYPE); in = (DTYPE *) malloc(total_length); out = (DTYPE *) malloc(total_length); if (!in || !out) { printf("ERROR: could not allocate space for input or output array\n"); exit(EXIT_FAILURE); } /* fill the stencil weights to reflect a discrete divergence operator */ for (jj=-RADIUS; jj<=RADIUS; jj++) for (ii=-RADIUS; ii<=RADIUS; ii++) WEIGHT(ii,jj) = (DTYPE) 0.0; #ifdef STAR stencil_size = 4*RADIUS+1; for (ii=1; ii<=RADIUS; ii++) { WEIGHT(0, ii) = WEIGHT( ii,0) = (DTYPE) (1.0/(2.0*ii*RADIUS)); WEIGHT(0,-ii) = WEIGHT(-ii,0) = -(DTYPE) (1.0/(2.0*ii*RADIUS)); } #else stencil_size = (2*RADIUS+1)*(2*RADIUS+1); for (jj=1; jj<=RADIUS; jj++) { for (ii=-jj+1; ii<jj; ii++) { WEIGHT(ii,jj) = (DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS)); WEIGHT(ii,-jj) = -(DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS)); WEIGHT(jj,ii) = (DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS)); WEIGHT(-jj,ii) = -(DTYPE) (1.0/(4.0*jj*(2.0*jj-1)*RADIUS)); } WEIGHT(jj,jj) = (DTYPE) (1.0/(4.0*jj*RADIUS)); WEIGHT(-jj,-jj) = -(DTYPE) (1.0/(4.0*jj*RADIUS)); } #endif norm = (DTYPE) 0.0; f_active_points = (DTYPE) (n-2*RADIUS)*(DTYPE) 
(n-2*RADIUS); #pragma omp parallel private(i, j, ii, jj, it, jt, iter) { #pragma omp master { nthread = omp_get_num_threads(); printf("OpenMP stencil execution on 2D grid\n"); if (nthread != nthread_input) { num_error = 1; printf("ERROR: number of requested threads %d does not equal ", nthread_input); printf("number of spawned threads %d\n", nthread); } else { printf("Number of threads = %d\n",nthread_input); printf("Grid size = %d\n", n); printf("Radius of stencil = %d\n", RADIUS); printf("Number of iterations = %d\n", iterations); #ifdef STAR printf("Type of stencil = star\n"); #else printf("Type of stencil = compact\n"); #endif #ifdef DOUBLE printf("Data type = double precision\n"); #else printf("Data type = single precision\n"); #endif #ifndef PARALLELFOR printf("Parallel regions = fused (omp for)\n"); #else printf("Parallel regions = split (omp parallel for)\n"); #endif } } bail_out(num_error); #ifdef PARALLELFOR } #endif /* intialize the input and output arrays */ #ifdef PARALLELFOR #pragma omp parallel for private(i) #else #pragma omp for #endif for (j=0; j<n; j++) for (i=0; i<n; i++) IN(i,j) = COEFX*i+COEFY*j; #ifdef PARALLELFOR #pragma omp parallel for private(i) #else #pragma omp for #endif for (j=RADIUS; j<n-RADIUS; j++) for (i=RADIUS; i<n-RADIUS; i++) OUT(i,j) = (DTYPE)0.0; for (iter = 0; iter<=iterations; iter++){ /* start timer after a warmup iteration */ if (iter == 1) { #ifndef PARALLELFOR #pragma omp barrier #pragma omp master #endif { stencil_time = wtime(); } } #ifdef PARALLELFOR #pragma omp parallel for private(i, ii, jj) #else #pragma omp for #endif for (j=RADIUS; j<n-RADIUS; j++) { for (i=RADIUS; i<n-RADIUS; i++) { #ifdef STAR for (jj=-RADIUS; jj<=RADIUS; jj++) OUT(i,j) += WEIGHT(0,jj)*IN(i,j+jj); for (ii=-RADIUS; ii<0; ii++) OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j); for (ii=1; ii<=RADIUS; ii++) OUT(i,j) += WEIGHT(ii,0)*IN(i+ii,j); #else /* would like to be able to unroll this loop, but compiler will ignore */ for (jj=-RADIUS; jj<=RADIUS; jj++) for 
(ii=-RADIUS; ii<=RADIUS; ii++) OUT(i,j) += WEIGHT(ii,jj)*IN(i+ii,j+jj); #endif } } /* add constant to solution to force refresh of neighbor data, if any */ #ifdef PARALLELFOR #pragma omp parallel for private(i) #else #pragma omp for #endif for (j=0; j<n; j++) for (i=0; i<n; i++) IN(i,j)+= 1.0; } /* end of iterations */ #ifndef PARALLELFOR #pragma omp barrier #pragma omp master #endif { stencil_time = wtime() - stencil_time; } /* compute L1 norm in parallel */ #ifdef PARALLELFOR #pragma omp parallel for reduction(+:norm), private (i) #else #pragma omp for reduction(+:norm) #endif for (j=RADIUS; j<n-RADIUS; j++) for (i=RADIUS; i<n-RADIUS; i++) { norm += (DTYPE)ABS(OUT(i,j)); } #ifndef PARALLELFOR } /* end of OPENMP parallel region */ #endif norm /= f_active_points; /******************************************************************************* ** Analyze and output results. ********************************************************************************/ /* verify correctness */ reference_norm = (DTYPE) (iterations+1) * (COEFX + COEFY); if (ABS(norm-reference_norm) > EPSILON) { printf("ERROR: L1 norm = "FSTR", Reference L1 norm = "FSTR"\n", norm, reference_norm); exit(EXIT_FAILURE); } else { printf("Solution validates\n"); #ifdef VERBOSE printf("Reference L1 norm = "FSTR", L1 norm = "FSTR"\n", reference_norm, norm); #endif } flops = (DTYPE) (2*stencil_size+1) * f_active_points; avgtime = stencil_time/iterations; printf("Rate (MFlops/s): "FSTR" Avg time (s): %lf\n", 1.0E-06 * flops/avgtime, avgtime); exit(EXIT_SUCCESS); }
common.h
/*!
 * Copyright (c) 2016 Microsoft Corporation. All rights reserved.
 * Licensed under the MIT License. See LICENSE file in the project root for license information.
 */
#ifndef LIGHTGBM_UTILS_COMMON_FUN_H_
#define LIGHTGBM_UTILS_COMMON_FUN_H_

#include <LightGBM/utils/log.h>
#include <LightGBM/utils/openmp_wrapper.h>

// NOTE(review): this header uses std::chrono but does not include <chrono>
// directly — presumably it is pulled in transitively; confirm.
#include <limits>
#include <string>
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <iomanip>
#include <iterator>
#include <memory>
#include <sstream>
#include <type_traits>
#include <utility>
#include <vector>

#ifdef _MSC_VER
#include "intrin.h"
#endif

namespace LightGBM {

// Simple wall-clock stopwatch backed by std::chrono::high_resolution_clock.
// The clock starts on construction; start() resets it.
class Timer {
 protected:
  std::chrono::time_point<std::chrono::high_resolution_clock> start_;
 public:
  Timer() {
    start();
  }
  // Reset the reference point to "now".
  void start() {
    start_ = std::chrono::high_resolution_clock::now();
  }
  // Elapsed time since start() in nanoseconds.
  long ns() const {
    auto now = std::chrono::high_resolution_clock::now();
    return std::chrono::nanoseconds(now - start_).count();
  }
  // Elapsed time since start() in seconds (as a double).
  double s() const {
    auto now = std::chrono::high_resolution_clock::now();
    return 1.0 * std::chrono::nanoseconds(now - start_).count() / 1000 / 1000 / 1000;
  }
};

namespace Common {

// ASCII-only lowercase conversion ('A'..'Z' -> 'a'..'z'); other bytes pass through.
// ('Z' - 'z') is -32, so the subtraction adds 32.
inline static char tolower(char in) {
  if (in <= 'Z' && in >= 'A')
    return in - ('Z' - 'z');
  return in;
}

// Strip leading and trailing ASCII whitespace (space, \f, \n, \r, \t, \v).
inline static std::string Trim(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of(" \f\n\r\t\v") + 1);
  str.erase(0, str.find_first_not_of(" \f\n\r\t\v"));
  return str;
}

// Strip leading and trailing single/double quote characters.
inline static std::string RemoveQuotationSymbol(std::string str) {
  if (str.empty()) {
    return str;
  }
  str.erase(str.find_last_not_of("'\"") + 1);
  str.erase(0, str.find_first_not_of("'\""));
  return str;
}

// True if str begins with prefix.
inline static bool StartsWith(const std::string& str, const std::string prefix) {
  if (str.substr(0, prefix.size()) == prefix) {
    return true;
  } else {
    return false;
  }
}

// Split c_str on a single delimiter character; empty fields are skipped.
inline static std::vector<std::string> Split(const char* c_str, char delimiter) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;    // start of the current field
  size_t pos = 0;  // scan cursor
  while (pos < str.length()) {
    if (str[pos] == delimiter) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Split c_str into lines; handles \n, \r and \r\n endings. Empty lines are skipped.
inline static std::vector<std::string> SplitLines(const char* c_str) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    if (str[pos] == '\n' || str[pos] == '\r') {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      // skip the line endings
      while (str[pos] == '\n' || str[pos] == '\r') ++pos;
      // new begin
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Split c_str on any character in the NUL-terminated delimiters set;
// empty fields are skipped.
inline static std::vector<std::string> Split(const char* c_str, const char* delimiters) {
  std::vector<std::string> ret;
  std::string str(c_str);
  size_t i = 0;
  size_t pos = 0;
  while (pos < str.length()) {
    bool met_delimiters = false;
    for (int j = 0; delimiters[j] != '\0'; ++j) {
      if (str[pos] == delimiters[j]) {
        met_delimiters = true;
        break;
      }
    }
    if (met_delimiters) {
      if (i < pos) {
        ret.push_back(str.substr(i, pos - i));
      }
      ++pos;
      i = pos;
    } else {
      ++pos;
    }
  }
  if (i < pos) {
    ret.push_back(str.substr(i));
  }
  return ret;
}

// Parse a signed decimal integer of type T from p (surrounding spaces skipped).
// Stores the value in *out and returns the position after the parsed token.
// NOTE(review): no overflow detection — values wider than T wrap silently.
template<typename T>
inline static const char* Atoi(const char* p, T* out) {
  int sign;
  T value;
  while (*p == ' ') {
    ++p;
  }
  sign = 1;
  if (*p == '-') {
    sign = -1;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  for (value = 0; *p >= '0' && *p <= '9'; ++p) {
    value = value * 10 + (*p - '0');
  }
  *out = static_cast<T>(sign * value);
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Integer power: base^power as double, via exponentiation by squaring/cubing.
// Negative powers recurse through the reciprocal.
template<typename T>
inline static double Pow(T base, int power) {
  if (power < 0) {
    return 1.0 / Pow(base, -power);
  } else if (power == 0) {
    return 1;
  } else if (power % 2 == 0) {
    return Pow(base*base, power / 2);
  } else if (power % 3 == 0) {
    return Pow(base*base*base, power / 3);
  } else {
    return base * Pow(base, power - 1);
  }
}

// Hand-rolled strtod-like parser. Recognizes optional sign, digits, fraction,
// exponent, plus the tokens na/nan/null (-> NAN) and inf/infinity (-> +/-1e308).
// Any other non-numeric token is a fatal error. Returns the position after the
// token (trailing spaces consumed).
inline static const char* Atof(const char* p, double* out) {
  int frac;
  double sign, value, scale;
  *out = NAN;
  // Skip leading white space, if any.
  while (*p == ' ') {
    ++p;
  }
  // Get sign, if any.
  sign = 1.0;
  if (*p == '-') {
    sign = -1.0;
    ++p;
  } else if (*p == '+') {
    ++p;
  }
  // is a number
  if ((*p >= '0' && *p <= '9') || *p == '.' || *p == 'e' || *p == 'E') {
    // Get digits before decimal point or exponent, if any.
    for (value = 0.0; *p >= '0' && *p <= '9'; ++p) {
      value = value * 10.0 + (*p - '0');
    }
    // Get digits after decimal point, if any.
    if (*p == '.') {
      double right = 0.0;
      int nn = 0;
      ++p;
      while (*p >= '0' && *p <= '9') {
        right = (*p - '0') + right * 10.0;
        ++nn;
        ++p;
      }
      value += right / Pow(10.0, nn);
    }
    // Handle exponent, if any.
    frac = 0;
    scale = 1.0;
    if ((*p == 'e') || (*p == 'E')) {
      uint32_t expon;
      // Get sign of exponent, if any.
      ++p;
      if (*p == '-') {
        frac = 1;
        ++p;
      } else if (*p == '+') {
        ++p;
      }
      // Get digits of exponent, if any.
      for (expon = 0; *p >= '0' && *p <= '9'; ++p) {
        expon = expon * 10 + (*p - '0');
      }
      // Clamp to the largest double exponent.
      if (expon > 308) expon = 308;
      // Calculate scaling factor.
      while (expon >= 50) { scale *= 1E50; expon -= 50; }
      while (expon >= 8) { scale *= 1E8; expon -= 8; }
      while (expon > 0) { scale *= 10.0; expon -= 1; }
    }
    // Return signed and scaled floating point result.
    *out = sign * (frac ? (value / scale) : (value * scale));
  } else {
    // Not a number: scan the token up to a separator and match known keywords.
    size_t cnt = 0;
    while (*(p + cnt) != '\0' && *(p + cnt) != ' '
      && *(p + cnt) != '\t' && *(p + cnt) != ','
      && *(p + cnt) != '\n' && *(p + cnt) != '\r'
      && *(p + cnt) != ':') {
      ++cnt;
    }
    if (cnt > 0) {
      std::string tmp_str(p, cnt);
      std::transform(tmp_str.begin(), tmp_str.end(), tmp_str.begin(), Common::tolower);
      if (tmp_str == std::string("na") || tmp_str == std::string("nan")
        || tmp_str == std::string("null")) {
        *out = NAN;
      } else if (tmp_str == std::string("inf") || tmp_str == std::string("infinity")) {
        *out = sign * 1e308;
      } else {
        Log::Fatal("Unknown token %s in data file", tmp_str.c_str());
      }
      p += cnt;
    }
  }
  while (*p == ' ') {
    ++p;
  }
  return p;
}

// Parse an int and require the whole string to be consumed.
inline static bool AtoiAndCheck(const char* p, int* out) {
  const char* after = Atoi(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Parse a double and require the whole string to be consumed.
inline static bool AtofAndCheck(const char* p, double* out) {
  const char* after = Atof(p, out);
  if (*after != '\0') {
    return false;
  }
  return true;
}

// Number of decimal digits in n (1..10). On MSVC/GCC uses a bit-scan plus the
// approximation digits ~= (bits * 1233) >> 12 (log10(2) ~= 1233/4096), corrected
// against a powers-of-10 table; otherwise falls back to a comparison ladder.
inline static unsigned CountDecimalDigit32(uint32_t n) {
#if defined(_MSC_VER) || defined(__GNUC__)
  static const uint32_t powers_of_10[] = {
    0, 10, 100, 1000, 10000, 100000,
    1000000, 10000000, 100000000, 1000000000
  };
#ifdef _MSC_VER
  unsigned long i = 0;
  _BitScanReverse(&i, n | 1);
  uint32_t t = (i + 1) * 1233 >> 12;
#elif __GNUC__
  uint32_t t = (32 - __builtin_clz(n | 1)) * 1233 >> 12;
#endif
  return t - (n < powers_of_10[t]) + 1;
#else
  if (n < 10) return 1;
  if (n < 100) return 2;
  if (n < 1000) return 3;
  if (n < 10000) return 4;
  if (n < 100000) return 5;
  if (n < 1000000) return 6;
  if (n < 10000000) return 7;
  if (n < 100000000) return 8;
  if (n < 1000000000) return 9;
  return 10;
#endif
}

// Fast uint32 -> decimal string. Writes the NUL terminator first, then emits
// two digits at a time (back to front) from a 00..99 lookup table.
// Caller must provide a buffer of at least 11 bytes.
inline static void Uint32ToStr(uint32_t value, char* buffer) {
  const char kDigitsLut[200] = {
    '0', '0', '0', '1', '0', '2', '0', '3', '0', '4', '0', '5', '0', '6', '0', '7', '0', '8', '0', '9',
    '1', '0', '1', '1', '1', '2', '1', '3', '1', '4', '1', '5', '1', '6', '1', '7', '1', '8', '1', '9',
    '2', '0', '2', '1', '2', '2', '2', '3', '2', '4', '2', '5', '2', '6', '2', '7', '2', '8', '2', '9',
    '3', '0', '3', '1', '3', '2', '3', '3', '3', '4', '3', '5', '3', '6', '3', '7', '3', '8', '3', '9',
    '4', '0', '4', '1', '4', '2', '4', '3', '4', '4', '4', '5', '4', '6', '4', '7', '4', '8', '4', '9',
    '5', '0', '5', '1', '5', '2', '5', '3', '5', '4', '5', '5', '5', '6', '5', '7', '5', '8', '5', '9',
    '6', '0', '6', '1', '6', '2', '6', '3', '6', '4', '6', '5', '6', '6', '6', '7', '6', '8', '6', '9',
    '7', '0', '7', '1', '7', '2', '7', '3', '7', '4', '7', '5', '7', '6', '7', '7', '7', '8', '7', '9',
    '8', '0', '8', '1', '8', '2', '8', '3', '8', '4', '8', '5', '8', '6', '8', '7', '8', '8', '8', '9',
    '9', '0', '9', '1', '9', '2', '9', '3', '9', '4', '9', '5', '9', '6', '9', '7', '9', '8', '9', '9'
  };
  unsigned digit = CountDecimalDigit32(value);
  buffer += digit;
  *buffer = '\0';
  while (value >= 100) {
    const unsigned i = (value % 100) << 1;
    value /= 100;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
  if (value < 10) {
    *--buffer = static_cast<char>(value) + '0';
  } else {
    const unsigned i = value << 1;
    *--buffer = kDigitsLut[i + 1];
    *--buffer = kDigitsLut[i];
  }
}

// int32 -> decimal string; negates via two's complement (~u + 1) so INT32_MIN
// is handled without signed overflow.
inline static void Int32ToStr(int32_t value, char* buffer) {
  uint32_t u = static_cast<uint32_t>(value);
  if (value < 0) {
    *buffer++ = '-';
    u = ~u + 1;
  }
  Uint32ToStr(u, buffer);
}

// double -> string with 17 significant digits (round-trip precision).
// The buffer_len parameter is only consumed by the MSVC sprintf_s path.
inline static void DoubleToStr(double value, char* buffer, size_t
  #ifdef _MSC_VER
  buffer_len
  #endif
) {
  #ifdef _MSC_VER
  sprintf_s(buffer, buffer_len, "%.17g", value);
  #else
  sprintf(buffer, "%.17g", value);
  #endif
}

// Advance past spaces and tabs.
inline static const char* SkipSpaceAndTab(const char* p) {
  while (*p == ' ' || *p == '\t') {
    ++p;
  }
  return p;
}

// Advance past newline/carriage-return characters (and spaces).
inline static const char* SkipReturn(const char* p) {
  while (*p == '\n' || *p == '\r' || *p == ' ') {
    ++p;
  }
  return p;
}

// Element-wise static_cast of a vector<T> to vector<T2>.
template<typename T, typename T2>
inline static std::vector<T2> ArrayCast(const std::vector<T>& arr) {
  std::vector<T2> ret(arr.size());
  for (size_t i = 0; i < arr.size(); ++i) {
    ret[i] = static_cast<T2>(arr[i]);
  }
  return ret;
}

// Fast value->string helpers dispatched on float/unsigned traits.
// Primary template: signed integers via Int32ToStr.
template<typename T, bool is_float, bool is_unsign>
struct __TToStringHelperFast {
  void operator()(T value, char* buffer, size_t) const {
    Int32ToStr(value, buffer);
  }
};

// Floating-point specialization: "%g" formatting.
template<typename T>
struct __TToStringHelperFast<T, true, false> {
  void operator()(T value, char* buffer, size_t
    #ifdef _MSC_VER
    buf_len
    #endif
  ) const {
    #ifdef _MSC_VER
    sprintf_s(buffer, buf_len, "%g", value);
    #else
    sprintf(buffer, "%g", value);
    #endif
  }
};

// Unsigned integer specialization: Uint32ToStr.
template<typename T>
struct __TToStringHelperFast<T, false, true> {
  void operator()(T value, char* buffer, size_t) const {
    Uint32ToStr(value, buffer);
  }
};

// Join up to n elements of arr, space-separated, using the fast formatters.
template<typename T>
inline static std::string ArrayToStringFast(const std::vector<T>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  __TToStringHelperFast<T, std::is_floating_point<T>::value, std::is_unsigned<T>::value> helper;
  const size_t buf_len = 16;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  helper(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    helper(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

// Join up to n doubles, space-separated, at full (%.17g) precision.
inline static std::string ArrayToString(const std::vector<double>& arr, size_t n) {
  if (arr.empty() || n == 0) {
    return std::string("");
  }
  const size_t buf_len = 32;
  std::vector<char> buffer(buf_len);
  std::stringstream str_buf;
  DoubleToStr(arr[0], buffer.data(), buf_len);
  str_buf << buffer.data();
  for (size_t i = 1; i < std::min(n, arr.size()); ++i) {
    DoubleToStr(arr[i], buffer.data(), buf_len);
    str_buf << ' ' << buffer.data();
  }
  return str_buf.str();
}

// String->value helpers dispatched on the float trait.
// Primary template: integer parse via Atoi.
template<typename T, bool is_float>
struct __StringToTHelper {
  T operator()(const std::string& str) const {
    T ret = 0;
    Atoi(str.c_str(), &ret);
    return ret;
  }
};

// Floating-point specialization: std::stod (not the custom Atof).
template<typename T>
struct __StringToTHelper<T, true> {
  T operator()(const std::string& str) const {
    return static_cast<T>(std::stod(str));
  }
};

// Split str on delimiter and parse each field as T.
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, char delimiter) {
  std::vector<std::string> strs = Split(str.c_str(), delimiter);
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// Split str on spaces and parse exactly n values of T (fatal via CHECK otherwise).
template<typename T>
inline static std::vector<T> StringToArray(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  std::vector<std::string> strs = Split(str.c_str(), ' ');
  CHECK(strs.size() == static_cast<size_t>(n));
  std::vector<T> ret;
  ret.reserve(strs.size());
  __StringToTHelper<T, std::is_floating_point<T>::value> helper;
  for (const auto& s : strs) {
    ret.push_back(helper(s));
  }
  return ret;
}

// In-place parsing helpers that return the advanced cursor (no intermediate
// std::string per field). Primary template: integers via Atoi.
template<typename T, bool is_float>
struct __StringToTHelperFast {
  const char* operator()(const char*p, T* out) const {
    return Atoi(p, out);
  }
};

// Floating-point specialization: custom Atof (note: NOT std::stod, so results
// may differ slightly from __StringToTHelper for floats).
template<typename T>
struct __StringToTHelperFast<T, true> {
  const char* operator()(const char*p, T* out) const {
    double tmp = 0.0f;
    auto ret = Atof(p, &tmp);
    *out = static_cast<T>(tmp);
    return ret;
  }
};

// Parse exactly n space-separated values of T by walking the buffer once.
template<typename T>
inline static std::vector<T> StringToArrayFast(const std::string& str, int n) {
  if (n == 0) {
    return std::vector<T>();
  }
  auto p_str = str.c_str();
  __StringToTHelperFast<T, std::is_floating_point<T>::value> helper;
  std::vector<T> ret(n);
  for (int i = 0; i < n; ++i) {
    p_str = helper(p_str, &ret[i]);
  }
  return ret;
}

// Join all elements with delimiter, streaming at double round-trip precision.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[0];
  for (size_t i = 1; i < strs.size(); ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// int8_t specialization: widen to int16_t so values print as numbers,
// not as raw characters.
template<>
inline std::string Join<int8_t>(const std::vector<int8_t>& strs, const char* delimiter) {
  if (strs.empty()) {
    return std::string("");
  }
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << static_cast<int16_t>(strs[0]);
  for (size_t i = 1; i < strs.size(); ++i) {
    str_buf << delimiter;
    str_buf << static_cast<int16_t>(strs[i]);
  }
  return str_buf.str();
}

// Join elements in [start, end) with delimiter.
// NOTE(review): start/end are size_t, so "end - start <= 0" only catches
// end == start (unsigned difference cannot be negative), and the std::min on
// start underflows when strs is empty — callers presumably guarantee
// non-empty input and start < end; confirm.
template<typename T>
inline static std::string Join(const std::vector<T>& strs, size_t start, size_t end, const char* delimiter) {
  if (end - start <= 0) {
    return std::string("");
  }
  start = std::min(start, static_cast<size_t>(strs.size()) - 1);
  end = std::min(end, static_cast<size_t>(strs.size()));
  std::stringstream str_buf;
  str_buf << std::setprecision(std::numeric_limits<double>::digits10 + 2);
  str_buf << strs[start];
  for (size_t i = start + 1; i < end; ++i) {
    str_buf << delimiter;
    str_buf << strs[i];
  }
  return str_buf.str();
}

// Smallest power of two >= x (returns 0 if no such value fits in 64 shifts).
inline static int64_t Pow2RoundUp(int64_t x) {
  int64_t t = 1;
  for (int i = 0; i < 64; ++i) {
    if (t >= x) {
      return t;
    }
    t <<= 1;
  }
  return 0;
}

/*!
 * \brief Do inplace softmax transformaton on p_rec
 * \param p_rec The input/output vector of the values.
 */
inline static void Softmax(std::vector<double>* p_rec) {
  std::vector<double> &rec = *p_rec;
  // Subtract the max before exponentiating for numerical stability.
  double wmax = rec[0];
  for (size_t i = 1; i < rec.size(); ++i) {
    wmax = std::max(rec[i], wmax);
  }
  double wsum = 0.0f;
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] = std::exp(rec[i] - wmax);
    wsum += rec[i];
  }
  for (size_t i = 0; i < rec.size(); ++i) {
    rec[i] /= static_cast<double>(wsum);
  }
}

// Softmax over a raw array: output[i] = exp(input[i] - max) / sum.
inline static void Softmax(const double* input, double* output, int len) {
  double wmax = input[0];
  for (int i = 1; i < len; ++i) {
    wmax = std::max(input[i], wmax);
  }
  double wsum = 0.0f;
  for (int i = 0; i < len; ++i) {
    output[i] = std::exp(input[i] - wmax);
    wsum += output[i];
  }
  for (int i = 0; i < len; ++i) {
    output[i] /= static_cast<double>(wsum);
  }
}

// View a vector of unique_ptr<T> as a vector of raw const T* (no ownership
// transfer; pointers are valid only while `input` is alive and unmodified).
template<typename T>
std::vector<const T*> ConstPtrInVectorWrapper(const std::vector<std::unique_ptr<T>>& input) {
  std::vector<const T*> ret;
  for (size_t i = 0; i < input.size(); ++i) {
    ret.push_back(input.at(i).get());
  }
  return ret;
}

// Stable-sort keys[start..] and reorder values[start..] to match, ascending by
// default or descending when is_reverse is set.
template<typename T1, typename T2>
inline static void SortForPair(std::vector<T1>* keys, std::vector<T2>* values, size_t start, bool is_reverse = false) {
  std::vector<std::pair<T1, T2>> arr;
  for (size_t i = start; i < keys->size(); ++i) {
    arr.emplace_back(keys->at(i), values->at(i));
  }
  if (!is_reverse) {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first < b.first;
    });
  } else {
    std::stable_sort(arr.begin(), arr.end(), [](const std::pair<T1, T2>& a, const std::pair<T1, T2>& b) {
      return a.first > b.first;
    });
  }
  for (size_t i = start; i < arr.size(); ++i) {
    keys->at(i) = arr[i].first;
    values->at(i) = arr[i].second;
  }
}

// Collect the .data() pointer of each inner vector (valid until reallocation).
template <typename T>
inline static std::vector<T*> Vector2Ptr(std::vector<std::vector<T>>* data) {
  std::vector<T*> ptr(data->size());
  for (size_t i = 0; i < data->size(); ++i) {
    ptr[i] = data->at(i).data();
  }
  return ptr;
}

// Collect the size of each inner vector, truncated to int.
template <typename T>
inline static std::vector<int> VectorSize(const std::vector<std::vector<T>>& data) {
  std::vector<int> ret(data.size());
  for (size_t i = 0; i < data.size(); ++i) {
    ret[i] = static_cast<int>(data[i].size());
  }
  return ret;
}

// Clamp a double into [-1e300, 1e300]; NaN maps to 0.
inline static double AvoidInf(double x) {
  if (std::isnan(x)) {
    return 0.0;
  } else if (x >= 1e300) {
    return 1e300;
  } else if (x <= -1e300) {
    return -1e300;
  } else {
    return x;
  }
}

// Clamp a float into [-1e38, 1e38]; NaN maps to 0.
inline static float AvoidInf(float x) {
  if (std::isnan(x)) {
    return 0.0f;
  } else if (x >= 1e38) {
    return 1e38f;
  } else if (x <= -1e38) {
    return -1e38f;
  } else {
    return x;
  }
}

// Compile-time helper: yields a null pointer typed as the iterator's value
// type, used to deduce _VTRanIt for ParallelSort below.
template<typename _Iter> inline
static typename std::iterator_traits<_Iter>::value_type* IteratorValType(_Iter) {
  return (0);
}

// Parallel merge sort: each OpenMP thread std::sorts a contiguous chunk of at
// least kMinInnerLen elements, then adjacent sorted runs are pairwise merged
// (doubling run length each pass) until the whole range is sorted.
// Falls back to plain std::sort for small inputs or a single thread.
template<typename _RanIt, typename _Pr, typename _VTRanIt> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred, _VTRanIt*) {
  size_t len = _Last - _First;
  const size_t kMinInnerLen = 1024;
  int num_threads = 1;
  #pragma omp parallel
  #pragma omp master
  {
    num_threads = omp_get_num_threads();
  }
  if (len <= kMinInnerLen || num_threads <= 1) {
    std::sort(_First, _Last, _Pred);
    return;
  }
  size_t inner_size = (len + num_threads - 1) / num_threads;
  inner_size = std::max(inner_size, kMinInnerLen);
  num_threads = static_cast<int>((len + inner_size - 1) / inner_size);
  // Sort each chunk independently.
  #pragma omp parallel for schedule(static, 1)
  for (int i = 0; i < num_threads; ++i) {
    size_t left = inner_size*i;
    size_t right = left + inner_size;
    right = std::min(right, len);
    if (right > left) {
      std::sort(_First + left, _First + right, _Pred);
    }
  }
  // Buffer for merge.
  std::vector<_VTRanIt> temp_buf(len);
  _RanIt buf = temp_buf.begin();
  size_t s = inner_size;
  // Recursive merge
  while (s < len) {
    int loop_size = static_cast<int>((len + s * 2 - 1) / (s * 2));
    #pragma omp parallel for schedule(static, 1)
    for (int i = 0; i < loop_size; ++i) {
      size_t left = i * 2 * s;
      size_t mid = left + s;
      size_t right = mid + s;
      right = std::min(len, right);
      if (mid >= right) { continue; }
      // Copy the left run aside, then merge it with the right run in place.
      std::copy(_First + left, _First + mid, buf + left);
      std::merge(buf + left, buf + mid, _First + mid, _First + right, _First + left, _Pred);
    }
    s *= 2;
  }
}

// Convenience overload that deduces the value type automatically.
template<typename _RanIt, typename _Pr> inline
static void ParallelSort(_RanIt _First, _RanIt _Last, _Pr _Pred) {
  return ParallelSort(_First, _Last, _Pred, IteratorValType(_First));
}

// Check that all y[] are in interval [ymin, ymax] (end points included); throws error if not
// Elements are checked pairwise (min of each pair against ymin, max against
// ymax) to halve the number of comparisons; a trailing odd element is checked
// separately.
template <typename T>
inline static void CheckElementsIntervalClosed(const T *y, T ymin, T ymax, int ny, const char *callername) {
  auto fatal_msg = [&y, &ymin, &ymax, &callername](int i) {
    std::ostringstream os;
    os << "[%s]: does not tolerate element [#%i = " << y[i] << "] outside [" << ymin << ", " << ymax << "]";
    Log::Fatal(os.str().c_str(), callername, i);
  };
  for (int i = 1; i < ny; i += 2) {
    if (y[i - 1] < y[i]) {
      if (y[i - 1] < ymin) {
        fatal_msg(i - 1);
      } else if (y[i] > ymax) {
        fatal_msg(i);
      }
    } else {
      if (y[i - 1] > ymax) {
        fatal_msg(i - 1);
      } else if (y[i] < ymin) {
        fatal_msg(i);
      }
    }
  }
  if (ny & 1) {  // odd
    if (y[ny - 1] < ymin || y[ny - 1] > ymax) {
      fatal_msg(ny - 1);
    }
  }
}

// One-pass scan over array w with nw elements: find min, max and sum of elements;
// this is useful for checking weight requirements.
// Processes elements in pairs (one comparison orders the pair, then min/max
// update separately); any of mi/ma/su may be nullptr to skip that output.
// NOTE(review): assumes nw >= 1 (and reads w[1] when nw is even) — callers
// presumably guarantee a non-empty array; confirm.
template <typename T1, typename T2>
inline static void ObtainMinMaxSum(const T1 *w, int nw, T1 *mi, T1 *ma, T2 *su) {
  T1 minw;
  T1 maxw;
  T1 sumw;
  int i;
  if (nw & 1) {  // odd
    minw = w[0];
    maxw = w[0];
    sumw = w[0];
    i = 2;
  } else {  // even
    if (w[0] < w[1]) {
      minw = w[0];
      maxw = w[1];
    } else {
      minw = w[1];
      maxw = w[0];
    }
    sumw = w[0] + w[1];
    i = 3;
  }
  for (; i < nw; i += 2) {
    if (w[i - 1] < w[i]) {
      minw = std::min(minw, w[i - 1]);
      maxw = std::max(maxw, w[i]);
    } else {
      minw = std::min(minw, w[i]);
      maxw = std::max(maxw, w[i - 1]);
    }
    sumw += w[i - 1] + w[i];
  }
  if (mi != nullptr) {
    *mi = minw;
  }
  if (ma != nullptr) {
    *ma = maxw;
  }
  if (su != nullptr) {
    *su = static_cast<T2>(sumw);
  }
}

// Allocate a zeroed bitset with capacity for n bits (32 bits per word).
inline static std::vector<uint32_t> EmptyBitset(int n) {
  int size = n / 32;
  if (n % 32 != 0) ++size;
  return std::vector<uint32_t>(size);
}

// Set bit `val` in the bitset, growing it as needed.
template<typename T>
inline static void InsertBitset(std::vector<uint32_t>* vec, const T val) {
  int i1 = val / 32;
  int i2 = val % 32;
  if (static_cast<int>(vec->size()) < i1 + 1) {
    vec->resize(i1 + 1, 0);
  }
  vec->at(i1) |= (1 << i2);
}

// Build a bitset with bits vals[0..n-1] set.
template<typename T>
inline static std::vector<uint32_t> ConstructBitset(const T* vals, int n) {
  std::vector<uint32_t> ret;
  for (int i = 0; i < n; ++i) {
    int i1 = vals[i] / 32;
    int i2 = vals[i] % 32;
    if (static_cast<int>(ret.size()) < i1 + 1) {
      ret.resize(i1 + 1, 0);
    }
    ret[i1] |= (1 << i2);
  }
  return ret;
}

// Test bit `pos` in a bitset of n 32-bit words; out-of-range bits read as false.
template<typename T>
inline static bool FindInBitset(const uint32_t* bits, int n, T pos) {
  int i1 = pos / 32;
  if (i1 >= n) {
    return false;
  }
  int i2 = pos % 32;
  return (bits[i1] >> i2) & 1;
}

// True if b <= nextafter(a, +inf), i.e. b does not exceed a by more than one ULP.
inline static bool CheckDoubleEqualOrdered(double a, double b) {
  double upper = std::nextafter(a, INFINITY);
  return b <= upper;
}

// The next representable double above a.
inline static double GetDoubleUpperBound(double a) {
  return std::nextafter(a, INFINITY);;
}

// Length of the current line (bytes up to NUL, \n or \r).
inline static size_t GetLine(const char* str) {
  auto start = str;
  while (*str != '\0' && *str != '\n' && *str != '\r') {
    ++str;
  }
  return str - start;
}

// Advance past a single \r, \n or \r\n line ending.
inline static const char* SkipNewLine(const char* str) {
  if (*str == '\r') {
    ++str;
  }
  if (*str == '\n') {
    ++str;
  }
  return str;
}

// Sign of x: -1, 0 or +1.
template <typename T>
static int Sign(T x) {
  return (x > T(0)) - (x < T(0));
}

// log(x) for x > 0, -inf otherwise (avoids NaN from log of non-positive input).
template <typename T>
static T SafeLog(T x) {
  if (x > 0) {
    return std::log(x);
  } else {
    return -INFINITY;
  }
}

// True if every byte of s is 7-bit ASCII.
inline bool CheckASCII(const std::string& s) {
  for (auto c : s) {
    if (static_cast<unsigned char>(c) > 127) {
      return false;
    }
  }
  return true;
}

}  // namespace Common

}  // namespace LightGBM

#endif   // LIGHTGBM_UTILS_COMMON_FUN_H_
erotima_3.c
/*
 * erotima_3.c — OpenMP simulation of a ring of n coupled leaky
 * integrate-and-fire-style units: each u[i] integrates toward mu, is coupled
 * to its 2*r ring neighbors through the weight matrix sigma, and resets to 0
 * when it crosses the threshold uth (presumably a Kuramoto/chimera-state
 * style experiment — inferred from the omega (spike-frequency) output;
 * confirm with the authors).
 *
 * Outputs: spacetime.out (u snapshots per sampled step) and omega.out
 * (per-unit mean angular frequency), plus timing statistics on stdout.
 *
 * NOTE(review): cblas_dgemv is called below but neither <cblas.h> nor a
 * prototype is included — this is an implicit function declaration (an error
 * in C99+). Add `#include <cblas.h>` and link against a BLAS.
 */
#include <stdio.h>
#include <math.h>
#include <getopt.h>
#include <stdlib.h>
#include <sys/time.h>
#include <omp.h>
#include <string.h>

/* Parameter defaults and lower bounds (overridable via command line). */
#define MIN_NUM_OF_NEURONS (1L)
#define DEF_NUM_OF_NEURONS (1000L)
#define MIN_NUM_OF_NEIGHBORS (0L)
#define DEF_NUM_OF_NEIGHBORS (300L)
#define DEF_DT (1.0e-04)
#define DEF_MU (1.0)
#define DEF_UTH (0.98)
#define DEF_S_MIN (0.7)
#define DEF_S_MAX (0.7)
#define DEF_SIM_TIME (20L)
#define DEF_TTRANSIENT (-1L)    /* -1 = "not set"; defaults to half the run */

/* NOTE(review): declared but never defined or called in this file. */
void print_thread();

/* Long command-line options; each maps to a short case label in main(). */
static struct option long_options[] = {
  {"dt",        required_argument, 0, 'a'},
  {"mu",        required_argument, 0, 'b'},
  {"uth",       required_argument, 0, 'c'},
  {"time",      required_argument, 0, 'd'},
  {"transient", required_argument, 0, 'e'},
  {"s_min",     required_argument, 0, 'f'},
  {"s_max",     required_argument, 0, 'g'},
  {"n",         required_argument, 0, 'n'},
  {"r",         required_argument, 0, 'r'},
  {0, 0, 0, 0}
};

int main(int argc, char *argv[]) {
  FILE *output1, *output2;          /* spacetime.out / omega.out */
  long n, r;                        /* number of units / one-sided neighbors */
  long i, j;
  long it;                          /* time-step index */
  double divide;                    /* normalization 2*r for the coupling term */
  double dt;
  double tstep;
  long ntstep;                      /* steps per second = 1/dt */
  long sim_time;                    /* total simulated seconds */
  long ttransient;                  /* steps to skip before counting spikes */
  long itime;                       /* total number of time steps */
  double uth;                       /* firing threshold */
  double mu;                        /* drift target */
  double s_min;
  double s_max;                     /* coupling weights drawn from [s_min, s_max] */
  double *u, *uplus, *sigma, *omega, *omega1, *temp_u;
  // double sum;
  double time;
  struct timeval global_start, global_end, IO_start, IO_end;
  double global_usec, IO_usec = 0.0;
  int c, option_index;
  char *end_ptr;

  /* Defaults; overridden by getopt below. */
  n = DEF_NUM_OF_NEURONS;
  r = DEF_NUM_OF_NEIGHBORS;
  dt = DEF_DT;
  mu = DEF_MU;
  uth = DEF_UTH;
  s_min = DEF_S_MIN;
  s_max = DEF_S_MAX;
  sim_time = DEF_SIM_TIME;
  ttransient = DEF_TTRANSIENT;

  /* Parse command-line options; every numeric argument is validated with
   * strtod/strtol and its end pointer. */
  while (1) {
    c = getopt_long (argc, argv, "+n:r:", long_options, &option_index);
    if (c == -1) {
      break;
    }
    switch (c) {
      case 'a':
        dt = strtod(optarg, &end_ptr);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (dt <= 0.0) {
          printf("Option \"%s\": \"dt\" must be larger than zero.\n", long_options[option_index].name);
          exit(1);
        }
        break;
      case 'b':
        mu = strtod(optarg, &end_ptr);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (mu <= 0.0) {
          printf("Option \"%s\": \"mu\" must be larger than zero.\n", long_options[option_index].name);
          exit(1);
        }
        break;
      case 'c':
        uth = strtod(optarg, &end_ptr);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (uth <= 0.0) {
          printf("Option \"%s\": \"uth\" must be larger than zero.\n", long_options[option_index].name);
          exit(1);
        }
        break;
      case 'd':
        sim_time = strtol(optarg, &end_ptr, 10);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (sim_time < 1) {
          printf("Option \"%s\": Total simulation time must be larger than zero.\n", long_options[option_index].name);
          exit(1);
        }
        break;
      case 'e':
        ttransient = strtol(optarg, &end_ptr, 10);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (ttransient < 0) {
          printf("Option \"%s\": \"ttransient\" must be larger or equal than zero.\n", long_options[option_index].name);
          exit(1);
        }
        break;
      case 'f':
        s_min = strtod(optarg, &end_ptr);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (s_min <= 0.0) {
          printf("Option \"%s\": \"s_min\" must be larger than zero.\n", long_options[option_index].name);
          exit(1);
        }
        break;
      case 'g':
        s_max = strtod(optarg, &end_ptr);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (s_max <= 0.0) {
          printf("Option \"%s\": \"s_max\" must be larger than zero.\n", long_options[option_index].name);
          exit(1);
        }
        break;
      case 'n':
        /* NOTE(review): when -n/-r are given as SHORT options, option_index
         * is not set by getopt_long, so the error messages below may print a
         * stale option name. */
        n = strtol(optarg, &end_ptr, 10);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (n < MIN_NUM_OF_NEURONS) {
          printf("Option \"%s\": Number of neurons must be at least %ld.\n", long_options[option_index].name, MIN_NUM_OF_NEURONS);
          exit(1);
        }
        break;
      case 'r':
        r = strtol(optarg, &end_ptr, 10);
        if (*end_ptr != '\0') {
          printf("Option \"%s\": Invalid argument \"%s\".\n", long_options[option_index].name, optarg);
          exit(1);
        }
        if (r < MIN_NUM_OF_NEIGHBORS) {
          printf("Option \"%s\": Number of neighbors must be at least %ld.\n", long_options[option_index].name, MIN_NUM_OF_NEIGHBORS);
          exit(1);
        }
        break;
      case '?':
      default:
        exit(1);
        break;
    }
  }
  if (optind != argc) {
    printf("Unknown option \"%s\".\n", argv[optind]);
    exit(1);
  }

  /* Cross-parameter sanity checks. */
  if (2 * r + 1 > n) {
    printf("Total number of neighbors and reference neuron (2 * %ld + 1 = %ld) cannot exceed number of neurons (%ld).\n", r, 2 * r + 1, n);
    exit(1);
  }
  if (s_min > s_max) {
    printf("s_min (%17.15f) must be smaller or equal than s_max (%17.15f).\n", s_min, s_max);
    exit(1);
  }

  /* Derived quantities: steps per second and total/transient step counts. */
  divide = (double)(2 * r);
  tstep = 1.0 / dt;
  ntstep = (long)tstep;
  if (ttransient == DEF_TTRANSIENT) {
    /* Default transient: first half of the run. */
    ttransient = (sim_time * ntstep) / 2;
  } else {
    ttransient *= ntstep;   /* convert seconds -> steps */
  }
  itime = sim_time * ntstep;

  printf("Running simulation with following parameters:\n");
  printf(" Number of neurons : %ld\n", n);
  printf(" Numger of neighbours: %ld\n", r);
  printf(" Simulation time : %ld seconds (%ld time steps)\n", sim_time, itime);
  printf(" Transient time : %ld seconds (%ld time steps)\n", ttransient / ntstep, ttransient);
  printf(" dt : %.1e seconds \n", dt);
  printf(" mu : %17.15f\n", mu);
  printf(" uth : %17.15f\n", uth);
  printf(" s_min : %17.15f\n", s_min);
  printf(" s_max : %17.15f\n", s_max);

  output1 = fopen("spacetime.out", "w");
  if (output1 == NULL) {
    printf("Could not open file \"spacetime.out\"");
    exit(1);
  }
  output2 = fopen("omega.out", "w");
  if (output2 == NULL) {
    printf("Could not open file \"omega.out\"");
    exit(1);
  }

  /* State arrays: u = current state, uplus = next state, sigma = n*n coupling
   * matrix, omega1 = spike counts, omega = derived frequencies.
   * NOTE(review): none of these allocations (nor the buffers below) are
   * freed before return — presumably acceptable for a one-shot program. */
  u = (double *)calloc(n, sizeof(double));
  if (u == NULL) {
    printf("Could not allocate memory for \"u\".\n");
    exit(1);
  }
  uplus = (double *)calloc(n, sizeof(double));
  if (uplus == NULL) {
    printf("Could not allocate memory for \"uplus\".\n");
    exit(1);
  }
  sigma = (double *)calloc(n * n, sizeof(double));
  if (sigma == NULL) {
    printf("Could not allocate memory for \"sigma\".\n");
    exit(1);
  }
  omega = (double *)calloc(n, sizeof(double));
  if (omega == NULL) {
    printf("Could not allocate memory for \"omega\".\n");
    exit(1);
  }
  omega1 = (double *)calloc(n, sizeof(double));
  if (omega1 == NULL) {
    printf("Could not allocate memory for \"omega1\".\n");
    exit(1);
  }
  temp_u = (double *)calloc(n, sizeof(double));
  if (temp_u == NULL) {
    printf("Could not allocate memory for \"temp_u\".\n");
    exit(1);
  }

  /* Random initial conditions, manually unrolled 10x.
   * NOTE(review): the i < n condition is only checked once per 10 writes, so
   * when n is not a multiple of 10 this writes past the end of u[] — a
   * heap-buffer overflow. The unroll should fall back to a plain loop (or
   * guard each write). */
  for (i = 0; i < n;) {
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
    u[i] = drand48();
    // temp_u[i] = u[i] + dt * (mu - u[i]);
    printf("%ld\t%f\n", i, u[i]);
    i++;
  }

  /* Read connectivity matrix sigma[n][n] from file or construct connectivity matrix. */
  /* Ring topology: row i has random weights for the r neighbors on each side
   * (indices wrap around at the edges); all other entries stay 0. */
  for (i = 0; i < r; i++) {
    for (j = 0; j < i + r + 1; j++) {
      sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
    }
    for (j = n - r + i; j < n; j++) {
      sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
    }
  }
  for (i = r; i < n - r; i++) {
    for (j = 0; j < 2 * r + 1; j++) {
      sigma[i * n + j + i - r] = s_min + (s_max - s_min) * drand48();
    }
  }
  for (i = n - r; i < n; i++) {
    for (j = 0; j < i - n + r + 1; j++) {
      sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
    }
    for (j = i - r; j < n; j++) {
      sigma[i * n + j] = s_min + (s_max - s_min) * drand48();
    }
  }

#if 0
  for (i = 0; i < n; i++) {
    for (j = 0; j < n; j++) {
      printf("%4.1f", sigma[i * n + j]);
    }
    printf("\n");
  }
#endif

  /* Temporal iteration. */
  gettimeofday(&global_start, NULL);
  double sum;
  double temp;
  int step;
  /* semi_sum = sum of row 0 of sigma, used as a uniform self-coupling term
   * (note: row 0 only — assumes all rows sum to roughly the same value). */
  double semi_sum = 0.0;
  // // omp_set_dynamic(0); // Explicitly disable dynamic teams
  // omp_set_num_threads(4); // Use 4 threads for all consecutive parallel regions
  for (i = 0; i < n; i++) {
    semi_sum += sigma[i];
  }
  int end ;
  int index = 0;       /* write cursor into spacetime_buf/omega_buf */
  int time_index = 0;  /* write cursor into time_buf */
  double *spacetime_buf;//buffer gia to spacetime
  spacetime_buf = (double *)calloc(n * itime, sizeof(double));
  double *omega_buf;//buffer gia to omega
  omega_buf = (double *)calloc(n * itime, sizeof(double));
  long int *time_buf;
  time_buf = (long int *)calloc(itime, sizeof(long int));
  double *final_sigma = (double *)calloc(n, sizeof(double));
  int total_threads;

  /* Parallel region: each thread owns a contiguous band of n/total_threads
   * rows of sigma and computes its slice of sigma*u with cblas_dgemv, then
   * the Euler update is shared across threads with omp for.
   * NOTE(review): total_threads is written by every thread with no
   * synchronization (benign only because all write the same value), and the
   * decomposition assumes n % total_threads == 0 — remainder rows are
   * silently dropped. The per-thread calloc of temp_u is never freed. */
  #pragma omp parallel private(it,i,j,step) firstprivate(n,sigma,dt,mu,semi_sum)
  {
    double *temp_u;
    int thread_id = omp_get_thread_num();
    int array_pos, position;
    total_threads = omp_get_num_threads();
    position = (n / total_threads) * n * thread_id;   /* row offset into sigma */
    array_pos = (n / total_threads) * thread_id;      /* offset into final_sigma */
    temp_u = (double *)calloc(n / total_threads, sizeof(double));
    for (it = 0; it < itime; it++) {
      /* temp_u = sigma[band] * u  (matrix-vector product for this band). */
      cblas_dgemv(CblasRowMajor, CblasNoTrans, n / total_threads, n, 1.0, sigma + position, n, u, 1 , 0.0, temp_u, 1);
      memcpy( final_sigma + array_pos, temp_u, n / total_threads * sizeof * final_sigma );
      /* Euler step with coupling, then threshold-and-reset. */
      #pragma omp for schedule(static,8)
      for (i = 0; i < n; i++) {
        step = i * n;
        #pragma omp atomic write
        uplus[i] = ( u[i] + dt * (mu - u[i])) + dt * (final_sigma[i] - semi_sum * u[i]) / divide;
        if ( uplus[i] > uth) {
          #pragma omp atomic write
          uplus[i] = 0.0;
          if (it >= ttransient) {
            /* Count a spike once past the transient. */
            #pragma omp atomic
            omega1[i] += 1.0;
          }
        }
      }
      #pragma omp barrier
      /* Commit the new state for the next iteration. */
      #pragma omp single
      memcpy(u, uplus, n * sizeof * u);
#if !defined(ALL_RESULTS)
      if (it % ntstep == 0) {   /* sample once per simulated second */
#endif
        /* Master thread buffers the snapshot; timing counts it as I/O. */
        #pragma omp master
        {
          printf("Time is %ld\n", it);
          gettimeofday(&IO_start, NULL);
          // fprintf(output1, "%ld\t", it);
          time_buf[time_index] = it;
          time_index++;
          // printf("time buf: %ld index: %d\n",time_buf[index],index);
          time = (double)it * dt;
          for (i = 0; i < n; i++) {
            // printf("index: %d\n",index);
            // fprintf(output1, "%19.15f", u[i]);
            omega[i] = 2.0 * M_PI * omega1[i] / (time - ttransient * dt);
            spacetime_buf[index] = u[i];
            omega_buf[index] = omega[i];
            index++;
          }
          gettimeofday(&IO_end, NULL);
          IO_usec += ((IO_end.tv_sec - IO_start.tv_sec) * 1000000.0 + (IO_end.tv_usec - IO_start.tv_usec));
        }//master end
#if !defined(ALL_RESULTS)
      }
#endif
    }
    /* Flush the buffered snapshots to the two output files; the two writers
     * run as independent tasks. */
    #pragma omp single
    {
      fprintf(output1, "%ld\t", time_buf[0]);
      fprintf(output2, "%ld\t", time_buf[0]);
      #pragma omp task firstprivate(index)
      {
        int time_index1 = 1;
        // printf("thread to space: %d\n", omp_get_thread_num());
        for (int i = 0; i < index; i++) {
          /* Every n values, start a new row labelled with its time step. */
          if ((i % (n) == 0) && i != 0) {
            fprintf(output1, "\n%ld\t", time_buf[time_index1]);
            time_index1++;
          }
          fprintf(output1, "%19.15f", spacetime_buf[i]);
        }
      }
      #pragma omp task firstprivate(index)
      {
        int time_index2 = 1;
        // printf("thread to omega: %d\n", omp_get_thread_num());
        for (int i = 0; i < index; i++) {
          if ((i % (n) == 0) && i != 0) {
            fprintf(output2, "\n%ld\t", time_buf[time_index2]);
            time_index2++;
          }
          fprintf(output2, "%19.15f", omega_buf[i]);
        }
      }
    }
  }//omp parallel

  /* Report timing: total wall time, with the buffered-snapshot time
   * subtracted out as "I/O". */
  gettimeofday(&global_end, NULL);
  global_usec = ((global_end.tv_sec - global_start.tv_sec) * 1000000.0 + (global_end.tv_usec - global_start.tv_usec));
  printf("Time for calculations = %13.6f sec\n", (global_usec - IO_usec) / 1000000.0);
  printf("Time for I/O = %13.6f sec\n", IO_usec / 1000000.0);
  printf("Total execution time = %13.6f sec\n", global_usec / 1000000.0);
  fclose(output1);
  fclose(output2);
  return 0;
}
3DConvolution.c
/**
 * 3DConvolution.c: This file was adapted from PolyBench/GPU 1.0 test suite
 * to run on GPU with OpenMP 4.0 pragmas and OpenCL driver.
 *
 * http://www.cse.ohio-state.edu/~pouchet/software/polybench/GPU
 *
 * Contacts: Marcio M Pereira <mpereira@ic.unicamp.br>
 *           Rafael Cardoso F Sousa <rafael.cardoso@students.ic.unicamp.br>
 *           Luís Felipe Mattos <ra107822@students.ic.unicamp.br>
 */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <time.h>
#include <unistd.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/* Provides SIZE, OMP_DEVICE_ID, ERROR_THRESHOLD, percentDiff, BENCHMARK_*,
 * DCE_PREVENT and LLVM_MCA_* macros. */
#include "BenchmarksUtil.h"

#define NI SIZE
#define NJ SIZE
#define NK SIZE

/* Can switch DATA_TYPE between float and double */
typedef float DATA_TYPE;

/* Sequential reference: 3D stencil over the interior of the NI x NJ x NK
 * volume with fixed coefficients c11..c33; boundary cells are left untouched.
 * NOTE(review): several terms reuse the (j - 1) * NK + (k - 1) offset (e.g.
 * the c21/c23/c31/c33 terms at k-1) — this looks like a copy-paste quirk but
 * faithfully mirrors the upstream PolyBench/GPU kernel, and the OMP version
 * below is kept byte-identical so the correctness comparison holds. Do not
 * "fix" one without the other. */
void conv3D(DATA_TYPE *A, DATA_TYPE *B) {
  int i, j, k;
  DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;

  c11 = +2;
  c21 = +5;
  c31 = -8;
  c12 = -3;
  c22 = +6;
  c32 = -9;
  c13 = +4;
  c23 = +7;
  c33 = +10;

  for (j = 1; j < NJ - 1; ++j) {
    for (i = 1; i < NI - 1; ++i) {
      for (k = 1; k < NK - 1; ++k) {
        B[i * (NK * NJ) + j * NK + k] =
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] +
            c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] +
            c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] +
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)];
      }
    }
  }
}

/* OpenMP offload version of conv3D: identical arithmetic, with the outer j
 * loop distributed across teams/threads on the target device. A is mapped
 * to the device, B mapped back from it. */
void conv3D_OMP(DATA_TYPE *A, DATA_TYPE *B) {
  int i, j, k;
  DATA_TYPE c11, c12, c13, c21, c22, c23, c31, c32, c33;

  c11 = +2;
  c21 = +5;
  c31 = -8;
  c12 = -3;
  c22 = +6;
  c32 = -9;
  c13 = +4;
  c23 = +7;
  c33 = +10;

  /* Recompute the volume size locally (NI*NJ*NK may overflow int). */
  unsigned long int size = NI;
  size *= NJ;
  size *= NK;

#pragma omp target teams distribute parallel for \
    map(to: A[:size])                            \
    map(from: B[:size])                          \
    device(OMP_DEVICE_ID)                        \
    private(i, k)
  for (j = 1; j < NJ - 1; ++j) {
    LLVM_MCA_BEGIN("kernel");
    for (i = 1; i < NI - 1; ++i) {
      for (k = 1; k < NK - 1; ++k) {
        B[i * (NK * NJ) + j * NK + k] =
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k - 1)] +
            c12 * A[(i + 0) * (NK * NJ) + (j - 1) * NK + (k + 0)] +
            c22 * A[(i + 0) * (NK * NJ) + (j + 0) * NK + (k + 0)] +
            c32 * A[(i + 0) * (NK * NJ) + (j + 1) * NK + (k + 0)] +
            c11 * A[(i - 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c13 * A[(i + 1) * (NK * NJ) + (j - 1) * NK + (k + 1)] +
            c21 * A[(i - 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c23 * A[(i + 1) * (NK * NJ) + (j + 0) * NK + (k + 1)] +
            c31 * A[(i - 1) * (NK * NJ) + (j + 1) * NK + (k + 1)] +
            c33 * A[(i + 1) * (NK * NJ) + (j + 1) * NK + (k + 1)];
      }
    }
    LLVM_MCA_END("kernel");
  }
}

/* Fill A with a deterministic pattern so CPU and OMP runs see the same input. */
void init(DATA_TYPE *A) {
  int i, j, k;
  for (i = 0; i < NI; ++i) {
    for (j = 0; j < NJ; ++j) {
      for (k = 0; k < NK; ++k) {
        A[i * (NK * NJ) + j * NK + k] = i % 12 + 2 * (j % 7) + 3 * (k % 13);
      }
    }
  }
}

/* Count interior cells where the CPU and OMP results differ by more than
 * ERROR_THRESHOLD percent. Returns the number of mismatches. */
int compareResults(DATA_TYPE *B, DATA_TYPE *B_OMP) {
  int i, j, k, fail;
  fail = 0;
  // Compare result from cpu and gpu...
  for (i = 1; i < NI - 1; ++i) {
    for (j = 1; j < NJ - 1; ++j) {
      for (k = 1; k < NK - 1; ++k) {
        if (percentDiff(B[i * (NK * NJ) + j * NK + k],
                        B_OMP[i * (NK * NJ) + j * NK + k]) > ERROR_THRESHOLD) {
          fail++;
        }
      }
    }
  }
  return fail;
}

/* Driver: run the OMP and/or sequential kernels depending on the RUN_* build
 * flags, optionally compare them, and return the mismatch count as the exit
 * status (0 = success). */
int main(int argc, char *argv[]) {
  fprintf(stdout, ">> Three dimensional (3D) convolution <<\n");

  // small hack to cast the macros into unsigned longs (UL) and multiply without
  // overflowing
  unsigned long int size = NI;
  size *= NJ;
  size *= NK;

  // declare arrays and allocate memory
  DATA_TYPE *A = (DATA_TYPE *)malloc(size * sizeof(DATA_TYPE));
  DATA_TYPE *B = NULL;
  DATA_TYPE *B_OMP = NULL;

  // initialize memory
  init(A);

  // run OMP on GPU or CPU if enabled
#if defined(RUN_OMP_GPU) || defined(RUN_OMP_CPU)
  B_OMP = (DATA_TYPE *)malloc(size * sizeof(DATA_TYPE));
  BENCHMARK_OMP(conv3D_OMP(A, B_OMP));
  // prevent dead code elimination
  DCE_PREVENT(B_OMP, size);
#endif

  // run sequential version if enabled
#ifdef RUN_CPU_SEQ
  B = (DATA_TYPE *)malloc(size * sizeof(DATA_TYPE));
  BENCHMARK_CPU(conv3D(A, B));
  // prevent dead code elimination
  DCE_PREVENT(B, size);
#endif

  int fail = 0;
  // if test mode enabled, compare the results
  // NOTE(review): RUN_TEST presumably implies both RUN_CPU_SEQ and an OMP
  // variant are built — otherwise compareResults dereferences NULL; confirm
  // the build flags enforce this.
#ifdef RUN_TEST
  fail += compareResults(B, B_OMP);
  printf("Errors on OMP (threshold %4.2lf): %d\n", ERROR_THRESHOLD, fail);
#endif

  // Release memory (free(NULL) is a no-op for the unbuilt variants)
  free(A);
  free(B);
  free(B_OMP);

  return fail;
}
pado_unw_unv_para.h
/* * pado.h * * Created on: Sep 4, 2018 * Author: Zhen Peng */ #ifndef INCLUDES_PADO_UNW_PARA_UNV_H_ #define INCLUDES_PADO_UNW_PARA_UNV_H_ #include <vector> #include <unordered_map> #include <map> #include <algorithm> #include <iostream> #include <limits.h> #include <xmmintrin.h> #include <bitset> #include <cmath> #include "globals.h" #include "graph.h" #include <omp.h> using std::vector; using std::unordered_map; using std::map; using std::bitset; using std::stable_sort; using std::min; using std::fill; namespace PADO { //inti NUM_THREADS = 4; //const inti BATCH_SIZE = 1024; // The size for regular batch and bit array. //const inti BITPARALLEL_SIZE = 50; //const inti THRESHOLD_PARALLEL = 80; //// Batch based processing, 09/11/2018 template <inti BATCH_SIZE = 1024> class ParaVertexCentricPLL { private: static const inti BITPARALLEL_SIZE = 50; idi num_v_ = 0; const inti THRESHOLD_PARALLEL = 80; // Structure for the type of label struct IndexType { struct Batch { idi batch_id; // Batch ID idi start_index; // Index to the array distances where the batch starts inti size; // Number of distances element in this batch Batch(idi batch_id_, idi start_index_, inti size_): batch_id(batch_id_), start_index(start_index_), size(size_) { ; } }; struct DistanceIndexType { idi start_index; // Index to the array vertices where the same-ditance vertices start inti size; // Number of the same-distance vertices smalli dist; // The real distance DistanceIndexType(idi start_index_, inti size_, smalli dist_): start_index(start_index_), size(size_), dist(dist_) { ; } }; smalli bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} vector<Batch> batches; // Batch info vector<DistanceIndexType> distances; // Distance info vector<idi> vertices; // Vertices in the label, preresented as temperory ID }; //__attribute__((aligned(64))); // Structure for the type of temporary label struct ShortIndex { // I use BATCH_SIZE + 1 bit for indicator bit array. 
// The v.indicator[BATCH_SIZE] is set if in current batch v has got any new labels already. // In this way, when do initialization, only initialize those short_index[v] whose indicator[BATCH_SIZE] is set. bitset<BATCH_SIZE + 1> indicator; // Global indicator, indicator[r] (0 <= r < BATCH_SIZE) is set means root r once selected as candidate already // bitset<BATCH_SIZE> candidates; // Candidates one iteration, candidates[r] is set means root r is candidate in this iteration // Use a queue to store candidates vector<inti> candidates_que = vector<inti>(BATCH_SIZE); inti end_candidates_que = 0; vector<uint8_t> is_candidate = vector<uint8_t>(BATCH_SIZE, 0); }; //__attribute__((aligned(64))); // Structure of the public ordered index for distance queries. struct IndexOrdered { weighti bp_dist[BITPARALLEL_SIZE]; uint64_t bp_sets[BITPARALLEL_SIZE][2]; // [0]: S^{-1}, [1]: S^{0} vector<idi> label_id; vector<weighti> label_dists; }; vector<IndexType> L; vector<IndexOrdered> Index; // Ordered labels for original vertex ID void construct(const Graph &G); inline void bit_parallel_labeling( const Graph &G, vector<IndexType> &L, vector<uint8_t> &used_bp_roots); // inline void bit_parallel_labeling( // const Graph &G, // vector<IndexType> &L, // vector<bool> &used_bp_roots); inline void batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated); // inline void batch_process( // const Graph &G, // idi b_id, // idi root_start, // inti roots_size, // vector<IndexType> &L, // const vector<bool> &used_bp_roots); inline void 
initialize( vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots); inline void push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, idi &offset_tmp_candidate_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter); inline bool distance_query( idi cand_root_id, idi v_id, idi roots_start, const vector<IndexType> &L, const vector< vector<smalli> > &dist_matrix, smalli iter); inline void insert_label_only( idi cand_root_id, idi v_id, idi roots_start, inti roots_size, vector<IndexType> &L, vector< vector<smalli> > &dist_matrix, smalli iter); inline void update_label_indices( idi v_id, idi inserted_count, vector<IndexType> &L, vector<ShortIndex> &short_index, idi b_id, smalli iter); inline void reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector< vector<smalli> > &dist_matrix); // Some parallel interfaces inline idi prefix_sum_for_offsets( vector<idi> &offsets); template <typename T> inline void collect_into_queue( vector<T> &tmp_queue, vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue vector<idi> &offsets_queue, // the locations in queue for writing into queue. 
idi num_elements, // total number of elements which need to be added from tmp_queue to queue vector<T> &queue, idi &end_queue); template <typename T, typename Int> inline void TS_enqueue( vector<T> &queue, Int &end_queue, const T &e); // Test only // uint64_t normal_hit_count = 0; // uint64_t bp_hit_count = 0; // uint64_t total_check_count = 0; // double initializing_time = 0; // double candidating_time = 0; // double adding_time = 0; // double distance_query_time = 0; // double init_index_time = 0; // double init_dist_matrix_time = 0; // double init_start_reset_time = 0; // double init_indicators_time = 0; //#ifdef PROFILE // vector<double> thds_adding_time = vector<double>(80, 0.0); // vector<uint64_t> thds_adding_count = vector<uint64_t>(80, 0); // L2CacheMissRate cache_miss; //#endif // End test public: ParaVertexCentricPLL() = default; ParaVertexCentricPLL(const Graph &G); weighti query( idi u, idi v); void print(); void switch_labels_to_old_id( const vector<idi> &rank2id, const vector<idi> &rank); void store_index_to_file( const char *filename, const vector<idi> &rank); void load_index_from_file( const char *filename); void order_labels( const vector<idi> &rank2id, const vector<idi> &rank); weighti query_distance( idi a, idi b); }; // class ParaVertexCentricPLL template <inti BATCH_SIZE> const inti ParaVertexCentricPLL<BATCH_SIZE>::BITPARALLEL_SIZE; template <inti BATCH_SIZE> ParaVertexCentricPLL<BATCH_SIZE>::ParaVertexCentricPLL(const Graph &G) { construct(G); } template <inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::bit_parallel_labeling( const Graph &G, vector<IndexType> &L, vector<uint8_t> &used_bp_roots) // CAS needs array { idi num_v = G.get_num_v(); idi num_e = G.get_num_e(); if (num_v <= BITPARALLEL_SIZE) { // if (true) {} // Sequential version std::vector<weighti> tmp_d(num_v); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<idi> 
que(num_v); // active queue std::vector<std::pair<idi, idi> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<idi, idi> > child_es(num_e); // child and father, their distances to the root have difference of 1. idi r = 0; // root r for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { while (r < num_v && used_bp_roots[r]) { ++r; } if (r == num_v) { for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = SMALLI_MAX; } continue; } used_bp_roots[r] = 1; fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); idi que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; // number of selected neighbor, default 64 // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. // idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; weighti td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. 
sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; // tmp_s[v].second |= tmp_s[tv].first; // tmp_s[tv].second |= tmp_s[v].first; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; // tmp_s[tv].first |= tmp_s[v].first; // tmp_s[tv].second |= tmp_s[v].second; } } } for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } else { // Parallel version: Naive parallel enqueue std::vector<weighti> tmp_d(num_v); // distances from the root to every v std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} std::vector<idi> que(num_v); // active queue std::vector<std::pair<idi, idi> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) std::vector<std::pair<idi, idi> > child_es(num_e); // child and father, their distances to the root have difference of 1. 
idi r = 0; // root r for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { while (r < num_v && used_bp_roots[r]) { ++r; } if (r == num_v) { for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = SMALLI_MAX; } continue; } used_bp_roots[r] = 1; fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); idi que_t0 = 0, que_t1 = 0, que_h = 0; que[que_h++] = r; tmp_d[r] = 0; que_t1 = que_h; int ns = 0; // number of selected neighbor, default 64 // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // There was a bug cost countless time: the unsigned iterator i might decrease to zero and then flip to the INF. // idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) {} idi d_i_bound = G.out_degrees[r]; idi i_start = G.vertices[r] + d_i_bound - 1; for (idi d_i = 0; d_i < d_i_bound; ++d_i) { idi i = i_start - d_i; idi v = G.out_edges[i]; if (!used_bp_roots[v]) { used_bp_roots[v] = 1; // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) que[que_h++] = v; tmp_d[v] = 1; tmp_s[v].first = 1ULL << ns; if (++ns == 64) break; } } for (weighti d = 0; que_t0 < que_h; ++d) { idi num_sibling_es = 0, num_child_es = 0; for (idi que_i = que_t0; que_i < que_t1; ++que_i) { idi v = que[que_i]; idi i_start = G.vertices[v]; idi i_bound = i_start + G.out_degrees[v]; for (idi i = i_start; i < i_bound; ++i) { idi tv = G.out_edges[i]; weighti td = d + 1; if (d > tmp_d[tv]) { ; } else if (d == tmp_d[tv]) { if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. 
sibling_es[num_sibling_es].first = v; sibling_es[num_sibling_es].second = tv; ++num_sibling_es; // tmp_s[v].second |= tmp_s[tv].first; // tmp_s[tv].second |= tmp_s[v].first; } } else { // d < tmp_d[tv] if (tmp_d[tv] == SMALLI_MAX) { que[que_h++] = tv; tmp_d[tv] = td; } child_es[num_child_es].first = v; child_es[num_child_es].second = tv; ++num_child_es; // tmp_s[tv].first |= tmp_s[v].first; // tmp_s[tv].second |= tmp_s[v].second; } } } for (idi i = 0; i < num_sibling_es; ++i) { idi v = sibling_es[i].first, w = sibling_es[i].second; tmp_s[v].second |= tmp_s[w].first; tmp_s[w].second |= tmp_s[v].first; } for (idi i = 0; i < num_child_es; ++i) { idi v = child_es[i].first, c = child_es[i].second; tmp_s[c].first |= tmp_s[v].first; tmp_s[c].second |= tmp_s[v].second; } que_t0 = que_t1; que_t1 = que_h; } #pragma omp parallel for for (idi v = 0; v < num_v; ++v) { L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets_0[i_bpspt] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets_1[i_bpspt] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} } } } } //inline void ParaVertexCentricPLL::bit_parallel_labeling( // const Graph &G, // vector<IndexType> &L, // vector<uint8_t> &used_bp_roots) //{ // idi num_v = G.get_num_v(); // idi num_e = G.get_num_e(); // //// std::vector<smalli> tmp_d(num_v); // distances from the root to every v // smalli *tmp_d = (smalli *) malloc(num_v * sizeof(smalli)); // std::vector<std::pair<uint64_t, uint64_t> > tmp_s(num_v); // first is S_r^{-1}, second is S_r^{0} // std::vector<idi> que(num_v); // active queue // std::vector< std::pair<idi, idi> > sibling_es(num_e); // siblings, their distances to the root are equal (have difference of 0) // std::vector< std::pair<idi, idi> > child_es(num_e); // child and father, their distances to 
the root have difference of 1. // // idi r = 0; // root r // for (inti i_bpspt = 0; i_bpspt < BITPARALLEL_SIZE; ++i_bpspt) { // while (r < num_v && used_bp_roots[r]) { // ++r; // } // if (r == num_v) { // for (idi v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = SMALLI_MAX; // } // continue; // } // used_bp_roots[r] = 1; // //// fill(tmp_d.begin(), tmp_d.end(), SMALLI_MAX); // memset(tmp_d, (uint8_t) -1, num_v * sizeof(smalli)); // fill(tmp_s.begin(), tmp_s.end(), std::make_pair(0, 0)); // // idi que_t0 = 0, que_t1 = 0, que_h = 0; // que[que_h++] = r; // tmp_d[r] = 0; // que_t1 = que_h; // // int ns = 0; // number of selected neighbor, default 64 // // the edge of one vertex in G is ordered decreasingly to rank, lower rank first, so here need to traverse edges backward // idi i_bound = G.vertices[r] - 1; // idi i_start = i_bound + G.out_degrees[r]; // for (idi i = i_start; i > i_bound; --i) { // idi v = G.out_edges[i]; // if (!used_bp_roots[v]) { // used_bp_roots[v] = 1; // // Algo3:line4: for every v in S_r, (dist[v], S_r^{-1}[v], S_r^{0}[v]) <- (1, {v}, empty_set) // que[que_h++] = v; // tmp_d[v] = 1; // tmp_s[v].first = 1ULL << ns; // if (++ns == 64) break; // } // } // // for (smalli d = 0; que_t0 < que_h; ++d) { // idi num_sibling_es = 0, num_child_es = 0; // // // For parallel adding to que // idi que_size = que_t1 - que_t0; // vector<idi> offsets_tmp_queue(que_size); //#pragma omp parallel for // for (idi i_q = 0; i_q < que_size; ++i_q) { // offsets_tmp_queue[i_q] = G.out_degrees[que[que_t0 + i_q]]; // } // idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // vector<idi> tmp_que(num_neighbors); // vector<idi> sizes_tmp_que(que_size, 0); // // For parallel adding to sibling_es // vector< pair<idi, idi> > tmp_sibling_es(num_neighbors); // vector<idi> sizes_tmp_sibling_es(que_size, 0); // // For parallel adding to child_es // vector< pair<idi, idi> > tmp_child_es(num_neighbors); // vector<idi> sizes_tmp_child_es(que_size, 0); // //#pragma omp 
parallel for // for (idi que_i = que_t0; que_i < que_t1; ++que_i) { // idi tmp_que_i = que_i - que_t0; // location in the tmp_que // idi v = que[que_i]; // idi i_start = G.vertices[v]; // idi i_bound = i_start + G.out_degrees[v]; // for (idi i = i_start; i < i_bound; ++i) { // idi tv = G.out_edges[i]; // smalli td = d + 1; // // if (d > tmp_d[tv]) { // ; // } // else if (d == tmp_d[tv]) { // if (v < tv) { // ??? Why need v < tv !!! Because it's a undirected graph. // idi &size_in_group = sizes_tmp_sibling_es[tmp_que_i]; // tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; // tmp_sibling_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; // ++size_in_group; //// sibling_es[num_sibling_es].first = v; //// sibling_es[num_sibling_es].second = tv; //// ++num_sibling_es; // } // } else { // d < tmp_d[tv] // if (tmp_d[tv] == SMALLI_MAX) { // if (CAS(tmp_d + tv, SMALLI_MAX, td)) { // tmp_d[tv] = td // tmp_que[offsets_tmp_queue[tmp_que_i] + sizes_tmp_que[tmp_que_i]++] = tv; // } // } //// if (tmp_d[tv] == SMALLI_MAX) { //// que[que_h++] = tv; //// tmp_d[tv] = td; //// } // idi &size_in_group = sizes_tmp_child_es[tmp_que_i]; // tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].first = v; // tmp_child_es[offsets_tmp_queue[tmp_que_i] + size_in_group].second = tv; // ++size_in_group; //// child_es[num_child_es].first = v; //// child_es[num_child_es].second = tv; //// ++num_child_es; // } // } // } // // // From tmp_sibling_es to sibling_es // idi total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_sibling_es); // collect_into_queue( // tmp_sibling_es, // offsets_tmp_queue, // sizes_tmp_sibling_es, // total_sizes_tmp_queue, // sibling_es, // num_sibling_es); // //#pragma omp parallel for // for (idi i = 0; i < num_sibling_es; ++i) { // idi v = sibling_es[i].first, w = sibling_es[i].second; // __sync_or_and_fetch(&tmp_s[v].second, tmp_s[w].first); // __sync_or_and_fetch(&tmp_s[w].second, tmp_s[v].first); //// tmp_s[v].second |= 
tmp_s[w].first; //// tmp_s[w].second |= tmp_s[v].first; // } // // // From tmp_child_es to child_es // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_child_es); // collect_into_queue( // tmp_child_es, // offsets_tmp_queue, // sizes_tmp_child_es, // total_sizes_tmp_queue, // child_es, // num_child_es); // //#pragma omp parallel for // for (idi i = 0; i < num_child_es; ++i) { // idi v = child_es[i].first, c = child_es[i].second; // __sync_or_and_fetch(&tmp_s[c].first, tmp_s[v].first); // __sync_or_and_fetch(&tmp_s[c].second, tmp_s[v].second); //// tmp_s[c].first |= tmp_s[v].first; //// tmp_s[c].second |= tmp_s[v].second; // } // // // From tmp_que to que // total_sizes_tmp_queue = prefix_sum_for_offsets(sizes_tmp_que); // collect_into_queue( // tmp_que, // offsets_tmp_queue, // sizes_tmp_que, // total_sizes_tmp_queue, // que, // que_h); // // que_t0 = que_t1; // que_t1 = que_h; // } // //#pragma omp parallel for // for (idi v = 0; v < num_v; ++v) { // L[v].bp_dist[i_bpspt] = tmp_d[v]; // L[v].bp_sets[i_bpspt][0] = tmp_s[v].first; // S_r^{-1} // L[v].bp_sets[i_bpspt][1] = tmp_s[v].second & ~tmp_s[v].first; // Only need those r's neighbors who are not already in S_r^{-1} // } // } // // free(tmp_d); //} // Function for initializing at the begin of a batch // For a batch, initialize the temporary labels and real labels of roots; // traverse roots' labels to initialize distance buffer; // unset flag arrays is_active and got_labels template <inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::initialize( vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, idi b_id, idi roots_start, inti roots_size, vector<IndexType> &L, const vector<uint8_t> &used_bp_roots) { idi roots_bound = roots_start + roots_size; // init_start_reset_time -= 
WallTimer::get_time_mark(); // TODO: parallel enqueue { // active_queue for (idi r_real_id = roots_start; r_real_id < roots_bound; ++r_real_id) { if (!used_bp_roots[r_real_id]) { active_queue[end_active_queue++] = r_real_id; } } } // init_start_reset_time += WallTimer::get_time_mark(); // init_index_time -= WallTimer::get_time_mark(); // Short_index { // init_indicators_time -= WallTimer::get_time_mark(); if (end_once_candidated_queue >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } else { for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { idi v = once_candidated_queue[v_i]; short_index[v].indicator.reset(); once_candidated[v] = 0; } } //#pragma omp parallel for // for (idi v_i = 0; v_i < end_once_candidated_queue; ++v_i) { // idi v = once_candidated_queue[v_i]; // short_index[v].indicator.reset(); // once_candidated[v] = 0; // } end_once_candidated_queue = 0; if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } else { for (idi v = roots_start; v < roots_bound; ++v) { if (!used_bp_roots[v]) { short_index[v].indicator.set(v - roots_start); short_index[v].indicator.set(BATCH_SIZE); // v got labels } } } // for (idi v = roots_start; v < roots_bound; ++v) { // if (!used_bp_roots[v]) { // short_index[v].indicator.set(v - roots_start); // short_index[v].indicator.set(BATCH_SIZE); // v got labels // } // } // init_indicators_time += WallTimer::get_time_mark(); } // // Real Index { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; 
Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } else { for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; Lr.batches.push_back(IndexType::Batch( b_id, // Batch ID Lr.distances.size(), // start_index 1)); // size Lr.distances.push_back(IndexType::DistanceIndexType( Lr.vertices.size(), // start_index 1, // size 0)); // dist Lr.vertices.push_back(r_id); } } // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // Lr.batches.push_back(IndexType::Batch( // b_id, // Batch ID // Lr.distances.size(), // start_index // 1)); // size // Lr.distances.push_back(IndexType::DistanceIndexType( // Lr.vertices.size(), // start_index // 1, // size // 0)); // dist // Lr.vertices.push_back(r_id); // } } // init_index_time += WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); // Dist_matrix { if (roots_size >= THRESHOLD_PARALLEL) { // schedule dynamic is slower #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = 
v_start_index + Lr.distances[dist_i].size; smalli dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; smalli dist; for (idi r_id = 0; r_id < roots_size; ++r_id) { if (used_bp_roots[r_id + roots_start]) { continue; } IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; dist = Lr.distances[dist_i].dist; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // smalli dist; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // if (used_bp_roots[r_id + roots_start]) { // continue; // } // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < 
dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // dist = Lr.distances[dist_i].dist; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = dist; // } // } // } // } } // init_dist_matrix_time += WallTimer::get_time_mark(); } // Function that pushes v_head's labels to v_head's every neighbor template <inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::push_labels( idi v_head, idi roots_start, const Graph &G, const vector<IndexType> &L, vector<ShortIndex> &short_index, // vector<idi> &candidate_queue, // idi &end_candidate_queue, vector<idi> &tmp_candidate_queue, idi &size_tmp_candidate_queue, idi &offset_tmp_queue, // vector<bool> &got_candidates, vector<uint8_t> &got_candidates, // vector<idi> &once_candidated_queue, // idi &end_once_candidated_queue, vector<idi> &tmp_once_candidated_queue, idi &size_tmp_once_candidated_queue, // vector<bool> &once_candidated, vector<uint8_t> &once_candidated, const vector<uint8_t> &used_bp_roots, smalli iter) { const IndexType &Lv = L[v_head]; // These 2 index are used for traversing v_head's last inserted labels idi l_i_start = Lv.distances.rbegin() -> start_index; idi l_i_bound = l_i_start + Lv.distances.rbegin() -> size; // Traverse v_head's every neighbor v_tail idi e_i_start = G.vertices[v_head]; idi e_i_bound = e_i_start + G.out_degrees[v_head]; for (idi e_i = e_i_start; e_i < e_i_bound; ++e_i) { idi v_tail = G.out_edges[e_i]; if (used_bp_roots[v_head]) { continue; } if (v_tail < roots_start) { // v_tail has higher rank than any roots, then no roots can push new labels to it. 
return; } // if (v_tail <= Lv.vertices[l_i_start] + roots_start) { // v_tail has higher rank than any v_head's labels // return; // } // This condition cannot be used anymore since v_head's last inserted labels are not ordered from higher rank to lower rank now, because v_head's candidate set is a queue now rather than a bitmap. For a queue, its order of candidates are not ordered by ranks. const IndexType &L_tail = L[v_tail]; _mm_prefetch(&L_tail.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_tail.bp_sets[0][0], _MM_HINT_T0); // Traverse v_head's last inserted labels for (idi l_i = l_i_start; l_i < l_i_bound; ++l_i) { inti label_root_id = Lv.vertices[l_i]; idi label_real_id = label_root_id + roots_start; if (v_tail <= label_real_id) { // v_tail has higher rank than all remaining labels // For candidates_que, this is not true any more! // break; continue; } ShortIndex &SI_v_tail = short_index[v_tail]; if (SI_v_tail.indicator[label_root_id]) { // The label is already selected before continue; } // Record label_root_id as once selected by v_tail SI_v_tail.indicator.set(label_root_id); // Add into once_candidated_queue if (!once_candidated[v_tail]) { // If v_tail is not in the once_candidated_queue yet, add it in if (CAS(&once_candidated[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_once_candidated_queue[offset_tmp_queue + size_tmp_once_candidated_queue++] = v_tail; } } // CHANGED! // Bit Parallel Checking: if label_real_id to v_tail has shorter distance already // ++total_check_count; const IndexType &L_label = L[label_real_id]; bool no_need_add = false; _mm_prefetch(&L_label.bp_dist[0], _MM_HINT_T0); _mm_prefetch(&L_label.bp_sets[0][0], _MM_HINT_T0); for (inti i = 0; i < BITPARALLEL_SIZE; ++i) { inti td = L_label.bp_dist[i] + L_tail.bp_dist[i]; if (td - 2 <= iter) { td += (L_label.bp_sets[i][0] & L_tail.bp_sets[i][0]) ? -2 : ((L_label.bp_sets[i][0] & L_tail.bp_sets[i][1]) | (L_label.bp_sets[i][1] & L_tail.bp_sets[i][0])) ? 
-1 : 0; if (td <= iter) { no_need_add = true; // ++bp_hit_count; break; } } } if (no_need_add) { continue; } // Record vertex label_root_id as v_tail's candidates label // SI_v_tail.candidates.set(label_root_id); // if (!SI_v_tail.is_candidate[label_root_id]) { // SI_v_tail.is_candidate[label_root_id] = true; // SI_v_tail.candidates_que[SI_v_tail.end_candidates_que++] = label_root_id; // } if (!SI_v_tail.is_candidate[label_root_id]) { if (CAS(&SI_v_tail.is_candidate[label_root_id], (uint8_t) 0, (uint8_t) 1)) { TS_enqueue(SI_v_tail.candidates_que, SI_v_tail.end_candidates_que, label_root_id); // volatile inti old_v = SI_v_tail.end_candidates_que; // volatile inti new_v = old_v + 1; // while (!CAS(&SI_v_tail.end_candidates_que, old_v, new_v)) { // old_v = SI_v_tail.end_candidates_que; // new_v = old_v + 1; // } // SI_v_tail.candidates_que[old_v] = label_root_id; } } // Add into candidate_queue if (!got_candidates[v_tail]) { // If v_tail is not in candidate_queue, add it in (prevent duplicate) if (CAS(&got_candidates[v_tail], (uint8_t) 0, (uint8_t) 1)) { tmp_candidate_queue[offset_tmp_queue + size_tmp_candidate_queue++] = v_tail; } } // // Add into once_candidated_queue //#pragma omp critical // if (!once_candidated[v_tail]) { // // If v_tail is not in the once_candidated_queue yet, add it in // once_candidated[v_tail] = true; // once_candidated_queue[end_once_candidated_queue++] = v_tail; // } // // Add into candidate_queue // if (!got_candidates[v_tail]) { // // If v_tail is not in candidate_queue, add it in (prevent duplicate) // got_candidates[v_tail] = true; // candidate_queue[end_candidate_queue++] = v_tail; // } } } // printf("v_head: %u, size_tmp_candidate_queue: %u\n", v_head, size_tmp_candidate_queue);//test } // Function for distance query; // traverse vertex v_id's labels; // return the distance between v_id and cand_root_id based on existing labels. 
// return false if shorter distance exists already, return true if the cand_root_id can be added into v_id's label.
//
// Params:
//   cand_root_id - candidate hub id, relative to roots_start (0 <= cand_root_id < BATCH_SIZE)
//   v_id         - vertex whose existing labels are scanned
//   L            - the label index; L[v_id] holds batches/distances/vertices arrays
//   dist_matrix  - per-root distance buffer: dist_matrix[cand_root_id][v] is the currently
//                  known distance from the candidate root to hub v (reset to SMALLI_MAX
//                  between batches by reset_at_end)
//   iter         - current BFS level == tentative distance of the candidate label
template <inti BATCH_SIZE>
inline bool ParaVertexCentricPLL<BATCH_SIZE>::distance_query(
        idi cand_root_id,
        idi v_id,
        idi roots_start,
        const vector<IndexType> &L,
        const vector< vector<smalli> > &dist_matrix,
        smalli iter)
{
//    ++total_check_count;
//    distance_query_time -= WallTimer::get_time_mark();
    // Global vertex id of the candidate (candidates are stored root-relative).
    idi cand_real_id = cand_root_id + roots_start;
    const IndexType &Lv = L[v_id];
    // Traverse v_id's all existing labels
    inti b_i_bound = Lv.batches.size();
    _mm_prefetch(&Lv.batches[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.distances[0], _MM_HINT_T0);
    _mm_prefetch(&Lv.vertices[0], _MM_HINT_T0);
    _mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0);
    for (inti b_i = 0; b_i < b_i_bound; ++b_i) {
        // Labels are grouped by the batch they were inserted in; vertices are stored
        // batch-relative, so add the batch's id offset to recover global ids.
        idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
        idi dist_start_index = Lv.batches[b_i].start_index;
        idi dist_bound_index = dist_start_index + Lv.batches[b_i].size;
        // Traverse dist_matrix
        for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
            inti dist = Lv.distances[dist_i].dist;
            if (dist >= iter) {
                // In a batch, the labels' distances are increasingly ordered.
                // If the half path distance is already greater than their targeted distance, jump to next batch
                break;
            }
            idi v_start_index = Lv.distances[dist_i].start_index;
            idi v_bound_index = v_start_index + Lv.distances[dist_i].size;
//            _mm_prefetch(&dist_matrix[cand_root_id][0], _MM_HINT_T0);
            for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                idi v = Lv.vertices[v_i] + id_offset; // v is a label hub of v_id
                if (v >= cand_real_id) {
                    // Vertex cand_real_id cannot have labels whose ranks are lower than it,
                    // in which case dist_matrix[cand_root_id][v] does not exit.
                    continue;
                }
                // Two-hop distance via hub v: v_id -> v (dist) plus v -> cand root (buffered).
                inti d_tmp = dist + dist_matrix[cand_root_id][v];
                if (d_tmp <= iter) {
                    // A path at most as long as the candidate already exists; prune it.
//                    distance_query_time += WallTimer::get_time_mark();
//                    ++normal_hit_count;
                    return false;
                }
            }
        }
    }
//    distance_query_time += WallTimer::get_time_mark();
    return true;
}

// Function inserts candidate cand_root_id into vertex v_id's labels;
// update the distance buffer dist_matrix;
// but it only update the v_id's labels' vertices array;
// (the batches/distances index arrays are fixed up later by update_label_indices.)
template <inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::insert_label_only(
        idi cand_root_id,
        idi v_id,
        idi roots_start,
        inti roots_size,
        vector<IndexType> &L,
        vector< vector<smalli> > &dist_matrix,
        smalli iter)
{
    L[v_id].vertices.push_back(cand_root_id);
    // Update the distance buffer if necessary
    // (only when v_id is itself one of this batch's roots).
    // NOTE(review): v_root_id wraps around when v_id < roots_start (idi is presumably
    // unsigned — TODO confirm); the short-circuit guard below keeps that value unused.
    idi v_root_id = v_id - roots_start;
    if (v_id >= roots_start && v_root_id < roots_size) {
        dist_matrix[v_root_id][cand_root_id + roots_start] = iter;
    }
}

// Function updates those index arrays in v_id's label only if v_id has been inserted new labels
// (called once per vertex per iteration, after all of its candidates were inserted).
template <inti BATCH_SIZE>
inline void ParaVertexCentricPLL<BATCH_SIZE>::update_label_indices(
        idi v_id,
        idi inserted_count,
        vector<IndexType> &L,
        vector<ShortIndex> &short_index,
        idi b_id,
        smalli iter)
{
    IndexType &Lv = L[v_id];
    // indicator[BATCH_SIZE + 1] is true, means v got some labels already in this batch
    if (short_index[v_id].indicator[BATCH_SIZE]) {
        // Increase the batches' last element's size because a new distance element need to be added
        ++(Lv.batches.rbegin() -> size);
    } else {
        short_index[v_id].indicator.set(BATCH_SIZE);
        // Insert a new Batch with batch_id, start_index, and size because a new distance element need to be added
        Lv.batches.push_back(IndexType::Batch(
                b_id,
                Lv.distances.size(),
                1));
    }
    // Insert a new distance element with start_index, size, and dist
    // (the last inserted_count entries of Lv.vertices belong to this distance level).
    Lv.distances.push_back(IndexType::DistanceIndexType(
            Lv.vertices.size() - inserted_count,
            inserted_count,
            iter));
}

// Function to reset dist_matrix the distance buffer to INF
// Traverse every root's labels to reset its
distance buffer elements to INF. // In this way to reduce the cost of initialization of the next batch. template <inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::reset_at_end( idi roots_start, inti roots_size, vector<IndexType> &L, vector< vector<smalli> > &dist_matrix) { if (roots_size >= THRESHOLD_PARALLEL) { #pragma omp parallel for for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; inti b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { idi id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; idi dist_start_index = Lr.batches[b_i].start_index; idi dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { idi v_start_index = Lr.distances[dist_i].start_index; idi v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } else { inti b_i_bound; idi id_offset; idi dist_start_index; idi dist_bound_index; idi v_start_index; idi v_bound_index; for (idi r_id = 0; r_id < roots_size; ++r_id) { IndexType &Lr = L[r_id + roots_start]; b_i_bound = Lr.batches.size(); _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); for (inti b_i = 0; b_i < b_i_bound; ++b_i) { id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; dist_start_index = Lr.batches[b_i].start_index; dist_bound_index = dist_start_index + Lr.batches[b_i].size; // Traverse dist_matrix for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { v_start_index = Lr.distances[dist_i].start_index; v_bound_index = v_start_index + Lr.distances[dist_i].size; for (idi v_i = v_start_index; v_i < 
v_bound_index; ++v_i) { dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; } } } } } // inti b_i_bound; // idi id_offset; // idi dist_start_index; // idi dist_bound_index; // idi v_start_index; // idi v_bound_index; // for (idi r_id = 0; r_id < roots_size; ++r_id) { // IndexType &Lr = L[r_id + roots_start]; // b_i_bound = Lr.batches.size(); // _mm_prefetch(&Lr.batches[0], _MM_HINT_T0); // _mm_prefetch(&Lr.distances[0], _MM_HINT_T0); // _mm_prefetch(&Lr.vertices[0], _MM_HINT_T0); // for (inti b_i = 0; b_i < b_i_bound; ++b_i) { // id_offset = Lr.batches[b_i].batch_id * BATCH_SIZE; // dist_start_index = Lr.batches[b_i].start_index; // dist_bound_index = dist_start_index + Lr.batches[b_i].size; // // Traverse dist_matrix // for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) { // v_start_index = Lr.distances[dist_i].start_index; // v_bound_index = v_start_index + Lr.distances[dist_i].size; // for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) { // dist_matrix[r_id][Lr.vertices[v_i] + id_offset] = SMALLI_MAX; // } // } // } // } } template <inti BATCH_SIZE> inline void ParaVertexCentricPLL<BATCH_SIZE>::batch_process( const Graph &G, idi b_id, idi roots_start, // start id of roots inti roots_size, // how many roots in the batch vector<IndexType> &L, const vector<uint8_t> &used_bp_roots, vector<idi> &active_queue, idi &end_active_queue, vector<idi> &candidate_queue, idi &end_candidate_queue, vector<ShortIndex> &short_index, vector< vector<smalli> > &dist_matrix, vector<uint8_t> &got_candidates, vector<uint8_t> &is_active, vector<idi> &once_candidated_queue, idi &end_once_candidated_queue, vector<uint8_t> &once_candidated) //inline void ParaVertexCentricPLL::batch_process( // const Graph &G, // idi b_id, // idi roots_start, // start id of roots // inti roots_size, // how many roots in the batch // vector<IndexType> &L, // const vector<bool> &used_bp_roots) { // initializing_time -= WallTimer::get_time_mark(); // static const idi num_v 
= G.get_num_v(); // static vector<idi> active_queue(num_v); // static idi end_active_queue = 0; // static vector<idi> candidate_queue(num_v); // static idi end_candidate_queue = 0; // static vector<ShortIndex> short_index(num_v); // static vector< vector<smalli> > dist_matrix(roots_size, vector<smalli>(num_v, SMALLI_MAX)); // static uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // static uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // static vector<idi> once_candidated_queue(num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. // static idi end_once_candidated_queue = 0; // static uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // At the beginning of a batch, initialize the labels L and distance buffer dist_matrix; // printf("initializing...\n");//test initialize( short_index, dist_matrix, active_queue, end_active_queue, once_candidated_queue, end_once_candidated_queue, once_candidated, b_id, roots_start, roots_size, L, used_bp_roots); smalli iter = 0; // The iterator, also the distance for current iteration // initializing_time += WallTimer::get_time_mark(); while (0 != end_active_queue) { // candidating_time -= WallTimer::get_time_mark(); ++iter; // Pushing // printf("pushing...\n");//test { // Prepare for parallel processing the active_queue and adding to candidate_queue. // Every vertex's offset location in tmp_candidate_queue // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_active_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. 
offsets_tmp_queue[i_queue] = G.out_degrees[active_queue[i_queue]]; } idi num_neighbors = prefix_sum_for_offsets(offsets_tmp_queue); // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_candidate_queue(num_neighbors); // A vector to store the true number of pushed neighbors of every active vertex. vector<idi> sizes_tmp_candidate_queue(end_active_queue, 0); // similarly, every thread writes to tmp_once_candidated_queue at its offset location vector<idi> tmp_once_candidated_queue(num_neighbors); // And store the true number of new added once-candidated vertices. vector<idi> sizes_tmp_once_candidated_queue(end_active_queue, 0); // Traverse active vertices to push their labels as candidates // schedule dynamic is slower #pragma omp parallel for for (idi i_queue = 0; i_queue < end_active_queue; ++i_queue) { idi v_head = active_queue[i_queue]; is_active[v_head] = 0; // reset is_active push_labels( v_head, roots_start, G, L, short_index, // candidate_queue, // end_candidate_queue, tmp_candidate_queue, sizes_tmp_candidate_queue[i_queue], offsets_tmp_queue[i_queue], got_candidates, // once_candidated_queue, // end_once_candidated_queue, tmp_once_candidated_queue, sizes_tmp_once_candidated_queue[i_queue], once_candidated, used_bp_roots, iter); } // According to sizes_tmp_candidate_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_candidate_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_candidate_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_candidate_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue candidate_queue, end_candidate_queue); // Get the offset for inserting to the real queue. 
total_new = prefix_sum_for_offsets(sizes_tmp_once_candidated_queue); // Collect all once-candidated vertices from tmp_once_candidated_queue into once_candidated_queue collect_into_queue( tmp_once_candidated_queue, offsets_tmp_queue, sizes_tmp_once_candidated_queue, total_new, once_candidated_queue, end_once_candidated_queue); // printf("end_candidate_queue: %u\n", end_candidate_queue); fflush(stdout);//test end_active_queue = 0; // Set the active_queue empty } // candidating_time += WallTimer::get_time_mark(); if (end_candidate_queue == 0) { break; } // adding_time -= WallTimer::get_time_mark(); // Adding // printf("adding...\n");//test { // Prepare for parallel processing the candidate_queue and adding to active_queue. // Every vertex's offset location in tmp_active_queue is i_queue * roots_size // It's used for every thread to write into tmp_candidate_queue and tmp_once_candidated_queue vector<idi> offsets_tmp_queue(end_candidate_queue); #pragma omp parallel for for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { // Traverse all active vertices, get their out degrees. // A ridiculous bug here. The v_id will, if any, only add itself to the active queue. //offsets_tmp_queue[i_queue] = i_queue * roots_size; offsets_tmp_queue[i_queue] = i_queue; } // every thread writes to tmp_candidate_queue at its offset location vector<idi> tmp_active_queue(end_candidate_queue); // A vector to store the true number of pushed neighbors of every active vertex. 
vector<idi> sizes_tmp_active_queue(end_candidate_queue, 0); // Traverse vertices in the candidate_queue to insert labels // Here schedule dynamic will be slower //#ifdef PROFILE // cache_miss.measure_start(); //#endif #pragma omp parallel for schedule(dynamic) for (idi i_queue = 0; i_queue < end_candidate_queue; ++i_queue) { //#ifdef PROFILE // inti tid = omp_get_thread_num(); // thds_adding_time[tid] -= WallTimer::get_time_mark(); //#endif idi v_id = candidate_queue[i_queue]; inti inserted_count = 0; //recording number of v_id's truly inserted candidates got_candidates[v_id] = 0; // reset got_candidates bool be_active = false; // flag, if be_active is ture, v_id needs to be marked as active (enqueue active_queue) // Traverse v_id's all candidates // for (inti cand_root_id = 0; cand_root_id < roots_size; ++cand_root_id) { // if (!short_index[v_id].candidates[cand_root_id]) { // // Root cand_root_id is not vertex v_id's candidate // continue; // } // short_index[v_id].candidates.reset(cand_root_id); inti bound_cand_i = short_index[v_id].end_candidates_que; for (inti cand_i = 0; cand_i < bound_cand_i; ++cand_i) { inti cand_root_id = short_index[v_id].candidates_que[cand_i]; short_index[v_id].is_candidate[cand_root_id] = 0; // Reset is_candidate // Only insert cand_root_id into v_id's label if its distance to v_id is shorter than existing distance if ( distance_query( cand_root_id, v_id, roots_start, L, dist_matrix, iter) ) { if (!be_active) { be_active = true; } //#ifdef PROFILE // ++thds_adding_count[tid]; //#endif // if (!is_active[v_id]) { // is_active[v_id] = true; // active_queue[end_active_queue++] = v_id; // } ++inserted_count; // The candidate cand_root_id needs to be added into v_id's label insert_label_only( cand_root_id, v_id, roots_start, roots_size, L, dist_matrix, iter); } } short_index[v_id].end_candidates_que = 0; // } if (be_active) { if (CAS(&is_active[v_id], (uint8_t) 0, (uint8_t) 1)) { tmp_active_queue[offsets_tmp_queue[i_queue] + 
sizes_tmp_active_queue[i_queue]++] = v_id; } } if (0 != inserted_count) { // Update other arrays in L[v_id] if new labels were inserted in this iteration update_label_indices( v_id, inserted_count, L, short_index, b_id, iter); } //#ifdef PROFILE // thds_adding_time[tid] += WallTimer::get_time_mark(); //#endif } //#ifdef PROFILE // cache_miss.measure_stop(); //#endif // According to sizes_tmp_active_queue, get the offset for inserting to the real queue idi total_new = prefix_sum_for_offsets(sizes_tmp_active_queue); // Collect all candidate vertices from tmp_candidate_queue into candidate_queue. collect_into_queue( tmp_active_queue, offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue sizes_tmp_active_queue, // the locations in queue for writing into queue. total_new, // total number of elements which need to be added from tmp_queue to queue active_queue, end_active_queue); end_candidate_queue = 0; // Set the candidate_queue empty } // adding_time += WallTimer::get_time_mark(); } // Reset the dist_matrix // initializing_time -= WallTimer::get_time_mark(); // init_dist_matrix_time -= WallTimer::get_time_mark(); reset_at_end( roots_start, roots_size, L, dist_matrix); // init_dist_matrix_time += WallTimer::get_time_mark(); // initializing_time += WallTimer::get_time_mark(); // double total_time = time_can + time_add; // printf("Candidating time: %f (%f%%)\n", time_can, time_can / total_time * 100); // printf("Adding time: %f (%f%%)\n", time_add, time_add / total_time * 100); } template <inti BATCH_SIZE> void ParaVertexCentricPLL<BATCH_SIZE>::construct(const Graph &G) { // initializing_time -= WallTimer::get_time_mark(); idi num_v = G.get_num_v(); num_v_ = num_v; L.resize(num_v); idi remainer = num_v % BATCH_SIZE; idi b_i_bound = num_v / BATCH_SIZE; // uint8_t *used_bp_roots = (uint8_t *) calloc(num_v, sizeof(uint8_t)); vector<uint8_t> used_bp_roots(num_v, 0); vector<idi> active_queue(num_v); idi end_active_queue = 0; vector<idi> 
candidate_queue(num_v); idi end_candidate_queue = 0; vector<ShortIndex> short_index(num_v); vector< vector<smalli> > dist_matrix(BATCH_SIZE, vector<smalli>(num_v, SMALLI_MAX)); // uint8_t *got_candidates = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. // uint8_t *is_active = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> got_candidates(num_v, 0); vector<uint8_t> is_active(num_v, 0); vector<idi> once_candidated_queue(num_v); // The vertex who got some candidates in this batch is in the once_candidated_queue. idi end_once_candidated_queue = 0; // uint8_t *once_candidated = (uint8_t *) calloc(num_v, sizeof(uint8_t)); // need raw integer type to do CAS. vector<uint8_t> once_candidated(num_v, 0); // initializing_time += WallTimer::get_time_mark(); double time_labeling = -WallTimer::get_time_mark(); //double bp_labeling_time = -WallTimer::get_time_mark(); // printf("BP labeling...\n"); //test bit_parallel_labeling( G, L, used_bp_roots); //bp_labeling_time += WallTimer::get_time_mark(); for (idi b_i = 0; b_i < b_i_bound; ++b_i) { // printf("b_i: %u\n", b_i);//test batch_process( G, b_i, b_i * BATCH_SIZE, BATCH_SIZE, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i, // b_i * BATCH_SIZE, // BATCH_SIZE, // L, // used_bp_roots); } if (remainer != 0) { // printf("b_i: %u the last batch\n", b_i_bound);//test batch_process( G, b_i_bound, b_i_bound * BATCH_SIZE, remainer, L, used_bp_roots, active_queue, end_active_queue, candidate_queue, end_candidate_queue, short_index, dist_matrix, got_candidates, is_active, once_candidated_queue, end_once_candidated_queue, once_candidated); // batch_process( // G, // b_i_bound, // b_i_bound * BATCH_SIZE, // remainer, // L, // used_bp_roots); } time_labeling += 
WallTimer::get_time_mark(); // free(got_candidates); // free(is_active); // free(once_candidated); // free(used_bp_roots); // Test printf("Threads: %u Batch_size: %u\n", NUM_THREADS, BATCH_SIZE); //printf("BP_labeling: %.2f %.2f%%\n", bp_labeling_time, bp_labeling_time / time_labeling * 100); printf("BP_Roots_Size: %u\n", BITPARALLEL_SIZE); // printf("Initializing: %.2f %.2f%%\n", initializing_time, initializing_time / time_labeling * 100); // printf("\tinit_start_reset_time: %f (%f%%)\n", init_start_reset_time, init_start_reset_time / initializing_time * 100); // printf("\tinit_index_time: %f (%f%%)\n", init_index_time, init_index_time / initializing_time * 100); // printf("\t\tinit_indicators_time: %f (%f%%)\n", init_indicators_time, init_indicators_time / init_index_time * 100); // printf("\tinit_dist_matrix_time: %f (%f%%)\n", init_dist_matrix_time, init_dist_matrix_time / initializing_time * 100); // printf("Candidating: %.2f %.2f%%\n", candidating_time, candidating_time / time_labeling * 100); // printf("Adding: %.2f %.2f%%\n", adding_time, adding_time / time_labeling * 100); // printf("\tdistance_query_time: %f (%f%%)\n", distance_query_time, distance_query_time / adding_time * 100); // printf("\ttotal_check_count: %llu\n", total_check_count); // printf("\tbp_hit_count (to total_check): %llu (%f%%)\n", // bp_hit_count, // bp_hit_count * 100.0 / total_check_count); // printf("\tnormal_hit_count (to total_check, to normal_check): %llu (%f%%, %f%%)\n", // normal_hit_count, // normal_hit_count * 100.0 / total_check_count, // normal_hit_count * 100.0 / (total_check_count - bp_hit_count)); #ifdef PROFILE uint64_t total_thds_adding_count = 0; double total_thds_adding_time = 0; for (inti tid = 0; tid < NUM_THREADS; ++tid) { total_thds_adding_count += thds_adding_count[tid]; total_thds_adding_time += thds_adding_time[tid]; } printf("Threads_adding_count:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %lu(%.2f%%)", thds_adding_count[tid], 
thds_adding_count[tid] * 100.0 / total_thds_adding_count); } puts(""); printf("Threads_adding_time:"); for (inti tid = 0; tid < NUM_THREADS; ++tid) { printf(" %f(%.2f%%)", thds_adding_time[tid], thds_adding_time[tid] * 100.0 / total_thds_adding_time); } puts(""); //printf("Threads_adding_average_time:"); //for (inti tid = 0; tid < NUM_THREADS; ++tid) { // printf(" %f", thds_adding_time[tid] / thds_adding_count[tid]); //} puts(""); cache_miss.print(); #endif printf("Total_labeling_time: %.2f seconds\n", time_labeling); // End test } // Function to get the prefix sum of elements in offsets template <inti BATCH_SIZE> inline idi ParaVertexCentricPLL<BATCH_SIZE>::prefix_sum_for_offsets( vector<idi> &offsets) { idi size_offsets = offsets.size(); if (1 == size_offsets) { idi tmp = offsets[0]; offsets[0] = 0; return tmp; } else if (size_offsets < 2048) { idi offset_sum = 0; idi size = size_offsets; for (idi i = 0; i < size; ++i) { idi tmp = offsets[i]; offsets[i] = offset_sum; offset_sum += tmp; } return offset_sum; } else { // Parallel Prefix Sum, based on Guy E. 
Blelloch's Prefix Sums and Their Applications idi last_element = offsets[size_offsets - 1]; // idi size = 1 << ((idi) log2(size_offsets - 1) + 1); idi size = 1 << ((idi) log2(size_offsets)); // vector<idi> nodes(size, 0); idi tmp_element = offsets[size - 1]; //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // nodes[i] = offsets[i]; // } // Up-Sweep (Reduce) Phase idi log2size = log2(size); for (idi d = 0; d < log2size; ++d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; } } // Down-Sweep Phase offsets[size - 1] = 0; for (idi d = log2(size) - 1; d != (idi) -1 ; --d) { idi by = 1 << (d + 1); #pragma omp parallel for for (idi k = 0; k < size; k += by) { idi t = offsets[k + (1 << d) - 1]; offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; offsets[k + (1 << (d + 1)) - 1] += t; } } //#pragma omp parallel for // for (idi i = 0; i < size_offsets; ++i) { // offsets[i] = nodes[i]; // } if (size != size_offsets) { idi tmp_sum = offsets[size - 1] + tmp_element; for (idi i = size; i < size_offsets; ++i) { idi t = offsets[i]; offsets[i] = tmp_sum; tmp_sum += t; } } return offsets[size_offsets - 1] + last_element; } // // Get the offset as the prefix sum of out degrees // idi offset_sum = 0; // idi size = offsets.size(); // for (idi i = 0; i < size; ++i) { // idi tmp = offsets[i]; // offsets[i] = offset_sum; // offset_sum += tmp; // } // return offset_sum; //// Parallel Prefix Sum, based on Guy E. 
Blelloch's Prefix Sums and Their Applications // idi size_offsets = offsets.size(); // idi last_element = offsets[size_offsets - 1]; //// idi size = 1 << ((idi) log2(size_offsets - 1) + 1); // idi size = 1 << ((idi) log2(size_offsets)); //// vector<idi> nodes(size, 0); // idi tmp_element = offsets[size - 1]; ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// nodes[i] = offsets[i]; //// } // // // Up-Sweep (Reduce) Phase // idi log2size = log2(size); // for (idi d = 0; d < log2size; ++d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // offsets[k + (1 << (d + 1)) - 1] += offsets[k + (1 << d) - 1]; // } // } // // // Down-Sweep Phase // offsets[size - 1] = 0; // for (idi d = log2(size) - 1; d != (idi) -1 ; --d) { // idi by = 1 << (d + 1); //#pragma omp parallel for // for (idi k = 0; k < size; k += by) { // idi t = offsets[k + (1 << d) - 1]; // offsets[k + (1 << d) - 1] = offsets[k + (1 << (d + 1)) - 1]; // offsets[k + (1 << (d + 1)) - 1] += t; // } // } // ////#pragma omp parallel for //// for (idi i = 0; i < size_offsets; ++i) { //// offsets[i] = nodes[i]; //// } // if (size != offsets.size()) { // idi tmp_sum = offsets[size - 1] + tmp_element; // idi i_bound = offsets.size(); // for (idi i = size; i < i_bound; ++i) { // idi t = offsets[i]; // offsets[i] = tmp_sum; // tmp_sum += t; // } // } // // return offsets[size_offsets - 1] + last_element; } // Collect elements in the tmp_queue into the queue template <inti BATCH_SIZE> template <typename T> inline void ParaVertexCentricPLL<BATCH_SIZE>::collect_into_queue( // vector<idi> &tmp_queue, vector<T> &tmp_queue, vector<idi> &offsets_tmp_queue, // the locations in tmp_queue for writing from tmp_queue vector<idi> &offsets_queue, // the locations in queue for writing into queue. 
        idi num_elements, // total number of elements which need to be added from tmp_queue to queue
//        vector<idi> &queue,
        vector<T> &queue,
        idi &end_queue)
{
    if (0 == num_elements) {
        return;
    }
    idi i_bound = offsets_tmp_queue.size();
    // Each group i has a precomputed, disjoint destination slot in queue,
    // so all groups can be copied in parallel without collisions.
#pragma omp parallel for
    for (idi i = 0; i < i_bound; ++i) {
        idi i_q_start = end_queue + offsets_queue[i];
        idi i_q_bound;
        if (i_bound - 1 != i) {
            i_q_bound = end_queue + offsets_queue[i + 1];
        } else {
            // The last group's bound is the total number of new elements.
            i_q_bound = end_queue + num_elements;
        }
        if (i_q_start == i_q_bound) {
            // If the group has no elements to be added, then continue to the next group
            continue;
        }
        idi end_tmp = offsets_tmp_queue[i];
        for (idi i_q = i_q_start; i_q < i_q_bound; ++i_q) {
            queue[i_q] = tmp_queue[end_tmp++];
        }
    }
    end_queue += num_elements;
}

// Function: thread-save enqueue. The queue has enough size already. An index points the end of the queue.
// Reserves a slot by bumping end_queue with a CAS retry loop, then writes into the reserved slot.
// NOTE(review): assumes the project CAS macro provides the required atomicity/ordering on
// end_queue — TODO confirm against its definition.
template <inti BATCH_SIZE>
template <typename T, typename Int>
inline void ParaVertexCentricPLL<BATCH_SIZE>::TS_enqueue(
        vector<T> &queue,
        Int &end_queue,
        const T &e)
{
    volatile Int old_i = end_queue;
    volatile Int new_i = old_i + 1;
    while (!CAS(&end_queue, old_i, new_i)) {
        old_i = end_queue;
        new_i = old_i + 1;
    }
    queue[old_i] = e;
}

// Store the built index to a binary file (plus a ".txt" debug dump):
// header = num_v_ and BITPARALLEL_SIZE, then per vertex (in rank order) the
// bit-parallel labels followed by its normal labels sorted by hub id.
template <inti BATCH_SIZE>
void ParaVertexCentricPLL<BATCH_SIZE>::store_index_to_file(
        const char *filename,
        const vector<idi> &rank)
{
    ofstream fout(filename);
    if (!fout.is_open()) {
        fprintf(stderr, "Error: cannot open file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    std::string txt_filename = std::string(filename) + ".txt";//test
    ofstream txt_out(txt_filename.c_str());
    // Store into file the number of vertices and the number of bit-parallel roots.
    uint64_t labels_count = 0;
    fout.write((char *) &num_v_, sizeof(num_v_));
    fout.write((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE));
    for (idi v_id = 0; v_id < num_v_; ++v_id) {
        idi v_rank = rank[v_id];
        const IndexType &Lv = L[v_rank];
        idi size_labels = Lv.vertices.size();
        labels_count += size_labels;
        // Store Bit-parallel Labels into file.
        for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) {
            weighti d = Lv.bp_dist[b_i];
            uint64_t s0 = Lv.bp_sets[b_i][0];
            uint64_t s1 = Lv.bp_sets[b_i][1];
            fout.write((char *) &d, sizeof(d));
            fout.write((char *) &s0, sizeof(s0));
            fout.write((char *) &s1, sizeof(s1));
        }
        vector< std::pair<idi, weighti> > ordered_labels;
        // Traverse v_id's all existing labels
        // (flatten the batch-relative storage into (global hub id, dist) pairs).
        for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) {
            idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
            idi dist_start_index = Lv.batches[b_i].start_index;
            idi dist_bound_index = dist_start_index + Lv.batches[b_i].size;
            // Traverse dist_matrix
            for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                idi v_start_index = Lv.distances[dist_i].start_index;
                idi v_bound_index = v_start_index + Lv.distances[dist_i].size;
                weighti dist = Lv.distances[dist_i].dist;
                for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                    idi tail = Lv.vertices[v_i] + id_offset;
                    ordered_labels.push_back(std::make_pair(tail, dist));
                }
            }
        }
        // Sort
        sort(ordered_labels.begin(), ordered_labels.end());
        // Store into file
        fout.write((char *) &size_labels, sizeof(size_labels));
//        {//test
//            txt_out << v_id << ":";
//        }
        for (idi l_i = 0; l_i < size_labels; ++l_i) {
            idi l = ordered_labels[l_i].first;
            weighti d = ordered_labels[l_i].second;
            fout.write((char *) &l, sizeof(l));
            fout.write((char *) &d, sizeof(d));
            {//test
                txt_out << v_id << " " << v_rank << ": " << l << " " << (idi) d << std::endl;
            }
        }
//        {//test
//            txt_out << std::endl;
//        }
    }
    // NOTE(review): the %' apostrophe flag (thousands grouping) is a POSIX/locale
    // extension to printf, not standard C — confirm the target platform supports it.
    printf("Label_size: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v_);
    fout.close();
}

// Load an index previously written by store_index_to_file into Index.
template <inti BATCH_SIZE>
void ParaVertexCentricPLL<BATCH_SIZE>::load_index_from_file(
        const char *filename)
{
    std::ifstream fin(filename);
    if (!fin.is_open()) {
        fprintf(stderr, "Error: cannot open file %s\n", filename);
        exit(EXIT_FAILURE);
    }
    idi num_v;
    // Load from file the number of vertices and the number of bit-parallel roots.
// (tail of the index-loading routine; its signature and the opening of `fin`
// appear above this chunk — NOTE(review): confirm against the full header)
fin.read((char *) &num_v, sizeof(num_v));
fin.read((char *) &BITPARALLEL_SIZE, sizeof(BITPARALLEL_SIZE));
num_v_ = num_v;
Index.resize(num_v);
uint64_t labels_count = 0;
// Load labels for every vertex
for (idi v_id = 0; v_id < num_v; ++v_id) {
    IndexOrdered &Iv = Index[v_id];
    // Load Bit-parallel Labels from file: one distance and two 64-bit
    // difference sets per bit-parallel root.
    for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) {
        fin.read((char *) &Iv.bp_dist[b_i], sizeof(Iv.bp_dist[b_i]));
        fin.read((char *) &Iv.bp_sets[b_i][0], sizeof(Iv.bp_sets[b_i][0]));
        fin.read((char *) &Iv.bp_sets[b_i][1], sizeof(Iv.bp_sets[b_i][1]));
    }
    // Normal Labels
    // Load Labels from file.
    idi size_labels;
    fin.read((char *) &size_labels, sizeof(size_labels));
    labels_count += size_labels;
    // +1 slot for the sentinel entry appended below
    Iv.label_id.resize(size_labels + 1);
    Iv.label_dists.resize(size_labels + 1);
    for (idi l_i = 0; l_i < size_labels; ++l_i) {
        fin.read((char *) &Iv.label_id[l_i], sizeof(Iv.label_id[l_i]));
        fin.read((char *) &Iv.label_dists[l_i], sizeof(Iv.label_dists[l_i]));
    }
    // Sentinel: num_v is never a real vertex id, so the merge loop in
    // query_distance can terminate without bounds checks.
    Iv.label_id[size_labels] = num_v; // Sentinel
    Iv.label_dists[size_labels] = (weighti) -1; // Sentinel
}
printf("Label_size_loaded: %'lu mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v);
fin.close();
}

// Convert the batch-compressed label structure L into the flat, sorted,
// sentinel-terminated per-vertex arrays in Index, remapping each vertex
// from rank space back to its original id via rank2id.
template <inti BATCH_SIZE>
void ParaVertexCentricPLL<BATCH_SIZE>::order_labels(
        const vector<idi> &rank2id,
        const vector<idi> &rank)
{
    idi num_v = rank.size();
    vector< vector< pair<idi, weighti> > > ordered_L(num_v);
    idi labels_count = 0;
    Index.resize(num_v);

    // Traverse the L, put them into Index (ordered labels)
    for (idi v_id = 0; v_id < num_v; ++v_id) {
        idi new_v = rank2id[v_id];
        IndexOrdered & Iv = Index[new_v];
        const IndexType &Lv = L[v_id];
        auto &OLv = ordered_L[new_v];
        // Bit-parallel Labels: raw copy of distances and both set words.
        memcpy(&Iv.bp_dist, &Lv.bp_dist, BITPARALLEL_SIZE * sizeof(weighti));
        for (inti b_i = 0; b_i < BITPARALLEL_SIZE; ++b_i) {
            memcpy(&Iv.bp_sets[b_i], &Lv.bp_sets[b_i], 2 * sizeof(uint64_t));
        }

        // Normal Labels
        // Traverse v_id's all existing labels, batch by batch; a stored
        // vertex is relative to its batch, so add the batch's id offset.
        for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) {
            idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
            idi dist_start_index = Lv.batches[b_i].start_index;
            idi dist_bound_index = dist_start_index + Lv.batches[b_i].size;
            // Traverse dist_matrix
            for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                idi v_start_index = Lv.distances[dist_i].start_index;
                idi v_bound_index = v_start_index + Lv.distances[dist_i].size;
                inti dist = Lv.distances[dist_i].dist;
                for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                    idi tail = Lv.vertices[v_i] + id_offset;
                    OLv.push_back(std::make_pair(tail, dist));
                }
            }
        }
        // Sort by vertex id so queries can do a sorted-merge intersection.
        sort(OLv.begin(), OLv.end());
        // Store into Index
        inti size_labels = OLv.size();
        labels_count += size_labels;
        Iv.label_id.resize(size_labels + 1); // Adding one for Sentinel
        Iv.label_dists.resize(size_labels + 1); // Adding one for Sentinel
        for (inti l_i = 0; l_i < size_labels; ++l_i) {
            Iv.label_id[l_i] = OLv[l_i].first;
            Iv.label_dists[l_i] = OLv[l_i].second;
        }
        Iv.label_id[size_labels] = num_v; // Sentinel
        Iv.label_dists[size_labels] = WEIGHTI_MAX; // Sentinel
    }
    // NOTE(review): %u assumes idi is unsigned int — confirm the typedef.
    printf("Label_size: %u mean: %f\n", labels_count, static_cast<double>(labels_count) / num_v);
}

// 2-hop-labeling distance query: combines the bit-parallel bound with a
// sorted-merge over the two vertices' ordered label arrays.
// Returns WEIGHTI_MAX when a and b are disconnected (or out of range).
template <inti BATCH_SIZE>
weighti ParaVertexCentricPLL<BATCH_SIZE>::query_distance(
        idi a,
        idi b)
{
    idi num_v = num_v_;
    if (a >= num_v || b >= num_v) {
        return a == b ? 0 : WEIGHTI_MAX;
    }
    IndexOrdered &Ia = Index[a];
    IndexOrdered &Ib = Index[b];

    inti d = WEIGHTI_MAX;

    // Warm the caches for the merge below.
    _mm_prefetch(&Ia.label_id[0], _MM_HINT_T0);
    _mm_prefetch(&Ib.label_id[0], _MM_HINT_T0);
    _mm_prefetch(&Ia.label_dists[0], _MM_HINT_T0);
    _mm_prefetch(&Ib.label_dists[0], _MM_HINT_T0);

    // Bit-Parallel Labels: the set intersections can lower the summed
    // distance by at most 2, hence the td - 2 <= d pruning test.
    for (int i = 0; i < BITPARALLEL_SIZE; ++i) {
        int td = Ia.bp_dist[i] + Ib.bp_dist[i];
        if (td - 2 <= d) {
            td +=
                (Ia.bp_sets[i][0] & Ib.bp_sets[i][0]) ? -2 :
                ((Ia.bp_sets[i][0] & Ib.bp_sets[i][1]) | (Ia.bp_sets[i][1] & Ib.bp_sets[i][0]))
                ? -1 : 0;
            if (td < d) {
                d = td;
            }
        }
    }

    // Normal Labels (ordered): sorted-merge intersection of the two label
    // arrays; the num_v sentinel appended by order_labels ends the loop.
    // Sequential Version
    for (idi i1 = 0, i2 = 0; ; ) {
        idi v1 = Ia.label_id[i1], v2 = Ib.label_id[i2];
        if (v1 == v2) {
            if (v1 == num_v) {
                break;  // Sentinel
            }
            inti td = Ia.label_dists[i1] + Ib.label_dists[i2];
            if (td < d) {
                d = td;
            }
            ++i1;
            ++i2;
        } else {
            // Advance whichever side is behind.
            i1 += v1 < v2 ? 1 : 0;
            i2 += v1 > v2 ? 1 : 0;
        }
    }

    // Anything within 2 of WEIGHTI_MAX can only come from the unpruned
    // bit-parallel sum of two "unreachable" distances: report unreachable.
    if (d >= WEIGHTI_MAX - 2) {
        d = WEIGHTI_MAX;
    }
    return d;
}

// Flatten L into new_L, translating each label's vertex from rank space to
// original ids (v_id -> rank2id[v_id]); labels themselves keep their stored
// tail ids plus batch offset. Prints the label-count statistics.
template <inti BATCH_SIZE>
void ParaVertexCentricPLL<BATCH_SIZE>::switch_labels_to_old_id(
        const vector<idi> &rank2id,
        const vector<idi> &rank)
{
    idi label_sum = 0;
    idi test_label_sum = 0;

    idi num_v = rank.size();
    vector< vector< pair<idi, weighti> > > new_L(num_v);

    for (idi v_id = 0; v_id < num_v; ++v_id) {
        idi new_v = rank2id[v_id];
        const IndexType &Lv = L[v_id];
        // Traverse v_id's all existing labels
        for (inti b_i = 0; b_i < Lv.batches.size(); ++b_i) {
            idi id_offset = Lv.batches[b_i].batch_id * BATCH_SIZE;
            idi dist_start_index = Lv.batches[b_i].start_index;
            idi dist_bound_index = dist_start_index + Lv.batches[b_i].size;
            // Traverse dist_matrix
            for (idi dist_i = dist_start_index; dist_i < dist_bound_index; ++dist_i) {
                label_sum += Lv.distances[dist_i].size;
                idi v_start_index = Lv.distances[dist_i].start_index;
                idi v_bound_index = v_start_index + Lv.distances[dist_i].size;
                inti dist = Lv.distances[dist_i].dist;
                for (idi v_i = v_start_index; v_i < v_bound_index; ++v_i) {
                    idi tail = Lv.vertices[v_i] + id_offset;
                    new_L[new_v].push_back(std::make_pair(tail, dist));
                    ++test_label_sum;
                }
            }
        }
    }
    printf("Label sum: %u %u mean: %f\n", label_sum, test_label_sum, label_sum * 1.0 / num_v);
}

} // end of enclosing namespace (opened before this chunk)

#endif /* INCLUDES_PADO_H_ */
solver.h
#include "mesh.h"
#include "arap.h"
#include "elastic.h"
#include <LBFGS.h>

using namespace LBFGSpp;
using json = nlohmann::json;
using namespace Eigen;
using namespace std;

// Objective functor handed to LBFGS++: evaluates the weighted sum of the
// neohookean (elastic) energy and the ARAP energy of the reduced model, and
// its gradient with respect to the reduced stretch variables x.  Also carries
// a suite of central-finite-difference checkers (Ex, Er, Es, Exx, Exr, Exs,
// Err, Ers, ...) used to validate the analytic derivatives when `stest` is on.
class Rosenbrock {
private:
    int n;                  // number of reduced DOFs passed by the caller
    Mesh* mesh;
    Arap* arap;
    Elastic* elas;
    double alpha_arap = 1;  // weight of the ARAP energy term
    double alpha_neo = 1;   // weight of the neohookean energy term
    double eps = 1.5e-8;    // finite-difference step for the checkers
    bool stest = false;     // run the expensive gradient sanity checks

public:
    // j_input must provide numeric entries "alpha_arap" and "alpha_neo".
    Rosenbrock(int n_, Mesh* m, Arap* a, Elastic* e, json& j_input, bool test=false) : n(n_) {
        mesh = m;
        arap = a;
        elas = e;
        alpha_arap = j_input["alpha_arap"];
        alpha_neo = j_input["alpha_neo"];
        stest = test;
    }

    // Per-rotation axis-angle difference: returns w such that [w]x = log(R0^T R),
    // where r0 and r are stacked row-major 3x3 rotation matrices (9 doubles each).
    VectorXd get_w(VectorXd& r0, VectorXd& r){
        VectorXd w = VectorXd::Zero(r0.size()/3);
        for(int i=0; i<r0.size()/9; i++){
            Matrix3d R0, R;
            R0<<r0[9*i+0],r0[9*i+1],r0[9*i+2],
                r0[9*i+3],r0[9*i+4],r0[9*i+5],
                r0[9*i+6],r0[9*i+7],r0[9*i+8];
            R<<r[9*i+0],r[9*i+1],r[9*i+2],
               r[9*i+3],r[9*i+4],r[9*i+5],
               r[9*i+6],r[9*i+7],r[9*i+8];
            Matrix3d exp_brac_w = R0.transpose()*R;
            Matrix3d brac_w = exp_brac_w.log(); // matrix log (Eigen MatrixFunctions)

            // Extract the skew-symmetric part's axis components.
            w[3*i+0] = brac_w(2,1);
            w[3*i+1] = brac_w(0,2);
            w[3*i+2] = brac_w(1,0);
        }
        return w;
    }

    //CHECK E,x-------------
    // Central finite-difference approximation of dE/dx.
    // FIX: the original mutated one shared copy of z inside the omp-parallel
    // loop (z[i] += ...; ...; z[i] -= ...), a data race in which concurrent
    // iterations observed each other's perturbations.  Each iteration now
    // perturbs its own private copy; z itself is read-only and safe to share.
    // NOTE(review): assumes arap.Energy() does not mutate mesh — the
    // original's use of OpenMP here already relied on that; confirm.
    VectorXd Ex(Mesh& mesh, Arap& arap, double E0, double eps){
        VectorXd z = mesh.red_x();
        VectorXd fake = VectorXd::Zero(z.size());
        #pragma omp parallel for
        for(int i=0; i<fake.size(); i++){
            VectorXd zi = z; // thread-private perturbed copy
            zi[i] += 0.5*eps;
            double Eleft = arap.Energy(mesh, zi, mesh.red_w(), mesh.red_r(), mesh.red_s());
            zi[i] -= eps;
            double Eright = arap.Energy(mesh, zi, mesh.red_w(), mesh.red_r(), mesh.red_s());
            fake[i] = (Eleft - Eright)/eps;
        }
        return fake;
    }
    //-----------------------

    //CHECK E,r-------------
    // Central finite-difference approximation of dE/dr (rotation DOFs).
    // Mutates mesh.red_w() in place and restores it; intentionally sequential.
    VectorXd Er(Mesh& mesh, Arap& arap, double E0, double eps){
        VectorXd z = mesh.red_x();
        VectorXd fake = VectorXd::Zero(mesh.red_w().size());
        for(int i=0; i<fake.size(); i++){
            mesh.red_w()[i] += 0.5*eps;
            double Eleft = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
            mesh.red_w()[i] -= eps;
            double Eright = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
            mesh.red_w()[i] += 0.5*eps; // restore
            fake[i] = (Eleft - Eright)/(eps);
        }
        return fake;
    }
    //-----------------------

    //CHECK E,s-------------
    // Central finite-difference approximation of dE/ds (stretch DOFs).
    VectorXd Es(Mesh& mesh, Arap& arap, double E0, double eps){
        VectorXd z = mesh.red_x();
        VectorXd fake = VectorXd::Zero(mesh.red_s().size());
        for(int i=0; i<fake.size(); i++){
            mesh.red_s()[i] += 0.5*eps;
            double Eleft = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
            mesh.red_s()[i] -= eps;
            double Eright = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
            mesh.red_s()[i] += 0.5*eps; // restore
            fake[i] = (Eleft - Eright)/eps;
        }
        return fake;
    }
    //-----------------------

    //CHECK Exx--------------
    // Second-derivative check: fake(i,j) ~ d2E/dx_i dx_j via the standard
    // (E(i+,j+) - E(i+) - E(j+) + E0)/eps^2 stencil.
    MatrixXd Exx(Mesh& mesh, Arap& arap, double E0, double eps){
        MatrixXd fake = MatrixXd::Zero(mesh.red_x().size(), mesh.red_x().size());
        VectorXd z = mesh.red_x();
        for(int i=0; i<fake.rows(); i++){
            for(int j=0; j<fake.cols(); j++){
                z[i] += eps;
                z[j] += eps;
                double Eij = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                z[i] -= eps;
                z[j] -= eps;

                z[i] += eps;
                double Ei = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                z[i] -= eps;

                z[j] += eps;
                double Ej = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                z[j] -= eps;

                fake(i,j) = ((Eij - Ei - Ej + E0)/(eps*eps));
            }
        }
        return fake;
    }
    //-----------------------

    //CHECK Exr/Erx-------------
    // Mixed second derivative d2E/dx_i dr_j (same stencil as Exx).
    MatrixXd Exr(Mesh& mesh, Arap& arap, double E0, double eps){
        MatrixXd fake = MatrixXd::Zero(mesh.red_x().size(), mesh.red_w().size());
        VectorXd z = mesh.red_x();
        for(int i=0; i<fake.rows(); i++){
            for(int j=0; j<fake.cols(); j++){
                mesh.red_w()[j] += eps;
                z[i] += eps;
                double Eij = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_w()[j] -= eps;
                z[i] -= eps;

                mesh.red_w()[j] += eps;
                double Ei = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_w()[j] -= eps;

                z[i] += eps;
                double Ej = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                z[i] -= eps;

                fake(i,j) = ((Eij - Ei - Ej + E0)/(eps*eps));
            }
        }
        return fake;
    }
    //-----------------------

    //CHECK Exs-------------
    // Mixed second derivative d2E/dx_i ds_j.
    MatrixXd Exs(Mesh& mesh, Arap& arap, double E0, double eps){
        MatrixXd fake = MatrixXd::Zero(mesh.red_x().size(), mesh.red_s().size());
        VectorXd z = mesh.red_x();
        for(int i=0; i<fake.rows(); i++){
            for(int j=0; j<fake.cols(); j++){
                mesh.red_s()[j] += eps;
                z[i] += eps;
                double Eij = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_s()[j] -= eps;
                z[i] -= eps;

                mesh.red_s()[j] += eps;
                double Ei = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_s()[j] -= eps;

                z[i] += eps;
                double Ej = arap.Energy(mesh, z, mesh.red_w(), mesh.red_r(), mesh.red_s());
                z[i] -= eps;

                fake(i,j) = ((Eij - Ei - Ej + E0)/(eps*eps));
            }
        }
        return fake;
    }
    //-----------------------

    //CHECK Err--------------
    // Second derivative d2E/dr_i dr_j.
    MatrixXd Err(Mesh& mesh, Arap& arap, double E0, double eps){
        MatrixXd fake = MatrixXd::Zero(mesh.red_w().size(), mesh.red_w().size());
        for(int i=0; i<fake.rows(); i++){
            for(int j=0; j<fake.cols(); j++){
                mesh.red_w()[j] += eps;
                mesh.red_w()[i] += eps;
                double Eij = arap.Energy(mesh, mesh.red_x(), mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_w()[j] -= eps;
                mesh.red_w()[i] -= eps;

                mesh.red_w()[j] += eps;
                double Ei = arap.Energy(mesh, mesh.red_x(), mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_w()[j] -= eps;

                mesh.red_w()[i] += eps;
                double Ej = arap.Energy(mesh, mesh.red_x(), mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_w()[i] -= eps;

                fake(i,j) = ((Eij - Ei - Ej + E0)/(eps*eps));
            }
        }
        return fake;
    }
    //-----------------------

    //CHECK Ers--------------
    // Mixed second derivative d2E/dr_i ds_j.
    MatrixXd Ers(Mesh& mesh, Arap& arap, double E0, double eps){
        MatrixXd fake = MatrixXd::Zero(mesh.red_w().size(), mesh.red_s().size());
        for(int i=0; i<fake.rows(); i++){
            for(int j=0; j<fake.cols(); j++){
                mesh.red_w()[i] += eps;
                mesh.red_s()[j] += eps;
                double Eij = arap.Energy(mesh, mesh.red_x(), mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_s()[j] -= eps;
                mesh.red_w()[i] -= eps;

                mesh.red_w()[i] += eps;
                double Ei = arap.Energy(mesh, mesh.red_x(), mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_w()[i] -= eps;

                mesh.red_s()[j] += eps;
                double Ej = arap.Energy(mesh, mesh.red_x(), mesh.red_w(), mesh.red_r(), mesh.red_s());
                mesh.red_s()[j] -= eps;

                fake(i,j) = ((Eij - Ei - Ej + E0)/(eps*eps));
            }
        }
        return fake;
    }
    //-----------------------

    // Finite-difference check of the full ARAP gradient w.r.t. s, re-solving
    // the inner ARAP minimization at each perturbed point (expensive).
    VectorXd Full_ARAP_Grad(Mesh& mesh, Arap& arap, Elastic& elas, double E0, double eps){
        VectorXd fake = VectorXd::Zero(mesh.red_s().size());
        for(int i=0; i<fake.size(); i++){
            mesh.red_s()[i] += 0.5*eps;
            arap.minimize(mesh);
            double Eleft = alpha_arap*arap.Energy(mesh);
            mesh.red_s()[i] -= eps;
            arap.minimize(mesh);
            double Eright = alpha_arap*arap.Energy(mesh);
            mesh.red_s()[i] += 0.5*eps; // restore
            fake[i] = (Eleft - Eright)/eps;
        }
        // Re-solve at the unperturbed point so mesh state is left consistent.
        arap.minimize(mesh);
        return fake;
    }

    // Finite-difference check of the neohookean (Wikipedia-form) energy
    // gradient w.r.t. s.
    VectorXd WikipediaEnergy_grad(Mesh& mesh, Elastic& elas, double eps){
        VectorXd fake = VectorXd::Zero(mesh.red_s().size());
        for(int i=0; i<fake.size(); i++){
            mesh.red_s()[i] += 0.5*eps;
            double Eleft = elas.WikipediaEnergy(mesh);
            mesh.red_s()[i] -= eps;
            double Eright = elas.WikipediaEnergy(mesh);
            mesh.red_s()[i] += 0.5*eps; // restore
            fake[i] = (Eleft - Eright)/eps;
        }
        return fake;
    }

    // LBFGS++ evaluation operator: writes the projected s, solves the inner
    // ARAP problem, returns E = alpha_neo*Eneo + alpha_arap*Earap and (when
    // computeGrad) fills grad with the reduced-space gradient.
    double operator()(const VectorXd& x, VectorXd& grad, bool computeGrad = true) {
        // Map the optimization variables back into the mesh's stretch DOFs:
        // free part from x, constrained part kept via the AN projector.
        VectorXd reds = mesh->N()*x + mesh->AN()*mesh->AN().transpose()*mesh->red_s();
        for(int i=0; i<reds.size(); i++){
            mesh->red_s()[i] = reds[i];
        }

        double Eneo = alpha_neo*elas->Energy(*mesh);
        arap->minimize(*mesh); // inner solve; iteration count not needed
        double Earap = alpha_arap*arap->Energy(*mesh);
        double fx = Eneo + Earap;

        if(computeGrad){
            VectorXd pegrad = alpha_neo*mesh->N().transpose()*elas->PEGradient(*mesh);
            VectorXd arapgrad = alpha_arap*mesh->N().transpose()*arap->dEds(*mesh);

            if(stest){
                // Compare the analytic ARAP gradient against finite
                // differences; on gross disagreement dump every partial
                // derivative check and abort.
                VectorXd fake_arap = mesh->N().transpose()*Full_ARAP_Grad(*mesh, *arap,*elas, fx, eps);
                if ((arapgrad-fake_arap).norm()>10){
                    double E0 = arap->Energy(*mesh);
                    std::cout<<"fake arap issues"<<std::endl;
                    std::cout<<arapgrad.transpose()<<std::endl<<std::endl;
                    std::cout<<fake_arap.transpose()<<std::endl<<std::endl;
                    cout<<"s"<<endl;
                    std::cout<<x.transpose()<<endl<<endl;
                    cout<<"r"<<endl;
                    cout<<mesh->red_r().transpose()<<endl<<endl;
                    cout<<"x"<<endl;
                    cout<<mesh->red_x().transpose()<<endl<<endl;
                    cout<<"-------------------------------------"<<endl;
                    cout<<"Ex"<<endl;
                    VectorXd fakeEx = Ex(*mesh, *arap, E0, eps);
                    cout<<(arap->Ex().transpose()-fakeEx.transpose()).norm()<<endl<<endl;
                    cout<<"Er"<<endl;
                    VectorXd fakeEr = Er(*mesh, *arap, E0, eps);
                    cout<<(arap->Er().transpose()-fakeEr.transpose()).norm()<<endl<<endl;
                    cout<<"Es"<<endl;
                    VectorXd fakeEs = Es(*mesh, *arap,E0, eps);
                    cout<<(arap->Es().transpose() - fakeEs.transpose()).norm()<<endl<<endl;
                    MatrixXd fakeExx = Exx(*mesh, *arap, E0, eps);
                    cout<<"Exx"<<endl;
                    cout<<(fakeExx-MatrixXd(arap->Exx())).norm()<<endl<<endl;
                    cout<<endl<<endl;
                    MatrixXd fakeExr = Exr(*mesh, *arap, E0, eps);
                    cout<<"Exr"<<endl;
                    cout<<(fakeExr-MatrixXd(arap->Exr())).norm()<<endl<<endl;
                    cout<<endl<<endl;
                    cout<<"Exs"<<endl;
                    MatrixXd fakeExs = Exs(*mesh, *arap, E0, eps);
                    cout<<(fakeExs-MatrixXd(arap->Exs())).norm()<<endl<<endl;
                    cout<<endl;
                    cout<<"Err"<<endl;
                    MatrixXd fakeErr = Err(*mesh, *arap, E0, eps);
                    cout<<(fakeErr-MatrixXd(arap->Err())).norm()<<endl<<endl;
                    cout<<endl;
                    cout<<"Ers"<<endl;
                    MatrixXd fakeErs = Ers(*mesh, *arap, E0, eps);
                    cout<<(fakeErs-MatrixXd(arap->Ers())).norm()<<endl<<endl;
                    cout<<endl;
                    exit(0);
                }
            }

            for(int i=0; i< x.size(); i++){
                grad[i] = pegrad[i];
                grad[i] += arapgrad[i];
            }
            std::cout<<"BFGS: "<<Eneo<<", "<<Earap<<", "<<pegrad.norm()<<", "<<arapgrad.norm()<<","<<grad.norm()<<std::endl;
        }
        return fx;
    }
};
GB_binop__le_uint16.c
//------------------------------------------------------------------------------
// GB_binop:  hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).
// NOTE(review): auto-generated specialization of the LE (<=) comparator for
// uint16_t inputs producing bool output.  Only comments may safely change;
// any code fix belongs in the Generator/ template.

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd):         GB_AaddB__le_uint16
// A.*B function (eWiseMult):       GB_AemultB__le_uint16
// A*D function (colscale):         GB_AxD__le_uint16
// D*A function (rowscale):         GB_DxB__le_uint16
// C+=B function (dense accum):     GB_Cdense_accumB__le_uint16
// C+=b function (dense accum):     GB_Cdense_accumb__le_uint16
// C+=A+B function (dense ewise3):  (none)
// C=A+B function (dense ewise3):   GB_Cdense_ewise3_noaccum__le_uint16
// C=scalar+B                       GB_bind1st__le_uint16
// C=scalar+B'                      GB_bind1st_tran__le_uint16
// C=A+scalar                       GB_bind2nd__le_uint16
// C=A'+scalar                      GB_bind2nd_tran__le_uint16

// C type:   bool
// A type:   uint16_t
// B,b type: uint16_t

// BinaryOp: cij = (aij <= bij)

#define GB_ATYPE \
    uint16_t

#define GB_BTYPE \
    uint16_t

#define GB_CTYPE \
    bool

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    0

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    0

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    uint16_t aij = Ax [pA]

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
    uint16_t bij = Bx [pB]

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    bool t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
    cij = Ax [pA]

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
    cij = Bx [pB]

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z, x, y) \
    z = (x <= y) ;

// op is second
#define GB_OP_IS_SECOND \
    0

// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
    0

// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
    0

// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
    (none)

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_LE || GxB_NO_UINT16 || GxB_NO_LE_UINT16)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
// LE is a comparator, so this kernel is compiled out for this operator.
void (none)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_ewise3_noaccum__le_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumB__le_uint16
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out: LE is not a valid dense-accum operator
    #if 0
    {
        #include "GB_dense_subassign_23_template.c"
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB_Cdense_accumb__le_uint16
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    // body compiled out: LE is not a valid dense-accum operator
    #if 0
    {
        // get the scalar b for C += b, of type uint16_t
        uint16_t bwork = (*((uint16_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_AxD__le_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *GB_RESTRICT kfirst_slice,
    const int64_t *GB_RESTRICT klast_slice,
    const int64_t *GB_RESTRICT pstart_slice,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_colscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB_DxB__le_uint16
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *GB_RESTRICT Cx = (bool *) C->x ;
    #include "GB_AxB_rowscale_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------

GrB_Info GB_AaddB__le_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_add_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------

GrB_Info GB_AemultB__le_uint16
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *GB_RESTRICT C_to_M,
    const int64_t *GB_RESTRICT C_to_A,
    const int64_t *GB_RESTRICT C_to_B,
    const GB_task_struct *GB_RESTRICT TaskList,
    const int ntasks,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

GrB_Info GB_bind1st__le_uint16
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    bool *Cx = (bool *) Cx_output ;
    uint16_t   x = (*((uint16_t *) x_input)) ;
    uint16_t *Bx = (uint16_t *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t bij = Bx [p] ;
        Cx [p] = (x <= bij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

GrB_Info GB_bind2nd__le_uint16
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    bool *Cx = (bool *) Cx_output ;
    uint16_t *Ax = (uint16_t *) Ax_input ;
    uint16_t   y = (*((uint16_t *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        uint16_t aij = Ax [p] ;
        Cx [p] = (aij <= y) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (x <= aij) ;          \
}

GrB_Info GB_bind1st_tran__le_uint16
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t x = (*((const uint16_t *) x_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for any later use of the macro
    #undef  GB_ATYPE
    #define GB_ATYPE \
        uint16_t
}

//------------------------------------------------------------------------------
// C = op (A', y):  transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typcasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)           \
{                                   \
    uint16_t aij = Ax [pA] ;        \
    Cx [pC] = (aij <= y) ;          \
}

GrB_Info GB_bind2nd_tran__le_uint16
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint16_t y = (*((const uint16_t *) y_input)) ;
    #define GB_PHASE_2_OF_2
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
GB_binop__rminus_fp32.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_fp32) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_fp32) // A*D function (colscale): GB (_AxD__rminus_fp32) // D*A function (rowscale): GB (_DxB__rminus_fp32) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_fp32) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_fp32) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_fp32) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_fp32) // C=scalar+B GB (_bind1st__rminus_fp32) // C=scalar+B' GB (_bind1st_tran__rminus_fp32) // C=A+scalar GB (_bind2nd__rminus_fp32) // C=A'+scalar GB (_bind2nd_tran__rminus_fp32) // C type: float // A type: float // A pattern? 0 // B type: float // B pattern? 
0 // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ float #define GB_BTYPE \ float #define GB_CTYPE \ float // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ float aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ float bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ float t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_FP32 || GxB_NO_RMINUS_FP32) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
// C += A+B, where all three matrices are dense (no mask, no typecast).
void GB (_Cdense_ewise3_accum__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

void GB (_Cdense_ewise3_noaccum__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_noaccum_template.c"
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

// B_ek_slicing/B_ntasks/B_nthreads: parallel slicing of B's entries.
GrB_Info GB (_Cdense_accumB__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__rminus_fp32)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type float
        float bwork = (*((float *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // unreachable; generator emits a second return after the block
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix D,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix D,
    const GrB_Matrix B,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *restrict Cx = (float *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__rminus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool is_eWiseUnion,
    const GB_void *alpha_scalar_in,
    const GB_void *beta_scalar_in,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    // eWiseUnion passes alpha/beta scalars used where A or B has no entry
    float alpha_scalar ;
    float beta_scalar ;
    if (is_eWiseUnion)
    {
        alpha_scalar = (*((float *) alpha_scalar_in)) ;
        beta_scalar  = (*((float *) beta_scalar_in )) ;
    }
    #include "GB_add_template.c"
    GB_FREE_WORKSPACE ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__rminus_fp32)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
        // The operator is not commutative, and does not have a flipped
        // variant.  For example z=atan2(y,x).
        if (flipxy)
        {
            // use fmult(y,x)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 1
            #include "GB_emult_02_template.c"
        }
        else
        {
            // use fmult(x,y)
            #undef  GB_FLIPPED
            #define GB_FLIPPED 0
            #include "GB_emult_02_template.c"
        }
    #else
        // No need to handle the flip: the operator is either commutative, or
        // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
        #undef  GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    #endif
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_04__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict Cp_kfirst,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_04_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_bitmap__rminus_fp32)
(
    GrB_Matrix C,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_bitmap_emult_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (x,Bx):  apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------

// Cx [p] = rminus (x, Bx [p]) = Bx [p] - x, for all entries present in B.
GrB_Info GB (_bind1st__rminus_fp32)
(
    GB_void *Cx_output,         // Cx and Bx may be aliased
    const GB_void *x_input,
    const GB_void *Bx_input,
    const int8_t *restrict Bb,  // bitmap of B (NULL if B is full)
    int64_t bnz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float *Cx = (float *) Cx_output ;
    float   x = (*((float *) x_input)) ;
    float *Bx = (float *) Bx_input ;
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < bnz ; p++)
    {
        if (!GBB (Bb, p)) continue ;
        float bij = GBX (Bx, p, false) ;
        Cx [p] = (bij - x) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// Cx = op (Ax,y):  apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------

// Cx [p] = rminus (Ax [p], y) = y - Ax [p], for all entries present in A.
GrB_Info GB (_bind2nd__rminus_fp32)
(
    GB_void *Cx_output,         // Cx and Ax may be aliased
    const GB_void *Ax_input,
    const GB_void *y_input,
    const int8_t *restrict Ab,  // bitmap of A (NULL if A is full)
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    float *Cx = (float *) Cx_output ;
    float *Ax = (float *) Ax_input ;
    float   y = (*((float *) y_input)) ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        if (!GBB (Ab, p)) continue ;
        float aij = GBX (Ax, p, false) ;
        Cx [p] = (y - aij) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (aij - x) ;               \
}

GrB_Info GB (_bind1st_tran__rminus_fp32)
(
    GrB_Matrix C,
    const GB_void *x_input,
    const GrB_Matrix A,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    // GB_unop_transpose.c uses GB_ATYPE, but A is
    // the 2nd input to binary operator z=f(x,y).
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float x = (*((const float *) x_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
    // restore GB_ATYPE for subsequent code (same type here, by construction)
    #undef  GB_ATYPE
    #define GB_ATYPE \
        float
}

//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------

// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef  GB_CAST_OP
#define GB_CAST_OP(pC,pA)               \
{                                       \
    float aij = GBX (Ax, pA, false) ;   \
    Cx [pC] = (y - aij) ;               \
}

GrB_Info GB (_bind2nd_tran__rminus_fp32)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GB_void *y_input,
    int64_t *restrict *Workspaces,
    const int64_t *restrict A_slice,
    int nworkspaces,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    float y = (*((const float *) y_input)) ;
    #include "GB_unop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif

0fb71d8c99a5efbcd5cf722837fd0590c524be99.c
#define _POSIX_C_SOURCE 200809L
#include "stdlib.h"
#include "math.h"
#include "sys/time.h"
#include "xmmintrin.h"
#include "pmmintrin.h"
#include "omp.h"

// Generated (Devito-style) data descriptor: raw grid data plus the various
// size/padding/halo arrays the code generator emits.
struct dataobj
{
  void *restrict data;
  int * size;
  int * npsize;
  int * dsize;
  int * hsize;
  int * hofs;
  int * oofs;
} ;

// Wall-clock accumulator for the single generated code section.
struct profiler
{
  double section0;
} ;

// Fill the absorbing-boundary ("abc") padding layers of the 2-D velocity
// field vp by copying the first/last interior row/column outward.
//
//   vp_vec                  : velocity field, logical shape size[0] x size[1]
//   x_m..x_M, y_m..y_M      : interior index ranges
//   abc_{x,y}_{l,r}_*tkn    : thickness of the left/right damping layers
//   timers                  : section0 accumulates elapsed seconds
//   nthreads                : OpenMP thread count per parallel region
//
// Always returns 0 (generated-code convention).
int padfunc(struct dataobj *restrict vp_vec, const int x_M, const int y_M, const int abc_x_l_ltkn, const int abc_x_r_rtkn, const int abc_y_l_ltkn, const int abc_y_r_rtkn, struct profiler * timers, const int x_m, const int y_m, const int nthreads)
{
  // Reinterpret the flat buffer as a 2-D array; +6 offsets below skip the
  // 6-point halo the generator added on each side.
  float (*restrict vp)[vp_vec->size[1]] __attribute__ ((aligned (64))) = (float (*)[vp_vec->size[1]]) vp_vec->data;

  /* Flush denormal numbers to zero in hardware */
  _MM_SET_DENORMALS_ZERO_MODE(_MM_DENORMALS_ZERO_ON);
  _MM_SET_FLUSH_ZERO_MODE(_MM_FLUSH_ZERO_ON);

  struct timeval start_section0, end_section0;
  gettimeofday(&start_section0, NULL);

  /* Begin section0 */
  // Left x damping layer: replicate a fixed interior row.
  // NOTE(review): the source indices 46 and x_M - 34 below are constants
  // baked in by the code generator (edge of the damping region for this
  // particular grid) — confirm against the generating script if the grid
  // or layer thickness changes.
  for (int abc_x_l = x_m; abc_x_l <= abc_x_l_ltkn + x_m - 1; abc_x_l += 1)
  {
    #pragma omp parallel num_threads(nthreads)
    {
      #pragma omp for collapse(1) schedule(dynamic,1)
      for (int y = y_m; y <= y_M; y += 1)
      {
        vp[abc_x_l + 6][y + 6] = vp[46][y + 6];
      }
    }
  }
  // Right x damping layer.
  for (int abc_x_r = -abc_x_r_rtkn + x_M + 1; abc_x_r <= x_M; abc_x_r += 1)
  {
    #pragma omp parallel num_threads(nthreads)
    {
      #pragma omp for collapse(1) schedule(dynamic,1)
      for (int y = y_m; y <= y_M; y += 1)
      {
        vp[abc_x_r + 6][y + 6] = vp[x_M - 34][y + 6];
      }
    }
  }
  // Top and bottom y damping layers, one pass over all x.
  #pragma omp parallel num_threads(nthreads)
  {
    #pragma omp for collapse(1) schedule(dynamic,1)
    for (int x = x_m; x <= x_M; x += 1)
    {
      for (int abc_y_l = y_m; abc_y_l <= abc_y_l_ltkn + y_m - 1; abc_y_l += 1)
      {
        vp[x + 6][abc_y_l + 6] = vp[x + 6][46];
      }
      for (int abc_y_r = -abc_y_r_rtkn + y_M + 1; abc_y_r <= y_M; abc_y_r += 1)
      {
        vp[x + 6][abc_y_r + 6] = vp[x + 6][y_M - 34];
      }
    }
  }
  /* End section0 */

  gettimeofday(&end_section0, NULL);
  timers->section0 += (double)(end_section0.tv_sec-start_section0.tv_sec)+(double)(end_section0.tv_usec-start_section0.tv_usec)/1000000;

  return 0;
}
fill.c
// Matrix-assembly callbacks implemented on the C++ side; `ptr` is an opaque
// handle to the block-sparse matrix object.
#ifdef __cplusplus
extern "C" {
#endif
extern void CXX_Fill_alpha(const unsigned int row, const unsigned int col, const double a, const double v[], void *ptr);
extern void CXX_Fill(const unsigned int row, const unsigned int col, const double v[], void *ptr);
extern void CXX_Fill_boundary(const unsigned int row, const unsigned int col, const unsigned int n, const double v[], void *ptr);
extern void CXX_Fill_diagonal(const unsigned int row, const unsigned int col, const double v, void *ptr);
extern void CXX_Fill_Reset(void *ptr);
#ifdef __cplusplus
}
#endif

#include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>

#include "geometry.h"
#include "bench.h"
#include "phy.h"
#include "core_kernel.h"

// NOTE: the previous C implementations of fill/fill_alpha/fill_boundary/
// fill_diagonal (direct CSR scatter into ia/ja/aa) were superseded by the
// CXX_Fill* callbacks above; the dead commented-out copies were removed.

// Assemble the implicit-system matrix A for an incompressible (artificial
// compressibility) flow solver:
//   1. add the time-step terms cdt[i] to each 4x4 diagonal block,
//   2. for each edge, add the flux Jacobians 0.5*(A +/- |A|) of both
//      endpoint nodes (edges are pre-partitioned per thread via ie[]),
//   3. apply solid-surface and far-field boundary contributions.
//
// Constants B (artificial compressibility), P/U/V/W (freestream state) come
// from phy.h.  q holds [p,u,v,w] per node with block size bsz; x0..x3 are the
// edge face-normal components and its magnitude; part[] maps nodes to the
// owning thread so each block row is written by exactly one thread.
static void _KRN_ComputeA(
    const size_t nnodes,
    const size_t nsnodes,
    const size_t nfnodes,
    const uint32_t bsz,
    const uint32_t *nsptr,
    const uint32_t *nfptr,
    const double *s_xyz0,
    const double *s_xyz1,
    const double *s_xyz2,
    const double *f_xyz0,
    const double *f_xyz1,
    const double *f_xyz2,
    const uint32_t *ie,
    const uint32_t *part,
    const uint32_t *n0,
    const uint32_t *n1,
    const double *x0,
    const double *x1,
    const double *x2,
    const double *x3,
    const double *q,
    const double *cdt,
    void *matrix)//,
    //const size_t nnz,
    //const uint32_t bsz2,
    //const uint32_t *ia,
    //const uint32_t *ja,
    //double *aa)
{
    //memset(aa, 0, nnz * sizeof(double));
    CXX_Fill_Reset(matrix);

    #pragma omp parallel
    {
        uint32_t i;

        // Time-step term on the diagonal of each 4x4 block.
        #pragma omp for
        for(i = 0; i < nnodes; i++)
        {
            // Store in the diagonal of the block
            //fill_diagonal(i, i, cdt[i], bsz2, ia, ja, aa);
            CXX_Fill_diagonal(i, i, cdt[i], matrix);
        }
        #pragma omp barrier

        // Edge loop: each thread walks its pre-assigned edge range so that
        // all writes to a node's block row come from the node's owner thread.
        uint32_t t = (uint32_t) omp_get_thread_num();
        uint32_t ie0 = ie[t];
        uint32_t ie1 = ie[t+1];

        for(i = ie0; i < ie1; i++)
        {
            const uint32_t node0 = n0[i];
            const uint32_t node1 = n1[i];

            // Unit face normal (xn,yn,zn) and face area ln.
            const double xn = x0[i];
            const double yn = x1[i];
            const double zn = x2[i];
            const double ln = x3[i];

            /*
              Now lets get our other 2 vectors
              For first vector, use {1,0,0} and subtract off the component
              in the direction of the face normal. If the inner product of
              {1,0,0} is close to unity, use {0,1,0}
            */
            double dot = xn;
            double X1, Y1, Z1;
            if(fabs(dot) < 0.95f)
            {
                X1 = 1.f - dot * xn;
                Y1 = - dot * yn;
                Z1 = - dot * zn;
            }
            else
            {
                dot = yn;
                X1 = - dot * xn;
                Y1 = 1.f - dot * yn;
                Z1 = - dot * zn;
            }

            /* Normalize the first vector */
            double size = X1 * X1;
            size += Y1 * Y1;
            size += Z1 * Z1;
            size = sqrt(size);
            X1 /= size;
            Y1 /= size;
            Z1 /= size;

            /* Take cross-product of normal and V1 to get V2 */
            double X2 = yn * Z1; X2 -= zn * Y1;
            double Y2 = zn * X1; Y2 -= xn * Z1;
            double Z2 = xn * Y1; Z2 -= yn * X1;

            /* Variables on left */
            // Velocity u
            double uL = q[bsz * node0 + 1];
            // Velocity v
            double vL = q[bsz * node0 + 2];
            // Velocity w
            double wL = q[bsz * node0 + 3];
            double ubarL = xn * uL; ubarL += yn * vL; ubarL += zn * wL;

            /* Variables on right */
            // Velocity u
            double uR = q[bsz * node1 + 1];
            // Velocity v
            double vR = q[bsz * node1 + 2];
            // Velocity w
            double wR = q[bsz * node1 + 3];
            double ubarR = xn * uR; ubarR += yn * vR; ubarR += zn * wR;

            /*
              Now compute eigenvalues and |A| from averaged variables
              Average variables
            */
            double u = 0.5f * (uL + uR);
            double v = 0.5f * (vL + vR);
            double w = 0.5f * (wL + wR);
            double ubar = xn * u; ubar += yn * v; ubar += zn * w;
            double c2 = ubar * ubar + B;
            double c = sqrt(c2);

            /* Put in the eigenvalue smoothing stuff */
            double eig1 = ln * fabs(ubar);
            double eig2 = ln * fabs(ubar);
            double eig3 = ln * fabs(ubar + c);
            double eig4 = ln * fabs(ubar - c);

            double phi1 = xn * B; phi1 += u * ubar;
            double phi2 = yn * B; phi2 += v * ubar;
            double phi3 = zn * B; phi3 += w * ubar;
            double phi4 = Y2 * phi3; phi4 -= Z2 * phi2;
            double phi5 = Z2 * phi1; phi5 -= X2 * phi3;
            double phi6 = X2 * phi2; phi6 -= Y2 * phi1;
            double phi7 = Z1 * phi2; phi7 -= Y1 * phi3;
            double phi8 = X1 * phi3; phi8 -= Z1 * phi1;
            double phi9 = Y1 * phi1; phi9 -= X1 * phi2;

            /* Components of T(inverse) (call this y) */
            double c2inv = 1.f / c2;
            double y11 = u * phi4; y11 += v * phi5; y11 += w * phi6; y11 = -c2inv * y11 / B;
            double y21 = u * phi7; y21 += v * phi8; y21 += w * phi9; y21 = -c2inv * y21 / B;
            double y31 = c2inv * (c - ubar); y31 = 0.5f * y31 / B;
            double y41 = c2inv * (c + ubar); y41 = -0.5f * y41 / B;
            double y12 = c2inv * phi4;
            double y22 = c2inv * phi7;
            double y32 = c2inv * 0.5f * xn;
            double y42 = c2inv * 0.5f * xn;
            double y13 = c2inv * phi5;
            double y23 = c2inv * phi8;
            double y33 = c2inv * 0.5f * yn;
            double y43 = c2inv * 0.5f * yn;
            double y14 = c2inv * phi6;
            double y24 = c2inv * phi9;
            double y34 = c2inv * 0.5f * zn;
            double y44 = c2inv * 0.5f * zn;

            /* Now get elements of T */
            double t13 = c * B;
            double t23 = u * (ubar + c); t23 += xn * B;
            double t33 = v * (ubar + c); t33 += yn * B;
            double t43 = w * (ubar + c); t43 += zn * B;
            double t14 = -c * B;
            double t24 = u * (ubar - c); t24 += xn * B;
            double t34 = v * (ubar - c); t34 += yn * B;
            double t44 = w * (ubar - c); t44 += zn * B;

            /* Compute T * |lambda| * T(inv) */
            double a11 = eig3 * t13 * y31; a11 += eig4 * t14 * y41;
            double a12 = eig3 * t13 * y32; a12 += eig4 * t14 * y42;
            double a13 = eig3 * t13 * y33; a13 += eig4 * t14 * y43;
            double a14 = eig3 * t13 * y34; a14 += eig4 * t14 * y44;
            double a21 = eig1 * X1 * y11; a21 += eig2 * X2 * y21; a21 += eig3 * t23 * y31; a21 += eig4 * t24 * y41;
            double a22 = eig1 * X1 * y12; a22 += eig2 * X2 * y22; a22 += eig3 * t23 * y32; a22 += eig4 * t24 * y42;
            double a23 = eig1 * X1 * y13; a23 += eig2 * X2 * y23; a23 += eig3 * t23 * y33; a23 += eig4 * t24 * y43;
            double a24 = eig1 * X1 * y14; a24 += eig2 * X2 * y24; a24 += eig3 * t23 * y34; a24 += eig4 * t24 * y44;
            double a31 = eig1 * Y1 * y11; a31 += eig2 * Y2 * y21; a31 += eig3 * t33 * y31; a31 += eig4 * t34 * y41;
            double a32 = eig1 * Y1 * y12; a32 += eig2 * Y2 * y22; a32 += eig3 * t33 * y32; a32 += eig4 * t34 * y42;
            double a33 = eig1 * Y1 * y13; a33 += eig2 * Y2 * y23; a33 += eig3 * t33 * y33; a33 += eig4 * t34 * y43;
            double a34 = eig1 * Y1* y14; a34 += eig2 * Y2 * y24; a34 += eig3 * t33 * y34; a34 += eig4 * t34 * y44;
            double a41 = eig1 * Z1 * y11; a41 += eig2 * Z2 * y21; a41 += eig3 * t43 * y31; a41 += eig4 * t44 * y41;
            double a42 = eig1 * Z1 * y12; a42 += eig2 * Z2 * y22; a42 += eig3 * t43 * y32; a42 += eig4 * t44 * y42;
            double a43 = eig1 * Z1 * y13; a43 += eig2 * Z2 * y23; a43 += eig3 * t43 * y33; a43 += eig4 * t44 * y43;
            double a44 = eig1 * Z1 * y14; a44 += eig2 * Z2 * y24; a44 += eig3 * t43 * y34; a44 += eig4 * t44 * y44;

            /* Regular Jacobians on left: Form 0.5 * (A + |A|) */
            double lb = ln * B;
            double lx = ln * xn;
            double ly = ln * yn;
            double lz = ln * zn;

            /* Regular Jacobians on left (column-major 4x4 block) */
            double v0[16];
            v0[0] = 0.5f * a11;
            v0[4] = 0.5f * (lx + a21);
            v0[8] = 0.5f * (ly + a31);
            v0[12] = 0.5f * (lz + a41);
            v0[1] = 0.5f * ((lb * xn) + a12);
            v0[5] = 0.5f * ((ln * (ubarL + xn * uL)) + a22);
            v0[9] = 0.5f * ((lx * vL) + a32);
            v0[13] = 0.5f * ((lx * wL) + a42);
            v0[2] = 0.5f * ((lb * yn) + a13);
            v0[6] = 0.5f * ((ly * uL) + a23);
            v0[10] = 0.5f * ((ln * (ubarL + yn * vL)) + a33);
            v0[14] = 0.5f * ((ly * wL) + a43);
            v0[3] = 0.5f * ((lb * zn) + a14);
            v0[7] = 0.5f * ((lz * uL) + a24);
            v0[11] = 0.5f * ((lz * vL) + a34);
            v0[15] = 0.5f * ((ln * (ubarL + zn * wL)) + a44);

            /* Regular Jacobians on right */
            double v1[16];
            v1[0] = 0.5f * -a11;
            v1[4] = 0.5f * (lx - a21);
            v1[8] = 0.5f * (ly - a31);
            v1[12] = 0.5f * (lz - a41);
            v1[1] = 0.5f * ((lb * xn) - a12);
            v1[5] = 0.5f * ((ln * (ubarR + xn * uR)) - a22);
            v1[9] = 0.5f * ((lx * vR) - a32);
            v1[13] = 0.5f * ((lx * wR) - a42);
            v1[2] = 0.5f * ((lb * yn) - a13);
            v1[6] = 0.5f * ((ly * uR) - a23);
            v1[10] = 0.5f * ((ln * (ubarR + yn * vR)) - a33);
            v1[14] = 0.5f * ((ly * wR) - a43);
            v1[3] = 0.5f * ((lb * zn) - a14);
            v1[7] = 0.5f * ((lz * uR) - a24);
            v1[11] = 0.5f * ((lz * vR) - a34);
            v1[15] = 0.5f * ((ln * (ubarR + zn * wR)) - a44);

            // Scatter into the two block rows; only the owning thread of a
            // node writes its row (edge contributions are skew: -1 * blocks
            // for the opposite endpoint).
            if(part[node0] == t)
            {
                //fill(node0, node0, v0, bsz2, ia, ja, aa);
                //fill(node0, node1, v1, bsz2, ia, ja, aa);
                CXX_Fill(node0, node0, v0, matrix);
                CXX_Fill(node0, node1, v1, matrix);
            }
            if(part[node1] == t)
            {
                //fill_alpha(node1, node0, -1.f, v0, bsz2, ia, ja, aa);
                //fill_alpha(node1, node1, -1.f, v1, bsz2, ia, ja, aa);
                CXX_Fill_alpha(node1, node0, -1.f, v0, matrix);
                CXX_Fill_alpha(node1, node1, -1.f, v1, matrix);
            }
        }
        #pragma omp barrier

        // Solid-surface boundary nodes: add the surface-normal components to
        // the velocity sub-diagonal of the node's diagonal block.
        #pragma omp for
        for(i = 0; i < nsnodes; i++)
        {
            const double v[] = {s_xyz0[i], s_xyz1[i], s_xyz2[i]};
            //fill_boundary(nsptr[i], nsptr[i], v, 3, bsz2, ia, ja, aa);
            CXX_Fill_boundary(nsptr[i], nsptr[i], 3, v, matrix);
        }
        #pragma omp barrier

        // Far-field boundary nodes: characteristic boundary-condition
        // Jacobian, linearized about the freestream (P,U,V,W).
        #pragma omp for
        for(i = 0; i < nfnodes; i++)
        {
            uint32_t n = nfptr[i];

            double xn = f_xyz0[i];
            double yn = f_xyz1[i];
            double zn = f_xyz2[i];
            double ln = sqrt(xn * xn + yn * yn + zn * zn);
            xn /= ln;
            yn /= ln;
            zn /= ln;
            /* 9 FLOPS */

            /*
              Now lets get our other 2 vectors
              For first vector, use {1,0,0} and subtract off the component
              in the direction of the face normal. If the inner product of
              {1,0,0} is close to unity, use {0,1,0}
            */
            double dot = xn;
            double X1, Y1, Z1;
            if(fabs(dot) < 0.95f)
            {
                X1 = 1.f - dot * xn;
                Y1 = - dot * yn;
                Z1 = - dot * zn;
            }
            else
            {
                dot = yn;
                X1 = - dot * xn;
                Y1 = 1.f - dot * yn;
                Z1 = - dot * zn;
            }
            /* 6 FLOPS */

            /* Normalize the first vector (V1) */
            double size = sqrt(X1 * X1 + Y1 * Y1 + Z1 * Z1);
            X1 /= size;
            Y1 /= size;
            Z1 /= size;
            /* 9 FLOPS */

            /* Take cross-product of normal with V1 to get V2 */
            double X2 = yn * Z1 - zn * Y1;
            double Y2 = zn * X1 - xn * Z1;
            double Z2 = xn * Y1 - yn * X1;
            /* 9 FLOPS */

            /* Calculate elements of T and T(inverse) evaluated at freestream */
            double ubar0 = xn * U; ubar0 += yn * V; ubar0 += zn * W;
            double c20 = ubar0 * ubar0 + B;
            double c0 = sqrt(c20);
            double phi1 = xn * B; phi1 += U * ubar0;
            double phi2 = yn * B; phi2 += V * ubar0;
            double phi3 = zn * B; phi3 += W * ubar0;
            double phi4 = Y2 * phi3; phi4 -= Z2 * phi2;
            double phi5 = Z2 * phi1; phi5 -= X2 * phi3;
            double phi6 = X2 * phi2; phi6 -= Y2 * phi1;
            double phi7 = Z1 * phi2; phi7 -= Y1 * phi3;
            double phi8 = X1 * phi3; phi8 -= Z1 * phi1;
            double phi9 = Y1 * phi1; phi9 -= X1 * phi2;
            /* 9 * 3 + 8 FLOPS */

            double t13 = c0 * B;
            double t23 = U * (ubar0 + c0); t23 += xn * B;
            double t33 = V * (ubar0 + c0); t33 += yn * B;
            double t43 = W * (ubar0 + c0); t43 += zn * B;
            double t14 = -c0 * B;
            double t24 = U * (ubar0 - c0); t24 += xn * B;
            double t34 = V * (ubar0 - c0); t34 += yn * B;
            double t44 = W * (ubar0 - c0); t44 += zn * B;

            double ti11 = U * phi4; ti11 += V * phi5; ti11 += W * phi6; ti11 = -ti11 / B / c20;
            double ti21 = U * phi7; ti21 += V * phi8; ti21 += W * phi9; ti21 = -ti21 / B / c20;
            double ti31 = (c0 - ubar0) / (2.f * B * c20);
            double ti41 = -(c0 + ubar0) / (2.f * B * c20);
            double ti12 = phi4 / c20;
            double ti22 = phi7 / c20;
            double ti32 = 0.5f * xn / c20;
            double ti42 = 0.5f * xn / c20;
            double ti13 = phi5 / c20;
            double ti23 = phi8 / c20;
            double ti33 = 0.5f * yn / c20;
            double ti43 = 0.5f * yn / c20;
            double ti14 = phi6 / c20;
            double ti24 = phi9 / c20;
            double ti34 = 0.5f * zn / c20;
            double ti44 = 0.5f * zn / c20;
            /* 27 + 16 + 9 + 6 + 6 + 6 FLOPS */

            /* Now, get the variables on the "inside" */
            double pi = q[bsz * n + 0];
            double ui = q[bsz * n + 1];
            double vi = q[bsz * n + 2];
            double wi = q[bsz * n + 3];
            double un = xn * ui; un += yn * vi; un += zn * wi;
            /* 5 FLOPS */

            /* If ubar is negative, take the reference condition from outside */
            // The *p/*u/*v/*w companions are the 0/1 sensitivities of the
            // reference state w.r.t. the interior unknowns (for the Jacobian).
            double pr, prp, ur, uru, vr, vrv, wr, wrw;
            if(un > 0.f)
            {
                pr = pi; prp = 1.f;
                ur = ui; uru = 1.f;
                vr = vi; vrv = 1.f;
                wr = wi; wrw = 1.f;
            }
            else
            {
                pr = P; prp = 0.f;
                ur = U; uru = 0.f;
                vr = V; vrv = 0.f;
                wr = W; wrw = 0.f;
            }

            /* Set rhs */
            double rhs1 = ti11 * pr; rhs1 += ti12 * ur; rhs1 += ti13 * vr; rhs1 += ti14 * wr;
            double rhs1p = ti11 * prp;
            double rhs1u = ti12 * uru;
            double rhs1v = ti13 * vrv;
            double rhs1w = ti14 * wrw;
            double rhs2 = ti21 * pr; rhs2 += ti22 * ur; rhs2 += ti23 * vr; rhs2 += ti24 * wr;
            double rhs2p = ti21 * prp;
            double rhs2u = ti22 * uru;
            double rhs2v = ti23 * vrv;
            double rhs2w = ti24 * wrw;
            double rhs3 = ti31 * pi; rhs3 += ti32 * ui; rhs3 += ti33 * vi; rhs3 += ti34 * wi;
            double rhs4 = ti41 * P; rhs4 += ti42 * U; rhs4 += ti43 * V; rhs4 += ti44 * W;
            /* 12 + 24 FLOPS */

            /* Now do matrix multiplication to get values on boundary */
            double pb = t13 * rhs3; pb += t14 * rhs4;
            double pbp = t13 * ti31;
            double pbu = t13 * ti32;
            double pbv = t13 * ti33;
            double pbw = t13 * ti34;
            double ub = X1 * rhs1; ub += X2 * rhs2; ub += t23 * rhs3; ub += t24 * rhs4;
            double ubp = X1 * rhs1p; ubp += X2 * rhs2p; ubp += t23 * ti31;
            double ubu = X1 * rhs1u; ubu += X2 * rhs2u; ubu += t23 * ti32;
            double ubv = X1 * rhs1v; ubv += X2 * rhs2v; ubv += t23 * ti33;
            double ubw = X1 * rhs1w; ubw += X2 * rhs2w; ubw += t23 * ti34;
            double vb = Y1 * rhs1; vb += Y2 * rhs2; vb += t33 * rhs3; vb += t34 * rhs4;
            double vbp = Y1 * rhs1p; vbp += Y2 * rhs2p; vbp += t33 * ti31;
            double vbu = Y1 * rhs1u; vbu += Y2 * rhs2u; vbu += t33 * ti32;
            double vbv = Y1 * rhs1v; vbv += Y2 * rhs2v; vbv += t33 * ti33;
            double vbw = Y1 * rhs1w; vbw += Y2 * rhs2w; vbw += t33 * ti34;
            double wb = Z1 * rhs1; wb += Z2 * rhs2; wb += t43 * rhs3; wb += t44 * rhs4;
            double wbp = Z1 * rhs1p; wbp += Z2 * rhs2p; wbp += t43 * ti31;
            double wbu = Z1 * rhs1u; wbu += Z2 * rhs2u; wbu += t43 * ti32;
            double wbv = Z1 * rhs1v; wbv += Z2 * rhs2v; wbv += t43 * ti33;
            double wbw = Z1 * rhs1w; wbw += Z2 * rhs2w; wbw += t43 * ti34;
            /* 5 * 15 + 6 + 5 + 2 FLOPS */

            double unb = xn * ub; unb += yn * vb; unb += zn * wb;
            double unbp = xn * ubp; unbp += yn * vbp; unbp += zn * wbp;
            double unbu = xn * ubu; unbu += yn * vbu; unbu += zn * wbu;
            double unbv = xn * ubv; unbv += yn * vbv; unbv += zn * wbv;
            double unbw = xn * ubw; unbw += yn * vbw; unbw += zn * wbw;
            /* 5 * 5 FLOPS */

            /* Now add contribution to lhs */
            double v[16];
            v[0] = ln * B * unbp;
            v[4] = ln * (ub * unbp + unb * ubp + xn * pbp);
            v[8] = ln * (vb * unbp + unb * vbp + yn * pbp);
            v[12] = ln * (wb * unbp + unb * wbp + zn * pbp);
            v[1] = ln * B * unbu;
            v[5] = ln * (ub * unbu + unb * ubu + xn * pbu);
            v[9] = ln * (vb * unbu + unb * vbu + yn * pbu);
            v[13] = ln * (wb * unbu + unb * wbu + zn * pbu);
            v[2] = ln * B * unbv;
            v[6] = ln * (ub * unbv + unb * ubv + xn * pbv);
            v[10] = ln * (vb * unbv + unb * vbv + yn * pbv);
            v[14] = ln * (wb * unbv + unb * wbv + zn * pbv);
            v[3] = ln * B * unbw;
            v[7] = ln * (ub * unbw + unb * ubw + xn * pbw);
            v[11] = ln * (vb * unbw + unb * vbw + yn * pbw);
            v[15] = ln * (wb * unbw + unb * wbw + zn * pbw);

            //fill(n, n, v, bsz2, ia, ja, aa);
            CXX_Fill(n, n, v, matrix);
        }
    }
}

// Public entry point: unpack the GEOMETRY aggregate, run the assembly kernel,
// and log the elapsed time under KERNEL_FLUX.
void ComputeA(GEOMETRY *g)
{
    BENCH start_bench = rdbench();

    _KRN_ComputeA(
        g->n->sz,
        g->b->s->sz,
        g->b->f->sz,
        g->c->b,
        g->b->s->nptr,
        g->b->f->nptr,
        g->b->s->xyz->x0,
        g->b->s->xyz->x1,
        g->b->s->xyz->x2,
        g->b->f->xyz->x0,
        g->b->f->xyz->x1,
        g->b->f->xyz->x2,
        g->s->i,
        g->n->part,
        g->e->eptr->n0,
        g->e->eptr->n1,
        g->e->xyzn->x0,
        g->e->xyzn->x1,
        g->e->xyzn->x2,
        g->e->xyzn->x3,
        g->q->q,
        g->n->cdt,
        g->matrix);//,
        //g->c->mat->i[g->n->sz] * g->c->b2,
        //g->c->b2,
        //g->c->mat->i,
        //g->c->mat->j,
        //g->c->mat->a);

    fun3d_log(start_bench, KERNEL_FLUX);
}
sync.c
/**
 * \file
 * \brief BOMP barrier synchronization microbenchmark
 */

/*
 * Copyright (c) 2007, 2008, 2009, 2010, ETH Zurich.
 * All rights reserved.
 *
 * This file is distributed under the terms in the attached LICENSE file.
 * If you do not find this file, copies can be found by writing to:
 * ETH Zurich D-INFK, Haldeneggsteig 4, CH-8092 Zurich. Attn: Systems Group.
 */

#include <stdbool.h>
#include <stdlib.h>
#include <stdio.h>
#include <stdint.h>
#include <omp.h>
#include <assert.h>
#include <barrelfish/barrelfish.h>
#include <trace/trace.h>

// Reporting period in TSC ticks, and total reports before exit.
#define PERIOD          2500000000UL
#define ITERATIONS      10
#define STACK_SIZE      (64 * 1024)

// One counter per thread, padded to a cache line to avoid false sharing.
struct workcnt {
  uint64_t cnt;
} __attribute__ ((aligned (64)));

// Spin all threads on OpenMP barriers, counting iterations per thread, and
// print per-thread progress once per PERIOD.  Usage: sync <nthreads>.
int main(int argc, char *argv[])
{
  static struct workcnt workcnt[32];
  // exittime[iter] holds the barrier-iteration index at which all threads
  // should leave the inner loop together (0 = no exit scheduled).
  static struct workcnt exittime[ITERATIONS];
  int nthreads;
  int iterations = 0;
  uint64_t last;

  /* uint64_t last = rdtsc(); */
  /* while(rdtsc() < last + PERIOD) { */
  /*     thread_yield(); */
  /* } */

  if(argc == 2) {
    nthreads = atoi(argv[1]);
    // Spread the domain over nthreads cores before initializing BOMP.
    backend_span_domain(nthreads, STACK_SIZE);
    bomp_custom_init();
    omp_set_num_threads(nthreads);
  } else {
    assert(!"Specify number of threads");
  }

#if CONFIG_TRACE
  errval_t err = trace_control(TRACE_EVENT(TRACE_SUBSYS_BOMP,
                                           TRACE_EVENT_BOMP_START, 0),
                               TRACE_EVENT(TRACE_SUBSYS_BOMP,
                                           TRACE_EVENT_BOMP_STOP, 0), 0);
  assert(err_is_ok(err));
  trace_event(TRACE_SUBSYS_BOMP, TRACE_EVENT_BOMP_START, 0);
#endif

  /* bomp_synchronize(); */

  last = rdtsc();

  // Outer loop restarts a fresh parallel region each time the inner loop is
  // exited; iter cycles through the exittime slots.
  for(int iter = 0;; iter = (iter + 1) % ITERATIONS) {
    // Do some work
#pragma omp parallel
    for(uint64_t i = 0;; i++) {
#pragma omp barrier
      workcnt[omp_get_thread_num()].cnt++;

      // Only the master thread reports and schedules the collective exit.
#pragma omp master
      if(rdtsc() >= last + PERIOD) {
#if CONFIG_TRACE
        // With tracing enabled: dump the trace buffer and stop immediately.
        trace_event(TRACE_SUBSYS_BOMP, TRACE_EVENT_BOMP_STOP, 0);
        char *buf = malloc(4096*4096);
        trace_dump(buf, 4096*4096);
        printf("%s\n", buf);
        abort();
#endif
        printf("%s, %lu: threads %d (%s), progress ", argv[0], rdtsc(),
               omp_get_num_threads(), omp_get_dynamic() ? "dynamic" : "static");
        // Prints all 32 slots regardless of nthreads; unused slots stay 0.
        for(int n = 0; n < 32; n++) {
          printf("%lu ", workcnt[n].cnt);
        }
        printf("\n");
        last += PERIOD;
        iterations++;

        if(iterations == 25) {
          printf("client done\n");
          abort();
        }

        // Schedule a collective exit a few barriers ahead (i + 3) so every
        // thread sees it at the same iteration; clear an older slot so the
        // ring of pending exits stays bounded.
        // NOTE(review): the +3 slack presumably covers threads already past
        // this barrier — confirm against the BOMP barrier semantics.
        if(exittime[iter].cnt == 0) {
          exittime[iter].cnt = i + 3;
          exittime[(iter + ITERATIONS - 2) % ITERATIONS].cnt = 0;
        }
      }

      // All threads test the shared exit mark at the same i and leave the
      // parallel region together.
      if(exittime[iter].cnt != 0 && exittime[iter].cnt == i) {
        break;
      }
    }
  }
}
repair.c
#include "../../shared.h"
#include "hale.h"
#include <float.h>
#include <stdio.h>

/*
 * NOTE: The repair phase is essentially a mesh-wide scattering stencil.
 * Essentially the whole stencil needs to be owned by a single thread to stop
 * data races...
 *
 * One method that could be employed here is to essentially break the problem
 * down and analyse the dependencies at runtime.
 *
 * Steps:
 *
 * 1) determine the quantities needed to repair extrema
 * 2) check the 2 deep stencil of each node/cell to check if we actually have a
 * dependency.
 * 3) construct an indirection with all independent work and one of the
 * dependent elements from each chain
 * 4) perform all of the work on that indirection in parallel
 * 5) construct another list that contains another single item of the work that
 * was considered dependent
 * 6) perform all of the individual dependent element's work
 * 7) repeat 5 and 6 until completion.
 */

// Repairs the subcell extrema for mass
void repair_subcell_extrema(const int ncells, const int* cells_to_nodes_offsets,
                            const int* subcells_to_subcells_offsets,
                            const int* subcells_to_subcells,
                            double* subcell_volume, double* subcell_mass);

// Repairs the extrema at the nodal velocities
void repair_velocity_extrema(const int nnodes,
                             const int* nodes_to_nodes_offsets,
                             const int* nodes_to_nodes, double* velocity_x,
                             double* velocity_y, double* velocity_z);

// Repairs the extrema for energy
void repair_energy_extrema(const int ncells, const int* cells_to_faces_offsets,
                           const int* cells_to_faces,
                           const int* faces_to_cells0,
                           const int* faces_to_cells1, double* energy);

// Redistributes the mass according to the determined neighbour availability
void redistribute_subcell_mass(double* mass, const int subcell_index,
                               const int nsubcell_neighbours,
                               const int* subcells_to_subcells,
                               const int subcell_to_subcells_off,
                               const double* dmass_avail_neighbour,
                               const double dmass_avail, const double dmass_need,
                               const double g, const double subcell_vol,
                               const int is_min);

// Performs a conservative repair of the mesh
void mass_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
  // Advects mass and energy through the subcell faces using swept edge approx
  repair_subcell_extrema(umesh->ncells, umesh->cells_to_nodes_offsets,
                         hale_data->subcells_to_subcells_offsets,
                         hale_data->subcells_to_subcells,
                         hale_data->subcell_volume, hale_data->subcell_mass);
}

// Repairs the nodal velocities
void velocity_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
  repair_velocity_extrema(umesh->nnodes, umesh->nodes_to_nodes_offsets,
                          umesh->nodes_to_nodes, hale_data->velocity_x0,
                          hale_data->velocity_y0, hale_data->velocity_z0);
}

// Repairs the energy
void energy_repair_phase(UnstructuredMesh* umesh, HaleData* hale_data) {
  repair_energy_extrema(umesh->ncells, umesh->cells_to_faces_offsets,
                        umesh->cells_to_faces, umesh->faces_to_cells0,
                        umesh->faces_to_cells1, hale_data->energy0);
}

// Repairs the extrema at the nodal velocities.
//
// For each node, clamps its velocity components into the [min, max] range of
// its neighbours' velocities, compensating each clamp conservatively by
// spreading the difference over neighbours in proportion to how much each
// neighbour can donate/receive without creating a new extremum of its own.
//
// NOTE(review): as the file-top comment admits, the scatter to neighbour
// velocities inside this "#pragma omp parallel for" races between threads
// whose stencils overlap — correctness relies on a future dependency-aware
// schedule.  Also note the availability arrays are left uninitialized for
// boundary (-1) neighbours, and the totals may be 0.0 (division below would
// then produce inf/nan) — benign only if the need is also non-positive.
void repair_velocity_extrema(const int nnodes,
                             const int* nodes_to_nodes_offsets,
                             const int* nodes_to_nodes, double* velocity_x,
                             double* velocity_y, double* velocity_z) {
#pragma omp parallel for
  for (int nn = 0; nn < nnodes; ++nn) {
    const int node_to_nodes_off = nodes_to_nodes_offsets[(nn)];
    const int nnodes_by_node =
        nodes_to_nodes_offsets[(nn + 1)] - node_to_nodes_off;

    // Running extrema of the neighbourhood, and total donate/receive budget.
    double gmax_vx = -DBL_MAX;
    double gmin_vx = DBL_MAX;
    double gmax_vy = -DBL_MAX;
    double gmin_vy = DBL_MAX;
    double gmax_vz = -DBL_MAX;
    double gmin_vz = DBL_MAX;
    double dvx_total_avail_donate = 0.0;
    double dvx_total_avail_receive = 0.0;
    double dvy_total_avail_donate = 0.0;
    double dvy_total_avail_receive = 0.0;
    double dvz_total_avail_donate = 0.0;
    double dvz_total_avail_receive = 0.0;
    // Per-neighbour budgets (VLAs sized by the node's valence).
    double dvx_avail_donate_neighbour[(nnodes_by_node)];
    double dvx_avail_receive_neighbour[(nnodes_by_node)];
    double dvy_avail_donate_neighbour[(nnodes_by_node)];
    double dvy_avail_receive_neighbour[(nnodes_by_node)];
    double dvz_avail_donate_neighbour[(nnodes_by_node)];
    double dvz_avail_receive_neighbour[(nnodes_by_node)];

    // Loop over the nodes attached to this node
    for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
      const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
      if (neighbour_index == -1) {
        continue;
      }

      const int neighbour_to_nodes_off =
          nodes_to_nodes_offsets[(neighbour_index)];
      const int nnodes_by_neighbour =
          nodes_to_nodes_offsets[(neighbour_index + 1)] -
          neighbour_to_nodes_off;

      vec_t neighbour_v = {velocity_x[(neighbour_index)],
                           velocity_y[(neighbour_index)],
                           velocity_z[(neighbour_index)]};

      // Extrema over the neighbour's own neighbourhood — bounds the amount
      // the neighbour can give up / absorb without becoming an extremum.
      double neighbour_gmax_vx = -DBL_MAX;
      double neighbour_gmin_vx = DBL_MAX;
      double neighbour_gmax_vy = -DBL_MAX;
      double neighbour_gmin_vy = DBL_MAX;
      double neighbour_gmax_vz = -DBL_MAX;
      double neighbour_gmin_vz = DBL_MAX;

      for (int nn3 = 0; nn3 < nnodes_by_neighbour; ++nn3) {
        const int neighbour_neighbour_index =
            nodes_to_nodes[(neighbour_to_nodes_off + nn3)];
        if (neighbour_neighbour_index == -1) {
          continue;
        }
        neighbour_gmax_vx =
            max(neighbour_gmax_vx, velocity_x[(neighbour_neighbour_index)]);
        neighbour_gmin_vx =
            min(neighbour_gmin_vx, velocity_x[(neighbour_neighbour_index)]);
        neighbour_gmax_vy =
            max(neighbour_gmax_vy, velocity_y[(neighbour_neighbour_index)]);
        neighbour_gmin_vy =
            min(neighbour_gmin_vy, velocity_y[(neighbour_neighbour_index)]);
        neighbour_gmax_vz =
            max(neighbour_gmax_vz, velocity_z[(neighbour_neighbour_index)]);
        neighbour_gmin_vz =
            min(neighbour_gmin_vz, velocity_z[(neighbour_neighbour_index)]);
      }

      // Slack each neighbour has to donate (distance above its local min)
      // or receive (distance below its local max), per component.
      dvx_avail_donate_neighbour[(nn2)] =
          max(neighbour_v.x - neighbour_gmin_vx, 0.0);
      dvx_avail_receive_neighbour[(nn2)] =
          max(neighbour_gmax_vx - neighbour_v.x, 0.0);
      dvy_avail_donate_neighbour[(nn2)] =
          max(neighbour_v.y - neighbour_gmin_vy, 0.0);
      dvy_avail_receive_neighbour[(nn2)] =
          max(neighbour_gmax_vy - neighbour_v.y, 0.0);
      dvz_avail_donate_neighbour[(nn2)] =
          max(neighbour_v.z - neighbour_gmin_vz, 0.0);
      dvz_avail_receive_neighbour[(nn2)] =
          max(neighbour_gmax_vz - neighbour_v.z, 0.0);

      dvx_total_avail_donate += dvx_avail_donate_neighbour[(nn2)];
      dvx_total_avail_receive += dvx_avail_receive_neighbour[(nn2)];
      dvy_total_avail_donate += dvy_avail_donate_neighbour[(nn2)];
      dvy_total_avail_receive += dvy_avail_receive_neighbour[(nn2)];
      dvz_total_avail_donate += dvz_avail_donate_neighbour[(nn2)];
      dvz_total_avail_receive += dvz_avail_receive_neighbour[(nn2)];

      gmax_vx = max(gmax_vx, neighbour_v.x);
      gmin_vx = min(gmin_vx, neighbour_v.x);
      gmax_vy = max(gmax_vy, neighbour_v.y);
      gmin_vy = min(gmin_vy, neighbour_v.y);
      gmax_vz = max(gmax_vz, neighbour_v.z);
      gmin_vz = min(gmin_vz, neighbour_v.z);
    }

    vec_t cell_v = {velocity_x[(nn)], velocity_y[(nn)], velocity_z[(nn)]};

    // Positive when this node undershoots/overshoots its neighbourhood range.
    const double dvx_need_receive = gmin_vx - cell_v.x;
    const double dvx_need_donate = cell_v.x - gmax_vx;
    const double dvy_need_receive = gmin_vy - cell_v.y;
    const double dvy_need_donate = cell_v.y - gmax_vy;
    const double dvz_need_receive = gmin_vz - cell_v.z;
    const double dvz_need_donate = cell_v.z - gmax_vz;

    // x component: clamp to the neighbourhood range and compensate the
    // neighbours proportionally to their donate/receive budget.
    if (dvx_need_receive > 0.0) {
      velocity_x[(nn)] = gmin_vx;
      // Loop over the nodes attached to this node
      for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
        const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
        if (neighbour_index == -1) {
          continue;
        }
        velocity_x[(neighbour_index)] -=
            (dvx_avail_donate_neighbour[(nn2)] / dvx_total_avail_donate) *
            dvx_need_receive;
      }
    } else if (dvx_need_donate > 0.0) {
      // Loop over the nodes attached to this node
      velocity_x[(nn)] = gmax_vx;
      for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
        const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
        if (neighbour_index == -1) {
          continue;
        }
        velocity_x[(neighbour_index)] +=
            (dvx_avail_receive_neighbour[(nn2)] / dvx_total_avail_receive) *
            dvx_need_donate;
      }
    }

    // y component: same scheme.
    if (dvy_need_receive > 0.0) {
      velocity_y[(nn)] = gmin_vy;
      // Loop over the nodes attached to this node
      for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
        const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
        if (neighbour_index == -1) {
          continue;
        }
        velocity_y[(neighbour_index)] -=
            (dvy_avail_donate_neighbour[(nn2)] / dvy_total_avail_donate) *
            dvy_need_receive;
      }
    } else if (dvy_need_donate > 0.0) {
      // Loop over the nodes attached to this node
      velocity_y[(nn)] = gmax_vy;
      for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
        const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
        if (neighbour_index == -1) {
          continue;
        }
        velocity_y[(neighbour_index)] +=
            (dvy_avail_receive_neighbour[(nn2)] / dvy_total_avail_receive) *
            dvy_need_donate;
      }
    }

    // z component: same scheme.
    if (dvz_need_receive > 0.0) {
      velocity_z[(nn)] = gmin_vz;
      // Loop over the nodes attached to this node
      for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
        const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
        if (neighbour_index == -1) {
          continue;
        }
        velocity_z[(neighbour_index)] -=
            (dvz_avail_donate_neighbour[(nn2)] / dvz_total_avail_donate) *
            dvz_need_receive;
      }
    } else if (dvz_need_donate > 0.0) {
      // Loop over the nodes attached to this node
      velocity_z[(nn)] = gmax_vz;
      for (int nn2 = 0; nn2 < nnodes_by_node; ++nn2) {
        const int neighbour_index = nodes_to_nodes[(node_to_nodes_off + nn2)];
        if (neighbour_index == -1) {
          continue;
        }
        velocity_z[(neighbour_index)] +=
            (dvz_avail_receive_neighbour[(nn2)] / dvz_total_avail_receive) *
            dvz_need_donate;
      }
    }

    // NOTE(review): this "budget exhausted" check runs after the
    // redistribution above has already been applied — the values have been
    // scattered even when a further repair level is reported necessary.
    if (dvx_total_avail_donate < dvx_need_receive ||
        dvx_total_avail_receive < dvx_need_donate ||
        dvy_total_avail_donate < dvy_need_receive ||
        dvy_total_avail_receive < dvy_need_donate ||
        dvz_total_avail_donate < dvz_need_receive ||
        dvz_total_avail_receive < dvz_need_donate) {
      printf("Repair stage needs additional level.\n");
      continue;
    }
  }
}

// Repairs the extrema for energy.
//
// Same scheme as the velocity repair, but per cell over face-adjacent
// neighbour cells: clamp each cell's energy into its neighbourhood range
// and compensate neighbours proportionally to their donate/receive slack.
// NOTE(review): the neighbour-energy scatter under "#pragma omp parallel for"
// has the same data-race caveat described at the top of the file.
void repair_energy_extrema(const int ncells, const int* cells_to_faces_offsets,
                           const int* cells_to_faces,
                           const int* faces_to_cells0,
                           const int* faces_to_cells1, double* energy) {
#pragma omp parallel for
  for (int cc = 0; cc < ncells; ++cc) {
    const int cell_to_faces_off = cells_to_faces_offsets[(cc)];
    const int nfaces_by_cell =
        cells_to_faces_offsets[(cc + 1)] - cell_to_faces_off;

    double gmax_ie = -DBL_MAX;
    double gmin_ie = DBL_MAX;
    double die_total_avail_donate = 0.0;
    double die_total_avail_receive = 0.0;
    // Per-face-neighbour slack (uninitialized for boundary faces — those
    // entries are skipped again below because the same -1 test repeats).
    double die_avail_donate_neighbour[(nfaces_by_cell)];
    double die_avail_receive_neighbour[(nfaces_by_cell)];
    const double cell_ie = energy[(cc)];

    // Loop over the faces attached to this cell
    for (int ff = 0; ff < nfaces_by_cell; ++ff) {
      const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
      // The neighbour is whichever side of the face is not this cell.
      const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
                                      ? faces_to_cells1[(face_index)]
                                      : faces_to_cells0[(face_index)];
      if (neighbour_index == -1) {
        continue;
      }
      const double neighbour_ie = energy[(neighbour_index)];
      double neighbour_gmax_ie = -DBL_MAX;
      double neighbour_gmin_ie = DBL_MAX;

      const int neighbour_to_faces_off =
          cells_to_faces_offsets[(neighbour_index)];
      const int nfaces_by_neighbour =
          cells_to_faces_offsets[(neighbour_index + 1)] -
          neighbour_to_faces_off;

      // Extrema over the neighbour's own face-neighbourhood.
      for (int ff2 = 0; ff2 < nfaces_by_neighbour; ++ff2) {
        const int neighbour_face_index =
            cells_to_faces[(neighbour_to_faces_off + ff2)];
        const int neighbour_neighbour_index =
            (faces_to_cells0[(neighbour_face_index)] == neighbour_index)
                ? faces_to_cells1[(neighbour_face_index)]
                : faces_to_cells0[(neighbour_face_index)];
        if (neighbour_neighbour_index == -1) {
          continue;
        }
        neighbour_gmax_ie =
            max(neighbour_gmax_ie, energy[(neighbour_neighbour_index)]);
        neighbour_gmin_ie =
            min(neighbour_gmin_ie, energy[(neighbour_neighbour_index)]);
      }

      die_avail_donate_neighbour[(ff)] =
          max(neighbour_ie - neighbour_gmin_ie, 0.0);
      die_avail_receive_neighbour[(ff)] =
          max(neighbour_gmax_ie - neighbour_ie, 0.0);
      die_total_avail_donate += die_avail_donate_neighbour[(ff)];
      die_total_avail_receive += die_avail_receive_neighbour[(ff)];
      gmax_ie = max(gmax_ie, neighbour_ie);
      gmin_ie = min(gmin_ie, neighbour_ie);
    }

    const double die_need_receive = gmin_ie - cell_ie;
    const double die_need_donate = cell_ie - gmax_ie;

    if (die_need_receive > 0.0) {
      // Cell is below the neighbourhood minimum: pull it up and take the
      // difference from the neighbours in proportion to their slack.
      energy[(cc)] = gmin_ie;
      for (int ff = 0; ff < nfaces_by_cell; ++ff) {
        const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
        const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
                                        ? faces_to_cells1[(face_index)]
                                        : faces_to_cells0[(face_index)];
        if (neighbour_index == -1) {
          continue;
        }
        energy[(neighbour_index)] -=
            (die_avail_donate_neighbour[(ff)] / die_total_avail_donate) *
            die_need_receive;
      }
    } else if (die_need_donate > 0.0) {
      // Cell is above the neighbourhood maximum: push it down and give the
      // difference to the neighbours in proportion to their slack.
      energy[(cc)] = gmax_ie;
      for (int ff = 0; ff < nfaces_by_cell; ++ff) {
        const int face_index = cells_to_faces[(cell_to_faces_off + ff)];
        const int neighbour_index = (faces_to_cells0[(face_index)] == cc)
                                        ? faces_to_cells1[(face_index)]
                                        : faces_to_cells0[(face_index)];
        if (neighbour_index == -1) {
          continue;
        }
        energy[(neighbour_index)] +=
            (die_avail_receive_neighbour[(ff)] / die_total_avail_receive) *
            die_need_donate;
      }
    }

    // NOTE(review): as above, this check fires after the scatter happened.
    if (die_total_avail_donate < die_need_receive ||
        die_total_avail_receive < die_need_donate) {
      printf("Repair stage needs additional level.\n");
      continue;
    }
  }
}

// Repairs the subcell extrema for mass.
//
// Per corner subcell, clamps the subcell mass *density* into the range of
// its neighbouring subcells' densities, then conservatively redistributes
// the mass difference via redistribute_subcell_mass().
// NOTE(review): same parallel-scatter race caveat as the other repairers;
// additionally, boundary (-1) neighbours leave their slots of
// dm_avail_*_neighbour uninitialized — the redistribution routine must skip
// those entries.
void repair_subcell_extrema(const int ncells, const int* cells_to_nodes_offsets,
                            const int* subcells_to_subcells_offsets,
                            const int* subcells_to_subcells,
                            double* subcell_volume, double* subcell_mass) {
#pragma omp parallel for
  for (int cc = 0; cc < ncells; ++cc) {
    const int cell_to_nodes_off = cells_to_nodes_offsets[(cc)];
    const int nnodes_by_cell =
        cells_to_nodes_offsets[(cc + 1)] - cell_to_nodes_off;

    // Looping over corner subcells here
    for (int nn = 0; nn < nnodes_by_cell; ++nn) {
      const int subcell_index = cell_to_nodes_off + nn;
      const int subcell_to_subcells_off =
          subcells_to_subcells_offsets[(subcell_index)];
      const int nsubcell_neighbours =
          subcells_to_subcells_offsets[(subcell_index + 1)] -
          subcell_to_subcells_off;
      const double subcell_vol = subcell_volume[(subcell_index)];
      const double subcell_m_density =
          subcell_mass[(subcell_index)] / subcell_vol;

      double gmax_m = -DBL_MAX;
      double gmin_m = DBL_MAX;
      double dm_avail_donate = 0.0;
      double dm_avail_receive = 0.0;
      double dm_avail_donate_neighbour[(nsubcell_neighbours)];
      double dm_avail_receive_neighbour[(nsubcell_neighbours)];

      // Loop over neighbours
      for (int ss = 0; ss < nsubcell_neighbours; ++ss) {
        const int neighbour_index =
            subcells_to_subcells[(subcell_to_subcells_off + ss)];

        // Ignore boundary neighbours
        if (neighbour_index == -1) {
          continue;
        }

        const int neighbour_to_subcells_off =
            subcells_to_subcells_offsets[(neighbour_index)];
        const int nneighbour_neighbours =
            subcells_to_subcells_offsets[(neighbour_index + 1)] -
            neighbour_to_subcells_off;
        const double neighbour_vol = subcell_volume[(neighbour_index)];
        const double neighbour_m_density =
            subcell_mass[(neighbour_index)] / neighbour_vol;

        double neighbour_gmax_m = -DBL_MAX;
        double neighbour_gmin_m = DBL_MAX;

        // Loop over neighbour's neighbours
        for (int ss2 = 0; ss2 < nneighbour_neighbours; ++ss2) {
          const int neighbour_neighbour_index =
              subcells_to_subcells[(neighbour_to_subcells_off + ss2)];

          // Ignore boundary neighbours
          if (neighbour_neighbour_index == -1) {
            continue;
          }

          const double neighbour_neighbour_vol =
              subcell_volume[(neighbour_neighbour_index)];
          const double neighbour_neighbour_m_density =
              subcell_mass[(neighbour_neighbour_index)] /
              neighbour_neighbour_vol;

          // Store the maximum / minimum values for rho in the neighbourhood
          neighbour_gmax_m =
              max(neighbour_gmax_m, neighbour_neighbour_m_density);
          neighbour_gmin_m =
              min(neighbour_gmin_m, neighbour_neighbour_m_density);
        }

        // Slack converted back to a mass using this subcell's volume.
        dm_avail_donate_neighbour[(ss)] =
            max((neighbour_m_density - neighbour_gmin_m) * subcell_vol, 0.0);
        dm_avail_receive_neighbour[(ss)] =
            max((neighbour_gmax_m - neighbour_m_density) * subcell_vol, 0.0);
        dm_avail_donate += dm_avail_donate_neighbour[(ss)];
        dm_avail_receive += dm_avail_receive_neighbour[(ss)];
        gmax_m = max(gmax_m, neighbour_m_density);
        gmin_m = min(gmin_m, neighbour_m_density);
      }

      const double dm_need_receive = (gmin_m - subcell_m_density) * subcell_vol;
      const double dm_need_donate = (subcell_m_density - gmax_m) * subcell_vol;

      if (dm_need_receive > 0.0) {
        redistribute_subcell_mass(subcell_mass, subcell_index,
                                  nsubcell_neighbours, subcells_to_subcells,
                                  subcell_to_subcells_off,
                                  dm_avail_donate_neighbour, dm_avail_donate,
                                  dm_need_receive, gmin_m, subcell_vol, 1);
      } else if (dm_need_donate > 0.0) {
        redistribute_subcell_mass(subcell_mass, subcell_index,
                                  nsubcell_neighbours, subcells_to_subcells,
                                  subcell_to_subcells_off,
                                  dm_avail_receive_neighbour, dm_avail_receive,
                                  dm_need_donate, gmax_m, subcell_vol, 0);
      }

      if (dm_avail_donate < dm_need_receive ||
          dm_avail_receive < dm_need_donate) {
printf("dm_avail_donate %.12e dm_need_receive %.12e dm_avail_receive " "%.12e dm_need_donate %.12e\n", dm_avail_donate, dm_need_receive, dm_avail_receive, dm_need_donate); printf("Repair stage needs additional level.\n"); continue; } } } } // Redistributes the mass according to the determined neighbour availability void redistribute_subcell_mass(double* mass, const int subcell_index, const int nsubcell_neighbours, const int* subcells_to_subcells, const int subcell_to_subcells_off, const double* dmass_avail_neighbour, const double dmass_avail, const double dmass_need, const double g, const double subcell_vol, const int is_min) { mass[(subcell_index)] = g * subcell_vol; // Loop over neighbours for (int ss = 0; ss < nsubcell_neighbours; ++ss) { const int neighbour_index = subcells_to_subcells[(subcell_to_subcells_off + ss)]; mass[(neighbour_index)] += (is_min ? -1.0 : 1.0) * (dmass_avail_neighbour[(ss)] / dmass_avail) * dmass_need; } }
convolution_pack1to4_bf16s.h
// Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.

// Repacks convolution weights from kw-kh-inch-outch layout into groups of 4
// output channels, converting each float weight to bfloat16, so the compute
// kernel below can load 4 output-channel weights with a single vld1_u16.
// Output channels not a multiple of 4 are assumed handled by the caller
// (the loop stops at the last full group of 4).
static void convolution_transform_kernel_pack1to4_bf16s_neon(const Mat& weight_data, Mat& weight_data_bf16, int num_input, int num_output, int kernel_w, int kernel_h)
{
    const int maxk = kernel_w * kernel_h;

    // src = kw-kh-inch-outch
    // dst = 4b-kw-kh-inch-outch/4b
    Mat weight_data_r2 = weight_data.reshape(maxk, num_input, num_output);

    // elemsize (size_t)2 * 4 = 4 bf16 lanes per element, elempack 4.
    weight_data_bf16.create(maxk, num_input, num_output / 4, (size_t)2 * 4, 4);

    for (int q = 0; q + 3 < num_output; q += 4)
    {
        const Mat k0 = weight_data_r2.channel(q);
        const Mat k1 = weight_data_r2.channel(q + 1);
        const Mat k2 = weight_data_r2.channel(q + 2);
        const Mat k3 = weight_data_r2.channel(q + 3);

        unsigned short* g00 = weight_data_bf16.channel(q / 4);

        for (int p = 0; p < num_input; p++)
        {
            const float* k00 = k0.row(p);
            const float* k10 = k1.row(p);
            const float* k20 = k2.row(p);
            const float* k30 = k3.row(p);

            // Interleave the same spatial tap of 4 output channels.
            for (int k = 0; k < maxk; k++)
            {
                g00[0] = float32_to_bfloat16(k00[k]);
                g00[1] = float32_to_bfloat16(k10[k]);
                g00[2] = float32_to_bfloat16(k20[k]);
                g00[3] = float32_to_bfloat16(k30[k]);

                g00 += 4;
            }
        }
    }
}

// Direct convolution, input elempack 1 -> output elempack 4, bf16 storage:
// activations and weights are stored as bfloat16 and widened to float32 for
// the NEON multiply-accumulate.  top_blob must be pre-sized by the caller
// (outw/outh/outch are read from it); weights come from the transform above.
static void convolution_pack1to4_bf16s_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_bf16, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt)
{
    int w = bottom_blob.w;
    int channels = bottom_blob.c;

    int outw = top_blob.w;
    int outh = top_blob.h;
    int outch = top_blob.c;

    const int maxk = kernel_w * kernel_h;

    // kernel offsets
    // Precompute, for each of the maxk taps, its flat offset into an input
    // row-major plane, accounting for dilation in both directions.
    std::vector<int> _space_ofs(maxk);
    int* space_ofs = &_space_ofs[0];
    {
        int p1 = 0;
        int p2 = 0;
        int gap = w * dilation_h - kernel_w * dilation_w;
        for (int i = 0; i < kernel_h; i++)
        {
            for (int j = 0; j < kernel_w; j++)
            {
                space_ofs[p1] = p2;
                p1++;
                p2 += dilation_w;
            }
            p2 += gap;
        }
    }

    const float* bias_data_ptr = bias_data;

    // num_output
    #pragma omp parallel for num_threads(opt.num_threads)
    for (int p = 0; p < outch; p++)
    {
        unsigned short* outptr = top_blob.channel(p);

        for (int i = 0; i < outh; i++)
        {
            for (int j = 0; j < outw; j++)
            {
                // Accumulator holds 4 output channels for this pixel.
                float32x4_t _sum = vdupq_n_f32(0.f);

                if (bias_data_ptr)
                {
                    // Bias is float32, 4 consecutive values per output group.
                    _sum = vld1q_f32(bias_data_ptr + p * 4);
                }

                const unsigned short* kptr = weight_data_bf16.channel(p);

                // channels
                for (int q = 0; q < channels; q++)
                {
                    const Mat m = bottom_blob.channel(q);
                    const unsigned short* sptr = m.row<const unsigned short>(i * stride_h) + j * stride_w;

                    for (int k = 0; k < maxk; k++)
                    {
                        // Broadcast one bf16 input sample; load 4 bf16
                        // weights (one per output channel) and widen.
                        float32x4_t _val = vdupq_n_f32(bfloat16_to_float32(sptr[space_ofs[k]]));
                        float32x4_t _w = vcvt_f32_bf16(vld1_u16(kptr));
                        _sum = vmlaq_f32(_sum, _val, _w);

                        kptr += 4;
                    }
                }

                _sum = activation_ps(_sum, activation_type, activation_params);

                // Narrow the 4 results back to bf16 and store.
                vst1_u16(outptr + j * 4, vcvt_bf16_f32(_sum));
            }

            outptr += outw * 4;
        }
    }
}
GB_unaryop__ainv_int8_fp32.c
//------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2019, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A) function: GB_unop__ainv_int8_fp32
// op(A') function: GB_tran__ainv_int8_fp32

// C type: int8_t
// A type: float
// cast: int8_t cij ; GB_CAST_SIGNED(cij,aij,8)
// unaryop: cij = -aij

// The macros below parameterize the generic templates
// (GB_unaryop_transpose.c and the apply loop) for this specific
// type/operator combination: AINV (additive inverse) applied to a float
// input, producing an int8_t output via a saturating signed cast.

#define GB_ATYPE \
    float

#define GB_CTYPE \
    int8_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
    float aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator
#define GB_OP(z, x) \
    z = -x ;

// casting
#define GB_CASTING(z, x) \
    int8_t z ; GB_CAST_SIGNED(z,x,8) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (x, aij) ; \
    GB_OP (GB_CX (pC), x) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_INT8 || GxB_NO_FP32)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Applies z = -(int8_t) x elementwise over the anz entries of Ax into Cx,
// parallelized with a static OpenMP schedule.  Returns GrB_NO_VALUE when
// this kernel was compiled out via GB_DISABLE.
GrB_Info GB_unop__ainv_int8_fp32
(
    int8_t *restrict Cx,
    const float *restrict Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (int64_t p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Fused transpose + typecast + apply; the actual work is in the shared
// template GB_unaryop_transpose.c, specialized by the macros above.
GrB_Info GB_tran__ainv_int8_fp32
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t **Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *restrict A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
cgetrs.c
/**
 *
 * @file
 *
 *  PLASMA is a software package provided by:
 *  University of Tennessee, US,
 *  University of Manchester, UK.
 *
 * @generated from /home/luszczek/workspace/plasma/bitbucket/plasma/compute/zgetrs.c, normal z -> c, Fri Sep 28 17:38:06 2018
 *
 **/

#include "plasma.h"
#include "plasma_async.h"
#include "plasma_context.h"
#include "plasma_descriptor.h"
#include "plasma_internal.h"
#include "plasma_tuning.h"
#include "plasma_types.h"
#include "plasma_workspace.h"

/***************************************************************************//**
 *
 * Solves A*X = B for X using the LU factorization of A computed by
 * plasma_cgetrf (pA holds the factors, ipiv the row pivots).  The solution
 * overwrites pB.
 *
 * @param[in]     n    Order of the matrix A.  n >= 0.
 * @param[in]     nrhs Number of right-hand sides.  nrhs >= 0.
 * @param[in]     pA   LU factors, as returned by the factorization.
 * @param[in]     lda  Leading dimension of pA.  lda >= max(1,n).
 * @param[in]     ipiv Pivot indices from the factorization.
 * @param[in,out] pB   On entry the right-hand sides; on exit the solution.
 * @param[in]     ldb  Leading dimension of pB.  ldb >= max(1,n).
 *
 * @retval PlasmaSuccess on success, a negative argument index or an error
 *         code otherwise.
 *
 * Fixes over the previous version: the tile descriptor A is no longer
 * leaked when creating descriptor B fails, and the return values of
 * plasma_sequence_init()/plasma_request_init() are now checked instead of
 * being assigned to retval and ignored.
 *
 ******************************************************************************/
int plasma_cgetrs(int n, int nrhs,
                  plasma_complex32_t *pA, int lda, int *ipiv,
                  plasma_complex32_t *pB, int ldb)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        return PlasmaErrorNotInitialized;
    }

    // Check input arguments.
    if (n < 0) {
        plasma_error("illegal value of n");
        return -1;
    }
    if (nrhs < 0) {
        plasma_error("illegal value of nrhs");
        return -2;
    }
    if (lda < imax(1, n)) {
        plasma_error("illegal value of lda");
        return -4;
    }
    if (ldb < imax(1, n)) {
        plasma_error("illegal value of ldb");
        return -7;
    }

    // quick return
    if (imin(n, nrhs) == 0)
        return PlasmaSuccess;

    // Tune parameters.
    if (plasma->tuning)
        plasma_tune_trsm(plasma, PlasmaComplexFloat, n, n);

    // Set tiling parameters.
    int nb = plasma->nb;

    // Initialize barrier.
    plasma_barrier_init(&plasma->barrier);

    // Create tile matrices.
    plasma_desc_t A;
    plasma_desc_t B;
    int retval;
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, n, 0, 0, n, n, &A);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return retval;
    }
    retval = plasma_desc_general_create(PlasmaComplexFloat, nb, nb,
                                        n, nrhs, 0, 0, n, nrhs, &B);
    if (retval != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        // Don't leak the already-created descriptor A.
        plasma_desc_destroy(&A);
        return retval;
    }

    // Initialize sequence.
    plasma_sequence_t sequence;
    retval = plasma_sequence_init(&sequence);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    // Initialize request.
    plasma_request_t request;
    retval = plasma_request_init(&request);
    if (retval != PlasmaSuccess) {
        plasma_desc_destroy(&A);
        plasma_desc_destroy(&B);
        return retval;
    }

    #pragma omp parallel
    #pragma omp master
    {
        // Translate to tile layout.
        plasma_omp_cge2desc(pA, lda, A, &sequence, &request);
        plasma_omp_cge2desc(pB, ldb, B, &sequence, &request);

        // Call the tile async function.
        plasma_omp_cgetrs(A, ipiv, B, &sequence, &request);

        // Translate back to LAPACK layout.
        plasma_omp_cdesc2ge(B, pB, ldb, &sequence, &request);
    }

    // Free matrices in tile layout.
    plasma_desc_destroy(&A);
    plasma_desc_destroy(&B);

    // Return status.
    int status = sequence.status;
    return status;
}

/***************************************************************************//**
 *
 * Tile-async version: applies the row interchanges recorded in ipiv to B,
 * then performs the forward (unit lower) and backward (non-unit upper)
 * triangular solves.  Errors are reported through the sequence/request
 * pair; all operations are submitted asynchronously.
 *
 ******************************************************************************/
void plasma_omp_cgetrs(plasma_desc_t A, int *ipiv, plasma_desc_t B,
                       plasma_sequence_t *sequence, plasma_request_t *request)
{
    // Get PLASMA context.
    plasma_context_t *plasma = plasma_context_self();
    if (plasma == NULL) {
        plasma_fatal_error("PLASMA not initialized");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // Check input arguments.
    if (plasma_desc_check(A) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid A");
        return;
    }
    if (plasma_desc_check(B) != PlasmaSuccess) {
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        plasma_error("invalid B");
        return;
    }
    if (sequence == NULL) {
        plasma_fatal_error("NULL sequence");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }
    if (request == NULL) {
        plasma_fatal_error("NULL request");
        plasma_request_fail(sequence, request, PlasmaErrorIllegalValue);
        return;
    }

    // quick return
    if (A.n == 0 || B.n == 0)
        return;

    // Call the parallel functions.
    // P*B, then L*y = P*B, then U*x = y.
    plasma_pcgeswp(PlasmaRowwise, B, ipiv, 1, sequence, request);

    plasma_pctrsm(PlasmaLeft, PlasmaLower, PlasmaNoTrans, PlasmaUnit,
                  1.0, A,
                       B,
                  sequence, request);

    plasma_pctrsm(PlasmaLeft, PlasmaUpper, PlasmaNoTrans, PlasmaNonUnit,
                  1.0, A,
                       B,
                  sequence, request);
}
GB_binop__second_uint64.c
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0

//------------------------------------------------------------------------------

// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"

// C=binop(A,B) is defined by the following types and operators:

// A+B function (eWiseAdd): GB (_AaddB__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__second_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__second_uint64)
// A*D function (colscale): GB (_AxD__second_uint64)
// D*A function (rowscale): GB (_DxB__second_uint64)
// C+=B function (dense accum): GB (_Cdense_accumB__second_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__second_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__second_uint64)
// C=scalar+B GB ((none))
// C=scalar+B' GB ((none))
// C=A+scalar GB ((none))
// C=A'+scalar GB ((none))

// C type: uint64_t
// A type: uint64_t
// B,b type: uint64_t
// BinaryOp: cij = bij

// The macros below specialize the shared kernel templates for the SECOND
// operator on uint64_t: z = second(x,y) = y, so the A operand's value is
// never read (GB_GETA expands to nothing).

#define GB_ATYPE \
    uint64_t

#define GB_BTYPE \
    uint64_t

#define GB_CTYPE \
    uint64_t

// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
    1

// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
    1

// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
    1

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
    ;

// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
    uint64_t bij = GBX (Bx, pB, B_iso)

// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
    uint64_t t

// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
    cij = GBX (Ax, pA, A_iso)

// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
    cij = GBX (Bx, pB, B_iso)

#define GB_CX(p) Cx [p]

// binary operator
#define GB_BINOP(z,x,y,i,j) \
    z = y ;

// true if the binop must be flipped
#define GB_BINOP_FLIP \
    0

// op is second
#define GB_OP_IS_SECOND \
    1

// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2

// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_SECOND || GxB_NO_UINT64 || GxB_NO_SECOND_UINT64)

//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------

// Not generated for SECOND (it is not an accumulable monoid-style op).
#if 0

// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.

void GB ((none))
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #include "GB_dense_ewise3_accum_template.c"
}

#endif

//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_ewise3_noaccum__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_dense_ewise3_noaccum_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumB__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix B,
    const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        #include "GB_dense_subassign_23_template.c"
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------

GrB_Info GB (_Cdense_accumb__second_uint64)
(
    GrB_Matrix C,
    const GB_void *p_bwork,
    const int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    {
        // get the scalar b for C += b, of type uint64_t
        uint64_t bwork = (*((uint64_t *) p_bwork)) ;
        #include "GB_dense_subassign_22_template.c"
        return (GrB_SUCCESS) ;
    }
    // NOTE(review): unreachable — the template path above already returned.
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_AxD__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix A, bool A_is_pattern,
    const GrB_Matrix D, bool D_is_pattern,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_colscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------

GrB_Info GB (_DxB__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix D, bool D_is_pattern,
    const GrB_Matrix B, bool B_is_pattern,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    uint64_t *restrict Cx = (uint64_t *) C->x ;
    #include "GB_AxB_rowscale_template.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------

GrB_Info GB (_AaddB__second_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool Ch_is_Mh,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
    GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
    #include "GB_add_template.c"
    GB_FREE_WORK ;
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_08__second_uint64)
(
    GrB_Matrix C,
    const int C_sparsity,
    const int ewise_method,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const int64_t *restrict C_to_M,
    const int64_t *restrict C_to_A,
    const int64_t *restrict C_to_B,
    const GB_task_struct *restrict TaskList,
    const int C_ntasks,
    const int C_nthreads,
    GB_Context Context
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #include "GB_emult_08_meta.c"
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------

GrB_Info GB (_AemultB_02__second_uint64)
(
    GrB_Matrix C,
    const GrB_Matrix M,
    const bool Mask_struct,
    const bool Mask_comp,
    const GrB_Matrix A,
    const GrB_Matrix B,
    const bool flipxy,
    const int64_t *restrict Cp_kfirst,
    const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #if GB_BINOP_FLIP
    // The operator is not commutative, and does not have a flipped
    // variant. For example z=atan2(y,x).
    if (flipxy)
    {
        // use fmult(y,x)
        #undef GB_FLIPPED
        #define GB_FLIPPED 1
        #include "GB_emult_02_template.c"
    }
    else
    {
        // use fmult(x,y)
        #undef GB_FLIPPED
        #define GB_FLIPPED 0
        #include "GB_emult_02_template.c"
    }
    #else
    // No need to handle the flip: the operator is either commutative, or
    // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__second_uint64) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__second_uint64) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t x = (*((uint64_t *) x_input)) ; uint64_t *Bx = (uint64_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint64_t bij = GBX (Bx, p, false) ; Cx [p] = bij ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ #if 0 GrB_Info GB ((none)) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint64_t *Cx = (uint64_t *) Cx_output ; uint64_t *Ax = (uint64_t *) Ax_input ; uint64_t y = (*((uint64_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; ; ; Cx [p] = y ; } return (GrB_SUCCESS) ; #endif } #endif //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint64_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = aij ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint64_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t x = (*((const uint64_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint64_t } #endif //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ #if 0 // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ ; ; \ Cx [pC] = y ; \ } GrB_Info GB ((none)) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint64_t y = (*((const uint64_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif #endif
CPUWCOJ.h
#ifndef __CPU_WORST_CASE_OPTIMAL_JOIN_H__ #define __CPU_WORST_CASE_OPTIMAL_JOIN_H__ #include <pthread.h> #include "CPUFilter.h" #include "CPUGraph.h" #include "CPUIntersection.h" #include "CPUPatternMatch.h" #include "TimeMeasurer.h" #if defined(OPENMP) #include <omp.h> #endif class CPUWCOJoin : public CPUPatternMatch { public: CPUWCOJoin(TraversalPlan *plan, Graph *graph, size_t thread_num) : CPUPatternMatch(thread_num), plan_(plan), graph_(graph) {} virtual void Execute() { std::vector<std::vector<uintV> > intersect_levels; plan_->GetOrderedConnectivity(intersect_levels); AllCondType conditions; plan_->GetOrderedOrdering(conditions); omp_set_num_threads(thread_num_); TimeMeasurer timer; timer.StartTimer(); long long total_match_count = 0; auto paths = new uintV *[thread_num_]; for (size_t i = 0; i < thread_num_; ++i) { paths[i] = new uintV[plan_->GetVertexCount()]; } #pragma omp parallel for schedule(dynamic) reduction(+ : total_match_count) for (uintV u = 0; u < graph_->GetVertexCount(); ++u) { long long ans = 0; size_t thread_id = omp_get_thread_num(); #if defined(PROFILE) TimeMeasurer timer2; timer2.StartTimer(); #endif paths[thread_id][0] = u; DFS(thread_id, 1, paths[thread_id], ans, intersect_levels, conditions); #if defined(PROFILE) timer2.EndTimer(); thread_time_[thread_id] += timer2.GetElapsedMicroSeconds(); #endif total_match_count += ans; } for (size_t i = 0; i < thread_num_; ++i) { delete[] paths[i]; paths[i] = NULL; } delete[] paths; paths = NULL; timer.EndTimer(); this->SetTotalMatchCount(total_match_count); std::cout << "total_match_count=" << total_match_count << ", elapsed_time=" << timer.GetElapsedMicroSeconds() / 1000.0 << "ms" << std::endl; #if defined(PROFILE) for (size_t thread_id = 0; thread_id < thread_num_; ++thread_id) { std::cout << "thread_id=" << thread_id << ",time=" << thread_time_[thread_id] / 1000.0 << "ms" << std::endl; } #endif } private: void DFS(size_t thread_id, uintV cur_level, uintV *path, long long &ans, AllConnType 
&intersect_levels, AllCondType &conditions) { if (cur_level == plan_->GetVertexCount()) { ans++; return; } if (intersect_levels[cur_level].size() == 0) { for (uintV i = 0; i < graph_->GetVertexCount(); ++i) { if (CheckCondition(path, i, conditions[cur_level]) == false || CheckEquality(path, cur_level, i)) continue; path[cur_level] = i; DFS(thread_id, cur_level + 1, path, ans, intersect_levels, conditions); } } else { std::vector<uintV> candidates; MWayIntersect<HOME_MADE>(path, graph_->GetRowPtrs(), graph_->GetCols(), intersect_levels[cur_level], candidates); for (size_t i = 0; i < candidates.size(); ++i) { if (CheckCondition(path, candidates[i], conditions[cur_level]) == false || CheckEquality(path, cur_level, candidates[i])) continue; path[cur_level] = candidates[i]; DFS(thread_id, cur_level + 1, path, ans, intersect_levels, conditions); } } } private: TraversalPlan *plan_; Graph *graph_; }; #endif
server.c
#include <stdio.h> #include <stdlib.h> #include <stdint.h> #include <sys/types.h> #include <sys/socket.h> #include <netinet/in.h> #include <arpa/inet.h> #include <unistd.h> #include <netdb.h> #include <strings.h> #include <string.h> #include <assert.h> #include <signal.h> #include <errno.h> #include <sys/stat.h> #include <fcntl.h> #include <sys/mman.h> #include <time.h> #include <stdarg.h> #include <limits.h> #include <inttypes.h> #include <netinet/tcp.h> #include <sys/time.h> #include <sys/resource.h> #include "compat.h" const uint16_t serverport = 5006; static volatile sig_atomic_t quit = 0; static int fd; void signal_handler(int i) { (void)i; quit = 1; shutdown(fd, SHUT_RDWR); } #define ERR "ERROR: " #define WARN "WARNING: " #define DBG "DEBUG: " #define INFO "INFO: " int message(const char *format, ...) { va_list ap; time_t now = time(NULL); char buf[26]; int n; ctime_r(&now, buf); buf[strlen(buf)-1] = 0; #pragma omp critical { n = printf("[%s] ", buf); va_start(ap, format); n += vprintf(format, ap); va_end(ap); fflush(stdout); } return n; } int init_sockaddr(struct sockaddr_in *name, uint16_t port) { assert(name); bzero(name, sizeof(struct sockaddr_in)); name->sin_family = AF_INET; name->sin_addr.s_addr = INADDR_ANY; name->sin_port = htons(port); return 0; } ssize_t write_(int fd, const char *buf, size_t count) { ssize_t written = 0; while ((size_t)written < count) { ssize_t t = write(fd, buf+written, count - written); if (t < 0) { /* errno is set appropriately */ return -1; } if (t == 0 && (size_t)(written + t) != count) { /* zero indicates nothing was written */ return -1; } written += t; } return written; } ssize_t read_(int fd, char *buf, size_t count) { /* I known this isn't a word in English, but "read" was already taken. 
*/ ssize_t readen = 0; while ((size_t)readen < count) { ssize_t t = read(fd, buf + readen, count - readen); if (t < 0) { /* errno is set appropriately */ return -1; } if (t == 0 && (size_t)(readen + t) != count) { /* zero indicates end of file */ return -1; } readen += t; } return readen; } int read_uint64(int fd, uint64_t *nptr) { uint32_t nh, nl; uint64_t n; if (read_(fd, (void *)&nh, 4) < 0) { return -1; } if (read_(fd, (void *)&nl, 4) < 0) { return -1; } nh = ntohl(nh); nl = ntohl(nl); n = ((uint64_t)nh << 32) + nl; assert( nptr != NULL ); *nptr = n; return 0; } int write_uint64(int fd, uint64_t n) { uint32_t nh, nl; nh = (uint32_t)(n >> 32); nl = (uint32_t)(n); nh = htonl(nh); nl = ntohl(nl); if (write_(fd, (void *)&nh, 4) < 0) { return -1; } if (write_(fd, (void *)&nl, 4) < 0) { return -1; } return 0; } #define TASK_SIZE 40 /* 2^32 assignments (tasks) */ #define ASSIGNMENTS_NO (UINT64_C(1) << 32) #define MAP_SIZE (ASSIGNMENTS_NO >> 3) #define RECORDS_SIZE (ASSIGNMENTS_NO * 8) #define IS_ASSIGNED(n) ( ( g_map_assigned[ (n)>>3 ] >> ((n)&7) ) & 1 ) #define IS_COMPLETE(n) ( ( g_map_complete[ (n)>>3 ] >> ((n)&7) ) & 1 ) #define SET_ASSIGNED(n) ( g_map_assigned[(n)>>3] |= (1<<((n)&7)) ) #define SET_UNASSIGNED(n) ( g_map_assigned[(n)>>3] &= UCHAR_MAX ^ (1<<((n)&7)) ) #define SET_COMPLETE(n) ( g_map_complete[(n)>>3] |= (1<<((n)&7)) ) #define SET_INCOMPLETE(n) ( g_map_complete[(n)>>3] &= UCHAR_MAX ^ (1<<((n)&7)) ) uint64_t g_lowest_unassigned = 0; /* bit index, not byte */ uint64_t g_lowest_incomplete = 0; unsigned char *g_map_assigned; unsigned char *g_map_complete; uint64_t *g_checksums; uint64_t *g_usertimes; uint64_t *g_overflows; uint64_t *g_clientids; uint64_t *g_mxoffsets; int set_complete(uint64_t n) { if (IS_COMPLETE(n)) { message(INFO "assignment %" PRIu64 " was already complete (duplicate result)\n", n); } if (!IS_ASSIGNED(n)) { message(ERR "assignment %" PRIu64 " was not assigned, discarting the result!\n", n); return -1; } SET_COMPLETE(n); /* advance 
g_lowest_incomplete pointer */ if (n == g_lowest_incomplete) { for (; IS_COMPLETE(g_lowest_incomplete); ++g_lowest_incomplete) ; } return 0; } uint64_t get_assignment() { uint64_t n = g_lowest_unassigned; SET_ASSIGNED(n); /* advance g_lowest_unassigned */ for (; IS_ASSIGNED(g_lowest_unassigned); ++g_lowest_unassigned) ; return n; } void unset_assignment(uint64_t n) { if (IS_COMPLETE(n)) { message(WARN "assignment %" PRIu64 " is already complete, invalid interrupt request!\n", n); } SET_UNASSIGNED(n); if (g_lowest_unassigned > n) { g_lowest_unassigned = n; } } uint64_t get_missed_assignment(int thread_id) { uint64_t n = g_lowest_incomplete; int t; for (t = 0; t < thread_id; ++t) { n++; while (IS_COMPLETE(n)) { n++; } } SET_ASSIGNED(n); /* advance g_lowest_unassigned */ if (n == g_lowest_unassigned) { for (; IS_ASSIGNED(g_lowest_unassigned); ++g_lowest_unassigned) ; } return n; } void *open_map(const char *path) { int fd = open(path, O_RDWR | O_CREAT, 0600); void *ptr; if (fd < 0) { perror("open"); abort(); } if (ftruncate(fd, (off_t)MAP_SIZE) < 0) { perror("ftruncate"); abort(); } ptr = mmap(NULL, (size_t)MAP_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (ptr == MAP_FAILED) { perror("mmap"); abort(); } close(fd); return ptr; } uint64_t *open_records(const char *path) { int fd = open(path, O_RDWR | O_CREAT, 0600); void *ptr; if (fd < 0) { perror("open"); abort(); } if (ftruncate(fd, (off_t)RECORDS_SIZE) < 0) { perror("ftruncate"); abort(); } ptr = mmap(NULL, (size_t)RECORDS_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (ptr == MAP_FAILED) { perror("mmap"); abort(); } close(fd); return (uint64_t *)ptr; } int read_message(int fd, int thread_id, const char *ipv4) { char protocol_version; char msg[4]; if (read_(fd, msg, 4) < 0) { return -1; } protocol_version = msg[3]; /* unsupported protocol */ if (protocol_version > 2) { message(ERR "unsupported protocol version\n"); return -1; } msg[3] = 0; if (strcmp(msg, "MUL") == 0) { uint64_t threads; int tid; 
read_uint64(fd, &threads); message(INFO "received multiple requests for %" PRIu64 " threads from address %s\n", threads, ipv4); assert(threads < INT_MAX); for (tid = 0; tid < (int)threads; ++tid) { if (read_message(fd, tid, ipv4) < 0) { message(ERR "cannot completely process the MUL request\n"); return -1; } } } else if (strcmp(msg, "REQ") == 0) { /* requested assignment */ uint64_t n; uint64_t clid = 0; n = get_assignment(); message(INFO "assignment requested: %" PRIu64 "\n", n); if (write_uint64(fd, n) < 0) { return -1; } if (write_uint64(fd, TASK_SIZE) < 0) { message(ERR "unable to write task size, update the client!\n"); return -1; } if (read_uint64(fd, &clid) < 0) { message(ERR "client does not send client ID\n"); unset_assignment(n); return -1; } if (g_clientids[n] != 0) { message(WARN "assignment %" PRIu64 " was already assigned to another client, re-assigning\n", n); } g_clientids[n] = clid; } else if (strcmp(msg, "RET") == 0) { /* returning assignment */ uint64_t n; uint64_t task_size = 0; uint64_t overflow_counter = 0; uint64_t user_time = 0; uint64_t checksum = 0; uint64_t clid = 0; uint64_t mxoffset = 0; uint64_t cycleoff = 0; if (read_uint64(fd, &n) < 0) { return -1; } if (read_uint64(fd, &task_size) < 0) { message(ERR "unable to read task size, update the client!\n"); return -1; } if (task_size != TASK_SIZE) { message(ERR "TASK_SIZE mismatch! 
(client sent %" PRIu64 ")\n", task_size); return -1; } if (read_uint64(fd, &overflow_counter) < 0) { message(ERR "client does not send the overflow counter!\n"); return -1; } if (read_uint64(fd, &user_time) < 0) { message(ERR "client does not send the user time!\n"); return -1; } if (read_uint64(fd, &checksum) < 0) { message(ERR "client does not send the check sum!\n"); return -1; } if (read_uint64(fd, &clid) < 0) { message(ERR "client does not send client ID\n"); return -1; } if (protocol_version > 0) { if (read_uint64(fd, &mxoffset) < 0) { message(ERR "client does not send maximum value offset\n"); return -1; } } if (protocol_version > 1) { if (read_uint64(fd, &cycleoff) < 0) { message(ERR "client does not send maximum cycle offset\n"); return -1; } } if (g_clientids[n] != clid) { message(WARN "assignment %" PRIu64 " was assigned to another client, ignoring the result! (done in %" PRIu64 " secs)\n", n, user_time); /* this can however be part of MUL request, so do not return -1 */ return 0; } if (clid == 42) { message(WARN "client ID is 42 :(\n"); } if (user_time == 0 && checksum == 0) { message(ERR "broken client, discarting the result!\n"); return -1; } if (checksum == 0) { message(ERR "zero checksum is invalid! (assignment %" PRIu64 ")\n", n); return -1; } if ((checksum>>23) != 196126 && (checksum>>24) != 0xa0ed && (checksum>>24) != 0x4cfe && (checksum>>24) != 0x3354 && (checksum>>28) != 0x83b && (checksum>>24) != 0x2a27 && (checksum>>24) != 0x5ae2 && (checksum>>24) != 0x5ae1 && (checksum>>24) != 0x3c96 && (checksum>>24) != 0x27d8 && (checksum>>24) != 0x2134) { message(ERR "suspicious checksum (%" PRIu64 ", 0x%" PRIx64 "), done in %" PRIu64 " secs, rejecting the result! 
(assignment %" PRIu64 ")\n", checksum, checksum, user_time, n); return -1; } message(INFO "assignment returned: %" PRIu64 " (%" PRIu64 " overflows, time %" PRIu64 ":%02" PRIu64 ":%02" PRIu64 ", checksum 0x%016" PRIx64 ")\n", n, overflow_counter, user_time/60/60, user_time/60%60, user_time%60, checksum); if (set_complete(n) < 0) { message(ERR "result rejected!\n"); /* this can however be part of MUL request, so do not return -1 */ return 0; } if (g_checksums[n] != 0 && g_checksums[n] != checksum) { message(ERR "checksums do not match! (the other checksum was %" PRIu64 ", 0x%016" PRIx64 ")\n", g_checksums[n], g_checksums[n]); } g_checksums[n] = checksum; g_usertimes[n] = user_time; g_overflows[n] = overflow_counter; if (g_mxoffsets[n] != 0 && g_mxoffsets[n] != mxoffset) { message(ERR "mxoffsets do not match! (the other mxoffset was +%" PRIu64 ")\n", g_mxoffsets[n]); } g_mxoffsets[n] = mxoffset; g_clientids[n] = 0; } else if (strcmp(msg, "req") == 0) { /* requested lowest incomplete assignment */ uint64_t n; uint64_t clid = 0; n = get_missed_assignment(thread_id); message(INFO "assignment requested: %" PRIu64 " (lowest incomplete +%i)\n", n, thread_id); if (write_uint64(fd, n) < 0) { return -1; } if (write_uint64(fd, TASK_SIZE) < 0) { message(ERR "unable to write task size, update the client!\n"); return -1; } if (read_uint64(fd, &clid) < 0) { message(ERR "client does not send client ID\n"); return -1; } if (g_clientids[n] != 0) { message(WARN "re-assigning the assignment\n"); } g_clientids[n] = clid; } else if (strcmp(msg, "INT") == 0) { /* interrupted or unable to solve, unreserve the assignment */ uint64_t n; uint64_t task_size = 0; uint64_t clid = 0; if (read_uint64(fd, &n) < 0) { return -1; } if (read_uint64(fd, &task_size) < 0) { message(ERR "unable to read task size, update the client!\n"); return -1; } if (read_uint64(fd, &clid) < 0) { message(ERR "client does not send client ID\n"); return -1; } if (g_clientids[n] != clid) { message(WARN "invalid request, 
assignment was assigned to another client, ignoring the request!\n"); /* this can be part of MUL request, so do not return -1 */ return 0; } if (task_size != TASK_SIZE) { message(ERR "TASK_SIZE mismatch!\n"); return -1; } message(INFO "assignment interrupted: %" PRIu64 "\n", n); unset_assignment(n); g_clientids[n] = 0; } else if (strcmp(msg, "LOI") == 0) { if (write_uint64(fd, g_lowest_incomplete) < 0) { return -1; } if (write_uint64(fd, TASK_SIZE) < 0) { return -1; } } else if (strcmp(msg, "HIR") == 0) { if (write_uint64(fd, g_lowest_unassigned) < 0) { return -1; } if (write_uint64(fd, TASK_SIZE) < 0) { return -1; } } else { message(ERR "%s: unknown client message!\n", msg); return -1; } return 0; } void set_incomplete_superblock(uint64_t sb) { uint64_t n; uint64_t c = 0; for (n = 0; n < (sb + 1) << 20; ++n) { uint64_t checksum; assert(n < ASSIGNMENTS_NO); checksum = g_checksums[n]; if (!checksum) { #if 0 printf("- resetting the assignment %" PRIu64 " due to user request\n", n); #endif #if 1 SET_UNASSIGNED(n); SET_INCOMPLETE(n); #endif c++; } } message(WARN "reset %" PRIu64 " assignments (superblock %" PRIu64 ")\n", c, sb); } int main(int argc, char *argv[]) { struct sockaddr_in server_addr; int reuse = 1; struct rlimit rlim; struct timeval timeout; int opt; int clear_incomplete_assigned = 0; int fix_records = 0; int invalidate_overflows = 0; int invalidate_new = 0; int reset_sb = 0; uint64_t sb; fd = socket(AF_INET, SOCK_STREAM, 0); while ((opt = getopt(argc, argv, "cfizr:")) != -1) { switch (opt) { case 'c': clear_incomplete_assigned = 1; break; case 'f': fix_records = 1; break; case 'i': invalidate_overflows = 1; break; case 'z': invalidate_new = 1; break; case 'r': reset_sb = 1; sb = atou64(optarg); break; default: message(ERR "Usage: %s [-c] [-f] [-i] [-z] [-r]\n", argv[0]); return EXIT_FAILURE; } } if (getrlimit(RLIMIT_NOFILE, &rlim) < 0) { /* errno is set appropriately. 
*/ perror("getrlimit"); } else { assert( sizeof(uint64_t) >= sizeof(rlim_t) ); message(INFO "limit file descriptors: %" PRIu64 " (hard %" PRIu64 ")\n", rlim.rlim_cur, rlim.rlim_max); } message(INFO "starting server...\n"); g_map_assigned = open_map("assigned.map"); g_map_complete = open_map("complete.map"); g_checksums = open_records("checksums.dat"); g_usertimes = open_records("usertimes.dat"); g_overflows = open_records("overflows.dat"); g_clientids = open_records("clientids.dat"); g_mxoffsets = open_records("mxoffsets.dat"); if (invalidate_overflows) { uint64_t n; message(WARN "Invalidating overflows...\n"); for (n = 0; n < ASSIGNMENTS_NO; ++n) { uint64_t overflow = g_overflows[n]; if (overflow != 0) { printf("- resetting the assignment %" PRIu64 " due to overflow\n", n); SET_UNASSIGNED(n); SET_INCOMPLETE(n); } } } if (invalidate_new) { uint64_t n; uint64_t c = 0; message(WARN "Invalidating new/buggy/outdated/incomplete/obsolete checksums...\n"); #if 1 for (n = 0; n < ASSIGNMENTS_NO; ++n) { uint64_t checksum = g_checksums[n]; if ((checksum>>24) == 0x2134) { printf("- resetting the assignment %" PRIu64 " due to buggy/obsolete checksum\n", n); SET_UNASSIGNED(n); SET_INCOMPLETE(n); } } #endif for (n = 0; n < ASSIGNMENTS_NO; ++n) { uint64_t checksum = g_checksums[n]; uint64_t usertime = g_usertimes[n]; uint64_t mxoffset = g_mxoffsets[n]; if (checksum) { if (!usertime || !mxoffset) { printf("- resetting the assignment %" PRIu64 " due to incomplete record\n", n); SET_UNASSIGNED(n); SET_INCOMPLETE(n); c++; } } } message(WARN "invalidated %" PRIu64 " results\n", c); } /* fix records the *.map and *.dat */ if (fix_records) { uint64_t n; uint64_t c0 = 0, c1 = 0, c2 = 0; message(WARN "Processing the records...\n"); for (n = 0; n < ASSIGNMENTS_NO; ++n) { /* complete ==> assigned */ if (IS_COMPLETE(n) && !IS_ASSIGNED(n)) { SET_ASSIGNED(n); c0++; } /* complete ==> zero clid */ if (IS_COMPLETE(n) && g_clientids[n] != 0) { g_clientids[n] = 0; c1++; } /* not assigned ==> no clid 
*/ if (!IS_ASSIGNED(n) && g_clientids[n] != 0) { g_clientids[n] = 0; c2++; } } message(WARN "These corrections have been made: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n", c0, c1, c2); } if (reset_sb) { set_incomplete_superblock(sb); } for (g_lowest_unassigned = 0; IS_ASSIGNED(g_lowest_unassigned); ++g_lowest_unassigned) ; for (g_lowest_incomplete = 0; IS_COMPLETE(g_lowest_incomplete); ++g_lowest_incomplete) ; if (clear_incomplete_assigned) { message(WARN "incomplete assignments will be cleared...\n"); while (g_lowest_unassigned > g_lowest_incomplete) { g_lowest_unassigned--; if (IS_ASSIGNED(g_lowest_unassigned) && !IS_COMPLETE(g_lowest_unassigned)) { SET_UNASSIGNED(g_lowest_unassigned); } } message(WARN "incomplete assignments have been cleared!\n"); } message(INFO "lowest unassigned = %" PRIu64 "\n", g_lowest_unassigned); message(INFO "lowest incomplete = %" PRIu64 "\n", g_lowest_incomplete); message(INFO "*** all numbers below %" PRIu64 " * 2^%" PRIu64 " are convergent (blocks) ***\n", g_lowest_incomplete, TASK_SIZE); message(INFO "*** all numbers below %" PRIu64 " * 2^%" PRIu64 " are convergent (superblocks) ***\n", g_lowest_incomplete >> 20, TASK_SIZE + 20); signal(SIGINT, signal_handler); signal(SIGTERM, signal_handler); if (fd < 0) { perror("socket"); abort(); } if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, (void *)&reuse, (socklen_t)sizeof(reuse)) < 0) { perror("setsockopt"); abort(); } /* TCP_NODELAY have to be either enabled or disabled on both sides (server as well as client), not just on one side! 
*/ if (setsockopt(fd, IPPROTO_TCP, TCP_NODELAY, (void *)&reuse, (socklen_t)sizeof(reuse)) < 0) { perror("setsockopt"); abort(); } timeout.tv_sec = 10; timeout.tv_usec = 0; if (setsockopt(fd, SOL_SOCKET, SO_RCVTIMEO, (void *)&timeout, sizeof(timeout)) < 0) { perror("setsockopt failed\n"); } if (setsockopt(fd, SOL_SOCKET, SO_SNDTIMEO, (void *)&timeout, sizeof(timeout)) < 0) { perror("setsockopt failed\n"); } init_sockaddr(&server_addr, serverport); if (bind(fd, (struct sockaddr *) &server_addr, sizeof(server_addr)) < 0) { perror("bind"); abort(); } if (listen(fd, 10) < 0) { perror("listen"); abort(); } message(INFO "listening...\n"); while (1) { struct sockaddr_in sockaddr_in; socklen_t sockaddr_len = sizeof sockaddr_in; int cl_fd = accept(fd, &sockaddr_in, &sockaddr_len); const char *ipv4 = "(unknown)"; if (-1 == cl_fd) { if (quit) break; message(ERR "cannot accept a connection on a socket!\n"); } if (sockaddr_len >= sizeof sockaddr_in && sockaddr_in.sin_family == AF_INET) { ipv4 = inet_ntoa(sockaddr_in.sin_addr); } if (read_message(cl_fd, 0, ipv4) < 0) { message(ERR "client <--> server communication failure!\n"); } close(cl_fd); if (quit) break; } message(INFO "closing server socket...\n"); close(fd); msync(g_map_assigned, MAP_SIZE, MS_SYNC); msync(g_map_complete, MAP_SIZE, MS_SYNC); msync(g_checksums, RECORDS_SIZE, MS_SYNC); msync(g_usertimes, RECORDS_SIZE, MS_SYNC); msync(g_overflows, RECORDS_SIZE, MS_SYNC); msync(g_clientids, RECORDS_SIZE, MS_SYNC); msync(g_mxoffsets, RECORDS_SIZE, MS_SYNC); munmap(g_map_assigned, MAP_SIZE); munmap(g_map_complete, MAP_SIZE); munmap(g_checksums, RECORDS_SIZE); munmap(g_usertimes, RECORDS_SIZE); munmap(g_overflows, RECORDS_SIZE); munmap(g_clientids, RECORDS_SIZE); munmap(g_mxoffsets, RECORDS_SIZE); return 0; }
hypre_hopscotch_hash.h
/****************************************************************************** * Copyright 1998-2019 Lawrence Livermore National Security, LLC and other * HYPRE Project Developers. See the top-level COPYRIGHT file for details. * * SPDX-License-Identifier: (Apache-2.0 OR MIT) ******************************************************************************/ /** * Hopscotch hash is modified from the code downloaded from * https://sites.google.com/site/cconcurrencypackage/hopscotch-hashing * with the following terms of usage */ //////////////////////////////////////////////////////////////////////////////// //TERMS OF USAGE //------------------------------------------------------------------------------ // // Permission to use, copy, modify and distribute this software and // its documentation for any purpose is hereby granted without fee, // provided that due acknowledgments to the authors are provided and // this permission notice appears in all copies of the software. // The software is provided "as is". There is no warranty of any kind. // //Authors: // Maurice Herlihy // Brown University // and // Nir Shavit // Tel-Aviv University // and // Moran Tzafrir // Tel-Aviv University // // Date: July 15, 2008. // //////////////////////////////////////////////////////////////////////////////// // Programmer : Moran Tzafrir (MoranTza@gmail.com) // Modified : Jongsoo Park (jongsoo.park@intel.com) // Oct 1, 2015. 
// //////////////////////////////////////////////////////////////////////////////// #ifndef hypre_HOPSCOTCH_HASH_HEADER #define hypre_HOPSCOTCH_HASH_HEADER //#include <strings.h> #include <string.h> #include <stdio.h> #include <limits.h> #include <math.h> #ifdef HYPRE_USING_OPENMP #include <omp.h> #endif #include "_hypre_utilities.h" // Potentially architecture specific features used here: // __sync_val_compare_and_swap #ifdef __cplusplus extern "C" { #endif /****************************************************************************** * This next section of code is here instead of in _hypre_utilities.h to get * around some portability issues with Visual Studio. By putting it here, we * can explicitly include this '.h' file in a few files in hypre and compile * them with C++ instead of C (VS does not support C99 'inline'). ******************************************************************************/ #ifdef HYPRE_USING_ATOMIC static inline HYPRE_Int hypre_compare_and_swap(HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval) { #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 return __sync_val_compare_and_swap(ptr, oldval, newval); //#elif defind _MSC_VER //return _InterlockedCompareExchange((long *)ptr, newval, oldval); //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) // JSP: not many compilers have implemented this, so comment out for now //_Atomic HYPRE_Int *atomic_ptr = ptr; //atomic_compare_exchange_strong(atomic_ptr, &oldval, newval); //return oldval; #endif } static inline HYPRE_Int hypre_fetch_and_add(HYPRE_Int *ptr, HYPRE_Int value) { #if defined(__GNUC__) && defined(__GNUC_MINOR__) && defined(__GNUC_PATCHLEVEL__) && (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) > 40100 return __sync_fetch_and_add(ptr, value); //#elif defined _MSC_VER //return _InterlockedExchangeAdd((long *)ptr, 
value); //#elif defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L && !defined(__STDC_NO_ATOMICS__) // JSP: not many compilers have implemented this, so comment out for now //_Atomic HYPRE_Int *atomic_ptr = ptr; //return atomic_fetch_add(atomic_ptr, value); #endif } #else // !HYPRE_USING_ATOMIC static inline HYPRE_Int hypre_compare_and_swap(HYPRE_Int *ptr, HYPRE_Int oldval, HYPRE_Int newval) { if (*ptr == oldval) { *ptr = newval; return oldval; } else return *ptr; } static inline HYPRE_Int hypre_fetch_and_add(HYPRE_Int *ptr, HYPRE_Int value) { HYPRE_Int oldval = *ptr; *ptr += value; return oldval; } #endif // !HYPRE_USING_ATOMIC /******************************************************************************/ // Constants ................................................................ #define HYPRE_HOPSCOTCH_HASH_HOP_RANGE (32) #define HYPRE_HOPSCOTCH_HASH_INSERT_RANGE (4*1024) #define HYPRE_HOPSCOTCH_HASH_EMPTY (0) #define HYPRE_HOPSCOTCH_HASH_BUSY (1) // Small Utilities .......................................................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH static inline HYPRE_Int first_lsb_bit_indx(hypre_uint x) { return ffs(x) - 1; } #endif /** * hypre_Hash is adapted from xxHash with the following license. */ /* xxHash - Extremely Fast Hash algorithm Header File Copyright (C) 2012-2015, Yann Collet. BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php) Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. You can contact the author at : - xxHash source repository : https://github.com/Cyan4973/xxHash */ /*************************************** * Constants ***************************************/ #define HYPRE_XXH_PRIME32_1 2654435761U #define HYPRE_XXH_PRIME32_2 2246822519U #define HYPRE_XXH_PRIME32_3 3266489917U #define HYPRE_XXH_PRIME32_4 668265263U #define HYPRE_XXH_PRIME32_5 374761393U #define HYPRE_XXH_PRIME64_1 11400714785074694791ULL #define HYPRE_XXH_PRIME64_2 14029467366897019727ULL #define HYPRE_XXH_PRIME64_3 1609587929392839161ULL #define HYPRE_XXH_PRIME64_4 9650029242287828579ULL #define HYPRE_XXH_PRIME64_5 2870177450012600261ULL # define HYPRE_XXH_rotl32(x,r) ((x << r) | (x >> (32 - r))) # define HYPRE_XXH_rotl64(x,r) ((x << r) | (x >> (64 - r))) #if defined(HYPRE_MIXEDINT) || defined(HYPRE_BIGINT) static inline HYPRE_BigInt hypre_BigHash(HYPRE_BigInt input) { hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input); hypre_ulongint k1 = input; k1 *= HYPRE_XXH_PRIME64_2; k1 = HYPRE_XXH_rotl64(k1, 31); k1 *= HYPRE_XXH_PRIME64_1; h64 ^= k1; h64 = HYPRE_XXH_rotl64(h64, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4; h64 ^= h64 >> 33; h64 *= HYPRE_XXH_PRIME64_2; h64 ^= h64 >> 29; h64 *= HYPRE_XXH_PRIME64_3; h64 ^= h64 
>> 32; #ifndef NDEBUG if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64) { hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY); hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64); } #endif return h64; } #else static inline HYPRE_Int hypre_BigHash(HYPRE_Int input) { hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input); // 1665863975 is added to input so that // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY. // Hence, we're fine as long as key is non-negative. h32 += (input + 1665863975)*HYPRE_XXH_PRIME32_3; h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4; h32 ^= h32 >> 15; h32 *= HYPRE_XXH_PRIME32_2; h32 ^= h32 >> 13; h32 *= HYPRE_XXH_PRIME32_3; h32 ^= h32 >> 16; //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32); return h32; } #endif #ifdef HYPRE_BIGINT static inline HYPRE_Int hypre_Hash(HYPRE_Int input) { hypre_ulongint h64 = HYPRE_XXH_PRIME64_5 + sizeof(input); hypre_ulongint k1 = input; k1 *= HYPRE_XXH_PRIME64_2; k1 = HYPRE_XXH_rotl64(k1, 31); k1 *= HYPRE_XXH_PRIME64_1; h64 ^= k1; h64 = HYPRE_XXH_rotl64(h64, 27)*HYPRE_XXH_PRIME64_1 + HYPRE_XXH_PRIME64_4; h64 ^= h64 >> 33; h64 *= HYPRE_XXH_PRIME64_2; h64 ^= h64 >> 29; h64 *= HYPRE_XXH_PRIME64_3; h64 ^= h64 >> 32; #ifndef NDEBUG if (HYPRE_HOPSCOTCH_HASH_EMPTY == h64) { hypre_printf("hash(%lld) = %d\n", h64, HYPRE_HOPSCOTCH_HASH_EMPTY); hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h64); } #endif return h64; } #else static inline HYPRE_Int hypre_Hash(HYPRE_Int input) { hypre_uint h32 = HYPRE_XXH_PRIME32_5 + sizeof(input); // 1665863975 is added to input so that // only -1073741824 gives HYPRE_HOPSCOTCH_HASH_EMPTY. // Hence, we're fine as long as key is non-negative. 
h32 += (input + 1665863975)*HYPRE_XXH_PRIME32_3; h32 = HYPRE_XXH_rotl32(h32, 17)*HYPRE_XXH_PRIME32_4; h32 ^= h32 >> 15; h32 *= HYPRE_XXH_PRIME32_2; h32 ^= h32 >> 13; h32 *= HYPRE_XXH_PRIME32_3; h32 ^= h32 >> 16; //hypre_assert(HYPRE_HOPSCOTCH_HASH_EMPTY != h32); return h32; } #endif static inline void hypre_UnorderedIntSetFindCloserFreeBucket( hypre_UnorderedIntSet *s, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* start_seg, #endif HYPRE_Int *free_bucket, HYPRE_Int *free_dist ) { HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = s->hopInfo[move_bucket]; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]); if(start_seg != move_segment) omp_set_lock(&move_segment->lock); #endif if (start_hop_info == s->hopInfo[move_bucket]) { // new_free_bucket -> free_bucket and empty new_free_bucket HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist; s->key[*free_bucket] = s->key[new_free_bucket]; s->hash[*free_bucket] = s->hash[new_free_bucket]; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif s->hopInfo[move_bucket] |= (1U << move_free_dist); s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif } ++move_bucket; } *free_bucket = -1; *free_dist = 0; } static inline 
void hypre_UnorderedBigIntSetFindCloserFreeBucket( hypre_UnorderedBigIntSet *s, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* start_seg, #endif HYPRE_Int *free_bucket, HYPRE_Int *free_dist ) { HYPRE_Int move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = s->hopInfo[move_bucket]; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(s->segments[move_bucket & s->segmentMask]); if(start_seg != move_segment) omp_set_lock(&move_segment->lock); #endif if (start_hop_info == s->hopInfo[move_bucket]) { // new_free_bucket -> free_bucket and empty new_free_bucket HYPRE_Int new_free_bucket = move_bucket + move_new_free_dist; s->key[*free_bucket] = s->key[new_free_bucket]; s->hash[*free_bucket] = s->hash[new_free_bucket]; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif s->hopInfo[move_bucket] |= (1U << move_free_dist); s->hopInfo[move_bucket] &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif } ++move_bucket; } *free_bucket = -1; *free_dist = 0; } static inline void hypre_UnorderedIntMapFindCloserFreeBucket( hypre_UnorderedIntMap *m, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* start_seg, #endif hypre_HopscotchBucket** free_bucket, HYPRE_Int* free_dist) { hypre_HopscotchBucket* move_bucket = *free_bucket - 
(HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = move_bucket->hopInfo; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]); if (start_seg != move_segment) omp_set_lock(&move_segment->lock); #endif if (start_hop_info == move_bucket->hopInfo) { // new_free_bucket -> free_bucket and empty new_free_bucket hypre_HopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist; (*free_bucket)->data = new_free_bucket->data; (*free_bucket)->key = new_free_bucket->key; (*free_bucket)->hash = new_free_bucket->hash; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif move_bucket->hopInfo |= (1U << move_free_dist); move_bucket->hopInfo &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif } ++move_bucket; } *free_bucket = NULL; *free_dist = 0; } static inline void hypre_UnorderedBigIntMapFindCloserFreeBucket( hypre_UnorderedBigIntMap *m, #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* start_seg, #endif hypre_BigHopscotchBucket** free_bucket, HYPRE_Int* free_dist) { hypre_BigHopscotchBucket* move_bucket = *free_bucket - (HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1); HYPRE_Int move_free_dist; for (move_free_dist = HYPRE_HOPSCOTCH_HASH_HOP_RANGE - 1; move_free_dist > 0; --move_free_dist) { hypre_uint start_hop_info = 
move_bucket->hopInfo; HYPRE_Int move_new_free_dist = -1; hypre_uint mask = 1; HYPRE_Int i; for (i = 0; i < move_free_dist; ++i, mask <<= 1) { if (mask & start_hop_info) { move_new_free_dist = i; break; } } if (-1 != move_new_free_dist) { #ifdef HYPRE_CONCURRENT_HOPSCOTCH hypre_HopscotchSegment* move_segment = &(m->segments[(move_bucket - m->table) & m->segmentMask]); if (start_seg != move_segment) omp_set_lock(&move_segment->lock); #endif if (start_hop_info == move_bucket->hopInfo) { // new_free_bucket -> free_bucket and empty new_free_bucket hypre_BigHopscotchBucket* new_free_bucket = move_bucket + move_new_free_dist; (*free_bucket)->data = new_free_bucket->data; (*free_bucket)->key = new_free_bucket->key; (*free_bucket)->hash = new_free_bucket->hash; #ifdef HYPRE_CONCURRENT_HOPSCOTCH ++move_segment->timestamp; #pragma omp flush #endif move_bucket->hopInfo |= (1U << move_free_dist); move_bucket->hopInfo &= ~(1U << move_new_free_dist); *free_bucket = new_free_bucket; *free_dist -= move_free_dist - move_new_free_dist; #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif return; } #ifdef HYPRE_CONCURRENT_HOPSCOTCH if(start_seg != move_segment) omp_unset_lock(&move_segment->lock); #endif } ++move_bucket; } *free_bucket = NULL; *free_dist = 0; } void hypre_UnorderedIntSetCreate( hypre_UnorderedIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedBigIntSetCreate( hypre_UnorderedBigIntSet *s, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedIntMapCreate( hypre_UnorderedIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedBigIntMapCreate( hypre_UnorderedBigIntMap *m, HYPRE_Int inCapacity, HYPRE_Int concurrencyLevel); void hypre_UnorderedIntSetDestroy( hypre_UnorderedIntSet *s ); void hypre_UnorderedBigIntSetDestroy( hypre_UnorderedBigIntSet *s ); void hypre_UnorderedIntMapDestroy( hypre_UnorderedIntMap *m ); void 
hypre_UnorderedBigIntMapDestroy( hypre_UnorderedBigIntMap *m ); // Query Operations ......................................................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH static inline HYPRE_Int hypre_UnorderedIntSetContains( hypre_UnorderedIntSet *s, HYPRE_Int key ) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; HYPRE_Int bucket = hash & s->bucketMask; hypre_uint hopInfo = s->hopInfo[bucket]; if (0 == hopInfo) return 0; else if (1 == hopInfo ) { if (hash == s->hash[bucket] && key == s->key[bucket]) return 1; else return 0; } HYPRE_Int startTimestamp = segment->timestamp; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if (hash == s->hash[currElm] && key == s->key[currElm]) return 1; hopInfo &= ~(1U << i); } if (segment->timestamp == startTimestamp) return 0; HYPRE_Int i; for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i) { if (hash == s->hash[bucket + i] && key == s->key[bucket + i]) return 1; } return 0; } static inline HYPRE_Int hypre_UnorderedBigIntSetContains( hypre_UnorderedBigIntSet *s, HYPRE_BigInt key ) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ 
hypre_HopscotchSegment *segment = &s->segments[(HYPRE_Int)(hash & s->segmentMask)]; HYPRE_Int bucket = (HYPRE_Int)(hash & s->bucketMask); hypre_uint hopInfo = s->hopInfo[bucket]; if (0 == hopInfo) return 0; else if (1 == hopInfo ) { if (hash == s->hash[bucket] && key == s->key[bucket]) return 1; else return 0; } HYPRE_Int startTimestamp = segment->timestamp; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if (hash == s->hash[currElm] && key == s->key[currElm]) return 1; hopInfo &= ~(1U << i); } if (segment->timestamp == startTimestamp) return 0; HYPRE_Int i; for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i) { if (hash == s->hash[bucket + i] && key == s->key[bucket + i]) return 1; } return 0; } /** * @ret -1 if key doesn't exist */ static inline HYPRE_Int hypre_UnorderedIntMapGet( hypre_UnorderedIntMap *m, HYPRE_Int key) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ 
hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; hypre_HopscotchBucket *elmAry = &(m->table[hash & m->bucketMask]); hypre_uint hopInfo = elmAry->hopInfo; if (0 == hopInfo) return -1; else if (1 == hopInfo ) { if (hash == elmAry->hash && key == elmAry->key) return elmAry->data; else return -1; } HYPRE_Int startTimestamp = segment->timestamp; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_HopscotchBucket* currElm = elmAry + i; if (hash == currElm->hash && key == currElm->key) return currElm->data; hopInfo &= ~(1U << i); } if (segment->timestamp == startTimestamp) return -1; hypre_HopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]); HYPRE_Int i; for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket) { if (hash == currBucket->hash && key == currBucket->key) return currBucket->data; } return -1; } static inline HYPRE_Int hypre_UnorderedBigIntMapGet( hypre_UnorderedBigIntMap *m, HYPRE_BigInt key) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //CHECK IF ALREADY CONTAIN ................ 
hypre_HopscotchSegment *segment = &m->segments[(HYPRE_Int)(hash & m->segmentMask)]; hypre_BigHopscotchBucket *elmAry = &(m->table[(HYPRE_Int)(hash & m->bucketMask)]); hypre_uint hopInfo = elmAry->hopInfo; if (0 == hopInfo) return -1; else if (1 == hopInfo ) { if (hash == elmAry->hash && key == elmAry->key) return elmAry->data; else return -1; } HYPRE_Int startTimestamp = segment->timestamp; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_BigHopscotchBucket* currElm = elmAry + i; if (hash == currElm->hash && key == currElm->key) return currElm->data; hopInfo &= ~(1U << i); } if (segment->timestamp == startTimestamp) return -1; hypre_BigHopscotchBucket *currBucket = &(m->table[hash & m->bucketMask]); HYPRE_Int i; for (i = 0; i< HYPRE_HOPSCOTCH_HASH_HOP_RANGE; ++i, ++currBucket) { if (hash == currBucket->hash && key == currBucket->key) return currBucket->data; } return -1; } #endif //status Operations ......................................................... static inline HYPRE_Int hypre_UnorderedIntSetSize(hypre_UnorderedIntSet *s) { HYPRE_Int counter = 0; HYPRE_Int n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ++counter; } } return counter; } static inline HYPRE_Int hypre_UnorderedBigIntSetSize(hypre_UnorderedBigIntSet *s) { HYPRE_Int counter = 0; HYPRE_BigInt n = s->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if (HYPRE_HOPSCOTCH_HASH_EMPTY != s->hash[i]) { ++counter; } } return counter; } static inline HYPRE_Int hypre_UnorderedIntMapSize(hypre_UnorderedIntMap *m) { HYPRE_Int counter = 0; HYPRE_Int n = m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash ) { ++counter; } } return counter; } static inline HYPRE_Int hypre_UnorderedBigIntMapSize(hypre_UnorderedBigIntMap *m) { HYPRE_Int counter = 0; HYPRE_Int n = 
m->bucketMask + HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; HYPRE_Int i; for (i = 0; i < n; ++i) { if( HYPRE_HOPSCOTCH_HASH_EMPTY != m->table[i].hash ) { ++counter; } } return counter; } HYPRE_Int *hypre_UnorderedIntSetCopyToArray( hypre_UnorderedIntSet *s, HYPRE_Int *len ); HYPRE_BigInt *hypre_UnorderedBigIntSetCopyToArray( hypre_UnorderedBigIntSet *s, HYPRE_Int *len ); //modification Operations ................................................... #ifdef HYPRE_CONCURRENT_HOPSCOTCH static inline void hypre_UnorderedIntSetPut( hypre_UnorderedIntSet *s, HYPRE_Int key ) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; omp_set_lock(&segment->lock); HYPRE_Int bucket = hash&s->bucketMask; //CHECK IF ALREADY CONTAIN ................ hypre_uint hopInfo = s->hopInfo[bucket]; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if(hash == s->hash[currElm] && key == s->key[currElm]) { omp_unset_lock(&segment->lock); return; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... HYPRE_Int free_bucket = bucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket], (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) break; } //PLACE THE NEW KEY ....................... 
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { s->key[free_bucket] = key; s->hash[free_bucket] = hash; s->hopInfo[bucket] |= 1U << free_dist; omp_unset_lock(&segment->lock); return; } hypre_UnorderedIntSetFindCloserFreeBucket(s, segment, &free_bucket, &free_dist); } while (-1 != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return; } static inline void hypre_UnorderedBigIntSetPut( hypre_UnorderedBigIntSet *s, HYPRE_BigInt key ) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... hypre_HopscotchSegment *segment = &s->segments[hash & s->segmentMask]; omp_set_lock(&segment->lock); HYPRE_Int bucket = (HYPRE_Int)(hash&s->bucketMask); //CHECK IF ALREADY CONTAIN ................ hypre_uint hopInfo = s->hopInfo[bucket]; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); HYPRE_Int currElm = bucket + i; if(hash == s->hash[currElm] && key == s->key[currElm]) { omp_unset_lock(&segment->lock); return; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... HYPRE_Int free_bucket = bucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if( (HYPRE_HOPSCOTCH_HASH_EMPTY == s->hash[free_bucket]) && (HYPRE_HOPSCOTCH_HASH_EMPTY == hypre_compare_and_swap((HYPRE_Int *)&s->hash[free_bucket], (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) break; } //PLACE THE NEW KEY ....................... 
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { s->key[free_bucket] = key; s->hash[free_bucket] = hash; s->hopInfo[bucket] |= 1U << free_dist; omp_unset_lock(&segment->lock); return; } hypre_UnorderedBigIntSetFindCloserFreeBucket(s, segment, &free_bucket, &free_dist); } while (-1 != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return; } static inline HYPRE_Int hypre_UnorderedIntMapPutIfAbsent( hypre_UnorderedIntMap *m, HYPRE_Int key, HYPRE_Int data) { //CALCULATE HASH .......................... #ifdef HYPRE_BIGINT HYPRE_Int hash = hypre_BigHash(key); #else HYPRE_Int hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; omp_set_lock(&segment->lock); hypre_HopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]); //CHECK IF ALREADY CONTAIN ................ hypre_uint hopInfo = startBucket->hopInfo; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_HopscotchBucket* currElm = startBucket + i; if (hash == currElm->hash && key == currElm->key) { HYPRE_Int rc = currElm->data; omp_unset_lock(&segment->lock); return rc; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... hypre_HopscotchBucket* free_bucket = startBucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) && (HYPRE_HOPSCOTCH_HASH_EMPTY == __sync_val_compare_and_swap((HYPRE_Int *)&free_bucket->hash, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) break; } //PLACE THE NEW KEY ....................... 
if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { free_bucket->data = data; free_bucket->key = key; free_bucket->hash = hash; startBucket->hopInfo |= 1U << free_dist; omp_unset_lock(&segment->lock); return HYPRE_HOPSCOTCH_HASH_EMPTY; } hypre_UnorderedIntMapFindCloserFreeBucket(m, segment, &free_bucket, &free_dist); } while (NULL != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return HYPRE_HOPSCOTCH_HASH_EMPTY; } static inline HYPRE_Int hypre_UnorderedBigIntMapPutIfAbsent( hypre_UnorderedBigIntMap *m, HYPRE_BigInt key, HYPRE_Int data) { //CALCULATE HASH .......................... #if defined(HYPRE_BIGINT) || defined(HYPRE_MIXEDINT) HYPRE_BigInt hash = hypre_BigHash(key); #else HYPRE_BigInt hash = hypre_Hash(key); #endif //LOCK KEY HASH ENTERY .................... hypre_HopscotchSegment *segment = &m->segments[hash & m->segmentMask]; omp_set_lock(&segment->lock); hypre_BigHopscotchBucket* startBucket = &(m->table[hash & m->bucketMask]); //CHECK IF ALREADY CONTAIN ................ hypre_uint hopInfo = startBucket->hopInfo; while (0 != hopInfo) { HYPRE_Int i = first_lsb_bit_indx(hopInfo); hypre_BigHopscotchBucket* currElm = startBucket + i; if (hash == currElm->hash && key == currElm->key) { HYPRE_Int rc = currElm->data; omp_unset_lock(&segment->lock); return rc; } hopInfo &= ~(1U << i); } //LOOK FOR FREE BUCKET .................... 
hypre_BigHopscotchBucket* free_bucket = startBucket; HYPRE_Int free_dist = 0; for ( ; free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE; ++free_dist, ++free_bucket) { if( (HYPRE_HOPSCOTCH_HASH_EMPTY == free_bucket->hash) && (HYPRE_HOPSCOTCH_HASH_EMPTY == __sync_val_compare_and_swap((HYPRE_Int *)&free_bucket->hash, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_EMPTY, (HYPRE_Int)HYPRE_HOPSCOTCH_HASH_BUSY)) ) break; } //PLACE THE NEW KEY ....................... if (free_dist < HYPRE_HOPSCOTCH_HASH_INSERT_RANGE) { do { if (free_dist < HYPRE_HOPSCOTCH_HASH_HOP_RANGE) { free_bucket->data = data; free_bucket->key = key; free_bucket->hash = hash; startBucket->hopInfo |= 1U << free_dist; omp_unset_lock(&segment->lock); return HYPRE_HOPSCOTCH_HASH_EMPTY; } hypre_UnorderedBigIntMapFindCloserFreeBucket(m, segment, &free_bucket, &free_dist); } while (NULL != free_bucket); } //NEED TO RESIZE .......................... hypre_error_w_msg(HYPRE_ERROR_GENERIC,"ERROR - RESIZE is not implemented\n"); /*fprintf(stderr, "ERROR - RESIZE is not implemented\n");*/ exit(1); return HYPRE_HOPSCOTCH_HASH_EMPTY; } #endif #ifdef __cplusplus } // extern "C" #endif #endif // hypre_HOPSCOTCH_HASH_HEADER
key-hash.h
/*
 * Created by KuangPeng on 2018/4/8
 */
#ifndef KEY_HASH_H
#define KEY_HASH_H

#include <stdint.h>

namespace ns3 {

// Algorithm selectors for the 8-bit CRC variants implemented by hash_crc8().
enum CRC8_ALG
{
  CRC8 = 0,
  CRC8_DARC,
  CRC8_I_CODE,
  CRC8_ITU,
  CRC8_MAXIM,
  CRC8_ROHC,
  CRC8_WCDMA,
  CRC8_ALG_NUM    // number of CRC8 algorithms (not a valid selector)
};

// Algorithm selectors for the 16-bit CRC variants implemented by hash_crc16().
enum CRC16_ALG
{
  CRC16,
  CRC16_BUYPASS,
  CRC16_DDS_110,
  CRC16_DECT,
  CRC16_DNP,
  CRC16_EN_13757,
  CRC16_GENIBUS,
  CRC16_MAXIM,
  CRC16_MCRF4XX,
  CRC16_RIELLO,
  CRC16_T10_DIF,
  CRC16_TELEDISK,
  CRC16_USB,
  X_25,
  XMODEM,
  MODBUS,
  KERMIT,
  CRC_CCITT,
  CRC_AUG_CCITT,
  CRC16_ALG_NUM   // number of CRC16 algorithms (not a valid selector)
};

// Algorithm selectors for the 32-bit CRC variants implemented by hash_crc32().
enum CRC32_ALG
{
  CRC32 = 0,
  CRC32_BZIP2,
  CRC32C,
  CRC32D,
  CRC32_MPEG,
  POSIX,
  CRC32Q,
  JAMCRC,
  XFER,
  CRC32_ALG_NUM   // number of CRC32 algorithms (not a valid selector)
};

/**
 * Byte-wise key comparison.
 *
 * @param key1    first key buffer
 * @param key2    second key buffer
 * @param length  number of bytes to compare
 * @return 0 if the first `length` bytes of the two keys are identical,
 *         1 otherwise (not an ordering; equality test only)
 *
 * Note: the original code scanned all `length` bytes even after a mismatch
 * was found (presumably to allow the commented-out OpenMP parallelization).
 * Since the loop runs serially, we now exit on the first mismatch, which is
 * a pure performance win with an identical result.  If the OpenMP pragma is
 * ever re-enabled, this `break` must be removed again: `break` is not
 * permitted inside an OpenMP canonical `parallel for` loop.
 */
static inline int key_compare(uint8_t* key1, uint8_t* key2, int length)
{
  int i, j = 0;
  // #pragma omp parallel for
  for (i = 0; i < length; i++)
  {
    if (key1[i] != key2[i])
    {
      j = 1;
      break;  // first mismatch decides the result; stop scanning
    }
  }
  return j;
}

// CRC-based hash functions; `alg` selects one of the CRC*_ALG variants above.
// Implementations live in the corresponding source file.
uint32_t hash_crc32(const void* buf, int length, int alg);
uint16_t hash_crc16(const void* buf, int length, int alg);
uint8_t hash_crc8(const void* buf, int length, int alg);

} // namespace ns3

#endif // !KEY_HASH_H
binarytrees-3.c
// The Computer Language Benchmarks Game // http://benchmarksgame.alioth.debian.org/ // // Contributed by Jeremy Zerfas // Based on the C++ program from Jon Harrop, Alex Mizrahi, and Bruno Coutinho. // This controls the width of lines that are output by this program. #define MAXIMUM_LINE_WIDTH 60 #include <stdint.h> #include <stdlib.h> #include <stdio.h> typedef off_t off64_t; // This is needed to keep APR happy on 32 bit systems. #include <apr_pools.h> // intptr_t should be the native integer type on most sane systems. typedef intptr_t intnative_t; typedef struct tree_node{ struct tree_node * left_Node, * right_Node; int32_t value; } tree_node; // Create a binary tree of depth tree_Depth in memory_Pool, set the root node's // value to root_Node_Value, and finally return a pointer to the created binary // tree. static inline tree_node * create_Tree(const intnative_t root_Node_Value, const intnative_t tree_Depth, apr_pool_t * const memory_Pool){ tree_node * const root_Node=apr_palloc(memory_Pool, sizeof(tree_node)); // If tree_Depth is one or more then recursively call create_Tree() in order // to create the left and right subtrees using 2*root_Node_Value-1 and // 2*root_Node_Value respectively as the root values for those subtrees. if(tree_Depth>0){ root_Node->left_Node=create_Tree(2*root_Node_Value-1, tree_Depth-1, memory_Pool); root_Node->right_Node=create_Tree(2*root_Node_Value, tree_Depth-1, memory_Pool); }else root_Node->left_Node=root_Node->right_Node=NULL; root_Node->value=root_Node_Value; return root_Node; } // Compute and return the checksum for the binary tree that has root_Node as the // root node. static inline intnative_t compute_Tree_Checksum( const tree_node * const root_Node){ // If there are subtrees then recursively call compute_Tree_Checksum() on // them and factor their values into the checksum, otherwise just return // the value of root_Node. 
if(root_Node->left_Node) return compute_Tree_Checksum(root_Node->left_Node)- compute_Tree_Checksum(root_Node->right_Node)+root_Node->value; else return root_Node->value; } int main(int argc, char ** argv){ // Set minimum_Tree_Depth to 4 and maximum_Tree_Depth to the maximum of what // was specified as the argument to the program and minimum_Tree_Depth+2. const intnative_t minimum_Tree_Depth=4; intnative_t maximum_Tree_Depth=atoi(argv[1]); if(maximum_Tree_Depth < minimum_Tree_Depth+2) maximum_Tree_Depth=minimum_Tree_Depth+2; apr_initialize(); apr_pool_t * memory_Pool; // Create a memory pool, create a binary tree of depth maximum_Tree_Depth+1, // compute the checksum of the binary tree, print the statistics, and then // delete the memory pool. apr_pool_create_unmanaged(&memory_Pool); tree_node * stretch_Tree=create_Tree(0, maximum_Tree_Depth+1, memory_Pool); printf("stretch tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth+1, (intmax_t)compute_Tree_Checksum(stretch_Tree)); apr_pool_destroy(memory_Pool); // Create a memory pool and then create a long-lived binary tree of depth // maximum_Tree_Depth which will be left alone for a while while // more binary trees get allocated and deallocaited as required by the // rules. We'll finish working with this later. apr_pool_create_unmanaged(&memory_Pool); tree_node * long_Lived_Tree=create_Tree(0, maximum_Tree_Depth, memory_Pool); // Create a lot of binary trees in parallel of depths ranging from // minimum_Tree_Depth to maximum_Tree_Depth, compute and tally up all their // checksums, destroy the trees, and then record the statistics to // output_Buffer[] so they can be displayed in order later. 
char output_Buffer[maximum_Tree_Depth+1][MAXIMUM_LINE_WIDTH+1]; intnative_t current_Tree_Depth; #pragma omp parallel for for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2){ intnative_t iterations=1<<(maximum_Tree_Depth-current_Tree_Depth+ minimum_Tree_Depth); // Create a memory pool for this thread to use. apr_pool_t * thread_Memory_Pool; apr_pool_create_unmanaged(&thread_Memory_Pool); intnative_t i=1, total_Trees_Checksum=0; for(; i<=iterations; ++i){ // Create two binary trees of depth current_Tree_Depth but with one // having a root node value of i and the other a root node value of // -1. tree_node * const tree_1=create_Tree(i, current_Tree_Depth, thread_Memory_Pool); tree_node * const tree_2=create_Tree(-i, current_Tree_Depth, thread_Memory_Pool); // Compute the checksums for both trees and add them to // total_Trees_Checksum. total_Trees_Checksum+=compute_Tree_Checksum(tree_1)+ compute_Tree_Checksum(tree_2); apr_pool_clear(thread_Memory_Pool); } apr_pool_destroy(thread_Memory_Pool); // Record the statistics for the trees of depth current_Tree_Depth. sprintf(output_Buffer[current_Tree_Depth], "%jd\t trees of depth %jd\t check: %jd\n", (intmax_t)2*iterations, (intmax_t)current_Tree_Depth, (intmax_t)total_Trees_Checksum); } // Print the statistics for all of the various tree depths. for(current_Tree_Depth=minimum_Tree_Depth; current_Tree_Depth<=maximum_Tree_Depth; current_Tree_Depth+=2) printf("%s", output_Buffer[current_Tree_Depth]); // Compute the checksum of the long-lived binary tree that we created // earlier, print the statistics, and then delete the memory pool. printf("long lived tree of depth %jd\t check: %jd\n", (intmax_t)maximum_Tree_Depth, (intmax_t)compute_Tree_Checksum(long_Lived_Tree)); apr_pool_destroy(memory_Pool); apr_terminate(); return 0; }
nodal_residualbased_elimination_builder_and_solver_continuity.h
// | / | // ' / __| _` | __| _ \ __| // . \ | ( | | ( |\__ ` // _|\_\_| \__,_|\__|\___/ ____/ // Multi-Physics // // License: BSD License // Kratos default license: kratos/license.txt // // Main authors: Riccardo Rossi, Alessandro Franci // // #if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY) #define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY /* System includes */ #include <set> #ifdef _OPENMP #include <omp.h> #endif /* External includes */ // #define USE_GOOGLE_HASH #ifdef USE_GOOGLE_HASH #include "sparsehash/dense_hash_set" //included in external libraries #else #include <unordered_set> #endif /* Project includes */ #include "utilities/timer.h" #include "includes/define.h" #include "includes/key_hash.h" #include "solving_strategies/builder_and_solvers/builder_and_solver.h" #include "includes/model_part.h" #include "pfem_fluid_dynamics_application_variables.h" namespace Kratos { ///@name Kratos Globals ///@{ ///@} ///@name Type Definitions ///@{ ///@} ///@name Enum's ///@{ ///@} ///@name Functions ///@{ ///@} ///@name Kratos Classes ///@{ /** * @class NodalResidualBasedEliminationBuilderAndSolverContinuity * @ingroup KratosCore * @brief Current class provides an implementation for standard builder and solving operations. * @details The RHS is constituted by the unbalanced loads (residual) * Degrees of freedom are reordered putting the restrained degrees of freedom at * the end of the system ordered in reverse order with respect to the DofSet. * Imposition of the dirichlet conditions is naturally dealt with as the residual already contains * this information. 
* Calculation of the reactions involves a cost very similiar to the calculation of the total residual * @author Riccardo Rossi */ template <class TSparseSpace, class TDenseSpace, //= DenseSpace<double>, class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace> > class NodalResidualBasedEliminationBuilderAndSolverContinuity : public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> { public: ///@name Type Definitions ///@{ KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverContinuity); typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType; typedef typename BaseType::TSchemeType TSchemeType; typedef typename BaseType::TDataType TDataType; typedef typename BaseType::DofsArrayType DofsArrayType; typedef typename BaseType::TSystemMatrixType TSystemMatrixType; typedef typename BaseType::TSystemVectorType TSystemVectorType; typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType; typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType; typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType; typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType; typedef Node<3> NodeType; typedef typename BaseType::NodesArrayType NodesArrayType; typedef typename BaseType::ElementsArrayType ElementsArrayType; typedef typename BaseType::ConditionsArrayType ConditionsArrayType; typedef typename BaseType::ElementsContainerType ElementsContainerType; typedef Vector VectorType; ///@} ///@name Life Cycle ///@{ /** Constructor. */ NodalResidualBasedEliminationBuilderAndSolverContinuity( typename TLinearSolver::Pointer pNewLinearSystemSolver) : BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver) { // KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverContinuity") << "Using the standard builder and solver " << std::endl; } /** Destructor. 
*/ ~NodalResidualBasedEliminationBuilderAndSolverContinuity() override { } ///@} ///@name Operators ///@{ ///@} ///@name Operations ///@{ void BuildNodally( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //contributions to the continuity equation system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Element::EquationIdVectorType EquationId; const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; double pressure = 0; double deltaPressure = 0; double meanMeshSize = 0; double characteristicLength = 0; double density = 0; double nodalVelocityNorm = 0; double tauStab = 0; double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; double dNdXj = 0; double dNdYj = 0; double dNdZj = 0; unsigned int firstRow = 0; unsigned int firstCol = 0; /* #pragma omp parallel */ { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); const unsigned int neighSize = neighb_nodes.size() + 1; if (neighSize > 1) { const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); noalias(LHS_Contribution) = ZeroMatrix(neighSize, neighSize); noalias(RHS_Contribution) = ZeroVector(neighSize); if (EquationId.size() != neighSize) EquationId.resize(neighSize, false); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent 
= itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } if (deviatoricCoeff > 0.1) { deviatoricCoeff = 0.1; } double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1); LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff; RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff; RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume; const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE); EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId(); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId(); } firstRow = 0; firstCol = 0; meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); characteristicLength = 1.0 * meanMeshSize; density = itNode->FastGetSolutionStepValue(DENSITY); /* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */ if (dimension == 2) { nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y)); } else if (dimension == 3) { nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + 
itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) + itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z)); } tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) / (density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval); itNode->FastGetSolutionStepValue(NODAL_TAU) = tauStab; /* std::cout<<"tauStab= "<<tauStab<<std::endl; */ LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval); RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval); if (itNode->Is(FREE_SURFACE)) { // // double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA); // /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */ // /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */ LHS_Contribution(0, 0) += +4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize); RHS_Contribution[0] += -4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0); const array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL); Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1); /* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */ double nodalNormalAcceleration = 0; double nodalNormalProjDefRate = 0; if (dimension == 2) { 
nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */ // nodalNormalAcceleration=(0.5*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_X,1))*Normal[0] + // (0.5*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1))*Normal[1]; nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1]; } else if (dimension == 3) { nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] + 2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */ /* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */ } // RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea; double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize; double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize); RHS_Contribution[0] += 1.0 * tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume; } array_1d<double, 3> &VolumeAcceleration = 
itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; if (i != 0) { // i==0 of EquationIs has been already filled with the master node (that is not included in neighb_nodes). 
The next is stored for i+1 EquationId[i] = neighb_nodes[i - 1].GetDof(PRESSURE, xDofPos).EquationId(); // at i==0 density and volume acceleration are taken from the master node density = neighb_nodes[i - 1].FastGetSolutionStepValue(DENSITY); // VolumeAcceleration = neighb_nodes[i-1].FastGetSolutionStepValue(VOLUME_ACCELERATION); // // posX= neighb_nodes[i-1].X(); // // posY= neighb_nodes[i-1].Y(); // // coeffX =(12.0-24.0*posY)*pow(posX,4); // // coeffX += (-24.0+48.0*posY)*pow(posX,3); // // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // // coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); } if (dimension == 2) { // RHS_Contribution[i] += - tauStab * density * (dNdXi* VolumeAcceleration[0]*coeffX + dNdYi* VolumeAcceleration[1]*coeffY) * nodalVolume; RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1]) * nodalVolume; } else if (dimension == 3) { dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2]; RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1] + dNdZi * VolumeAcceleration[2]) * nodalVolume; } firstRow = 0; for (unsigned int j = 0; j < neighSize; j++) { dNdXj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow]; dNdYj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 1]; // double Vx=itNode->FastGetSolutionStepValue(VELOCITY_X,0); // double Vy=itNode->FastGetSolutionStepValue(VELOCITY_Y,0); if (j != 0) { pressure = neighb_nodes[j - 1].FastGetSolutionStepValue(PRESSURE, 0); // Vx= 
neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X,0); // Vy= neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y,0); // meanMeshSize=neighb_nodes[j-1].FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); // characteristicLength=2.0*meanMeshSize; // density=neighb_nodes[j-1].FastGetSolutionStepValue(DENSITY); // if(dimension==2){ // nodalVelocityNorm= sqrt(neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X) + // neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y)); // }else if(dimension==3){ // nodalVelocityNorm=sqrt(neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_X) + // neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Y) + // neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Z)*neighb_nodes[j-1].FastGetSolutionStepValue(VELOCITY_Z)); // } } else { pressure = itNode->FastGetSolutionStepValue(PRESSURE, 0); // meanMeshSize=itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); // characteristicLength=2.0*meanMeshSize; // density=itNode->FastGetSolutionStepValue(DENSITY); // if(dimension==2){ // nodalVelocityNorm= sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) + // itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y)); // }else if(dimension==3){ // nodalVelocityNorm=sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X)*itNode->FastGetSolutionStepValue(VELOCITY_X) + // itNode->FastGetSolutionStepValue(VELOCITY_Y)*itNode->FastGetSolutionStepValue(VELOCITY_Y) + // itNode->FastGetSolutionStepValue(VELOCITY_Z)*itNode->FastGetSolutionStepValue(VELOCITY_Z)); // } } // tauStab= 1.0 * (characteristicLength * characteristicLength * timeInterval) / ( density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * 
characteristicLength + 8.0 * deviatoricCoeff * timeInterval ); if (dimension == 2) { // // ////////////////// Laplacian term for LHS LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume; // // ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj) * nodalVolume * pressure; // RHS_Contribution[i] += (dNdXj*Vx + dNdYj*Vy)*nodalVolume/3.0; // LHS_Contribution(i,j)+= nodalVolume/volumetricCoeff/(1.0+double(neighSize)); // if(i==j){ // RHS_Contribution[i] += (-deltaPressure/volumetricCoeff )*nodalVolume; // } } else if (dimension == 3) { dNdZj = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstRow + 2]; ////////////////// Laplacian term for LHS LHS_Contribution(i, j) += +tauStab * (dNdXi * dNdXj + dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume; ////////////////// Laplacian term L_ij*P_j for RHS RHS_Contribution[i] += -tauStab * (dNdXi * dNdXj + dNdYi * dNdYj + dNdZi * dNdZj) * nodalVolume * pressure; } firstRow += dimension; } firstCol += dimension; } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } KRATOS_CATCH("") } void BuildNodallyUnlessLaplacian( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" 
<< std::endl; //contributions to the continuity equation system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Element::EquationIdVectorType EquationId; const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; double deltaPressure = 0; double meanMeshSize = 0; double characteristicLength = 0; double density = 0; double nodalVelocityNorm = 0; double tauStab = 0; double dNdXi = 0; double dNdYi = 0; double dNdZi = 0; unsigned int firstCol = 0; /* #pragma omp parallel */ { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); const unsigned int neighSize = neighb_nodes.size() + 1; if (neighSize > 1) { const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); noalias(LHS_Contribution) = ZeroMatrix(neighSize, neighSize); noalias(RHS_Contribution) = ZeroVector(neighSize); if (EquationId.size() != neighSize) EquationId.resize(neighSize, false); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very 
small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } if (deviatoricCoeff > 0.1) { deviatoricCoeff = 0.1; } double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1); LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff; RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff; RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume; const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE); EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId(); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId(); } firstCol = 0; meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); characteristicLength = 1.0 * meanMeshSize; density = itNode->FastGetSolutionStepValue(DENSITY); /* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */ if (dimension == 2) { nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y)); } else if (dimension == 3) { nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) + itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z)); } tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) / (density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval); itNode->FastGetSolutionStepValue(NODAL_TAU) = 
tauStab; LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval); RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval); if (itNode->Is(FREE_SURFACE)) { // // double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA); // /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */ // /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */ LHS_Contribution(0, 0) += +4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize); RHS_Contribution[0] += -4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0); array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL); Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1); /* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */ double nodalNormalAcceleration = 0; double nodalNormalProjDefRate = 0; if (dimension == 2) { nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */ // nodalNormalAcceleration=(0.5*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_X,1))*Normal[0] + // 
(0.5*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1))*Normal[1]; nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1]; } else if (dimension == 3) { nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] + 2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */ /* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */ } // RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea; double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize; double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize); RHS_Contribution[0] += 1.0 * tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume; } array_1d<double, 3> &VolumeAcceleration = itNode->FastGetSolutionStepValue(VOLUME_ACCELERATION); // double posX= itNode->X(); // double posY= itNode->Y(); // double coeffX =(12.0-24.0*posY)*pow(posX,4); // coeffX += (-24.0+48.0*posY)*pow(posX,3); // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // double coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // coeffY 
+= (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); for (unsigned int i = 0; i < neighSize; i++) { dNdXi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol]; dNdYi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 1]; if (i != 0) { // i==0 of EquationIs has been already filled with the master node (that is not included in neighb_nodes). The next is stored for i+1 EquationId[i] = neighb_nodes[i - 1].GetDof(PRESSURE, xDofPos).EquationId(); // at i==0 density and volume acceleration are taken from the master node density = neighb_nodes[i - 1].FastGetSolutionStepValue(DENSITY); // // VolumeAcceleration = neighb_nodes[i-1].FastGetSolutionStepValue(VOLUME_ACCELERATION); // // posX= neighb_nodes[i-1].X(); // // posY= neighb_nodes[i-1].Y(); // // coeffX =(12.0-24.0*posY)*pow(posX,4); // // coeffX += (-24.0+48.0*posY)*pow(posX,3); // // coeffX += (-48.0*posY+72.0*pow(posY,2)-48.0*pow(posY,3)+12.0)*pow(posX,2); // // coeffX += (-2.0+24.0*posY-72.0*pow(posY,2)+48.0*pow(posY,3))*posX; // // coeffX += 1.0-4.0*posY+12.0*pow(posY,2)-8.0*pow(posY,3); // // coeffY =(8.0-48.0*posY+48.0*pow(posY,2))*pow(posX,3); // // coeffY += (-12.0+72.0*posY-72.0*pow(posY,2))*pow(posX,2); // // coeffY += (4.0-24.0*posY+48.0*pow(posY,2)-48.0*pow(posY,3)+24.0*pow(posY,4))*posX; // // coeffY += -12.0*pow(posY,2)+24.0*pow(posY,3)-12.0*pow(posY,4); } if (dimension == 2) { // RHS_Contribution[i] += - tauStab * density * (dNdXi* VolumeAcceleration[0]*coeffX + dNdYi* VolumeAcceleration[1]*coeffY) * nodalVolume; RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1]) * nodalVolume; } else if (dimension == 3) { dNdZi = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS)[firstCol + 2]; RHS_Contribution[i] += -tauStab * density * (dNdXi * VolumeAcceleration[0] + dNdYi * VolumeAcceleration[1] + dNdZi * VolumeAcceleration[2]) * 
nodalVolume; } firstCol += dimension; } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } KRATOS_CATCH("") } void BuildNodallyNoVolumetricStabilizedTerms( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //contributions to the continuity equation system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Element::EquationIdVectorType EquationId; const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const unsigned int dimension = rModelPart.ElementsBegin()->GetGeometry().WorkingSpaceDimension(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; double deltaPressure = 0; double meanMeshSize = 0; double characteristicLength = 0; double density = 0; double nodalVelocityNorm = 0; double tauStab = 0; /* #pragma omp parallel */ { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); const unsigned int neighSize = neighb_nodes.size() + 1; if (neighSize > 1) { const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); noalias(LHS_Contribution) = ZeroMatrix(neighSize, neighSize); noalias(RHS_Contribution) = ZeroVector(neighSize); if (EquationId.size() != neighSize) EquationId.resize(neighSize, false); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); 
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } if (deviatoricCoeff > 0.1) { deviatoricCoeff = 0.1; } double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1); LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff; RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff; RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume; const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE); EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId(); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId(); } meanMeshSize = itNode->FastGetSolutionStepValue(NODAL_MEAN_MESH_SIZE); characteristicLength = 1.0 * meanMeshSize; density = itNode->FastGetSolutionStepValue(DENSITY); /* double tauStab=1.0/(8.0*deviatoricCoeff/(meanMeshSize*meanMeshSize)+2.0*density/timeInterval); */ if (dimension == 2) { nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y)); } else if (dimension == 3) { nodalVelocityNorm = sqrt(itNode->FastGetSolutionStepValue(VELOCITY_X) * itNode->FastGetSolutionStepValue(VELOCITY_X) + itNode->FastGetSolutionStepValue(VELOCITY_Y) * itNode->FastGetSolutionStepValue(VELOCITY_Y) + 
itNode->FastGetSolutionStepValue(VELOCITY_Z) * itNode->FastGetSolutionStepValue(VELOCITY_Z)); } tauStab = 1.0 * (characteristicLength * characteristicLength * timeInterval) / (density * nodalVelocityNorm * timeInterval * characteristicLength + density * characteristicLength * characteristicLength + 8.0 * deviatoricCoeff * timeInterval); itNode->FastGetSolutionStepValue(NODAL_TAU) = tauStab; LHS_Contribution(0, 0) += +nodalVolume * tauStab * density / (volumetricCoeff * timeInterval); RHS_Contribution[0] += -nodalVolume * tauStab * density / (volumetricCoeff * timeInterval) * (deltaPressure - itNode->FastGetSolutionStepValue(PRESSURE_VELOCITY, 0) * timeInterval); if (itNode->Is(FREE_SURFACE)) { // // double nodalFreesurfaceArea=itNode->FastGetSolutionStepValue(NODAL_FREESURFACE_AREA); // /* LHS_Contribution(0,0) += + 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize; */ // /* RHS_Contribution[0] += - 2.0 * tauStab * nodalFreesurfaceArea / meanMeshSize * itNode->FastGetSolutionStepValue(PRESSURE,0); */ LHS_Contribution(0, 0) += +4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize); RHS_Contribution[0] += -4.0 * tauStab * nodalVolume / (meanMeshSize * meanMeshSize) * itNode->FastGetSolutionStepValue(PRESSURE, 0); array_1d<double, 3> &Normal = itNode->FastGetSolutionStepValue(NORMAL); Vector &SpatialDefRate = itNode->FastGetSolutionStepValue(NODAL_SPATIAL_DEF_RATE); array_1d<double, 3> nodalAcceleration = 0.5 * (itNode->FastGetSolutionStepValue(VELOCITY, 0) - itNode->FastGetSolutionStepValue(VELOCITY, 1)) / timeInterval - itNode->FastGetSolutionStepValue(ACCELERATION, 1); /* nodalAcceleration= (itNode->FastGetSolutionStepValue(VELOCITY,0)-itNode->FastGetSolutionStepValue(VELOCITY,1))/timeInterval; */ double nodalNormalAcceleration = 0; double nodalNormalProjDefRate = 0; if (dimension == 2) { nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + 2 * Normal[0] * SpatialDefRate[2] * Normal[1]; /* 
nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X,1) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1); */ // nodalNormalAcceleration=(0.5*(itNode->FastGetSolutionStepValue(VELOCITY_X,0)-itNode->FastGetSolutionStepValue(VELOCITY_X,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_X,1))*Normal[0] + // (0.5*(itNode->FastGetSolutionStepValue(VELOCITY_Y,0)-itNode->FastGetSolutionStepValue(VELOCITY_Y,1))/timeInterval+0.5*itNode->FastGetSolutionStepValue(ACCELERATION_Y,1))*Normal[1]; nodalNormalAcceleration = Normal[0] * nodalAcceleration[0] + Normal[1] * nodalAcceleration[1]; } else if (dimension == 3) { nodalNormalProjDefRate = Normal[0] * SpatialDefRate[0] * Normal[0] + Normal[1] * SpatialDefRate[1] * Normal[1] + Normal[2] * SpatialDefRate[2] * Normal[2] + 2 * Normal[0] * SpatialDefRate[3] * Normal[1] + 2 * Normal[0] * SpatialDefRate[4] * Normal[2] + 2 * Normal[1] * SpatialDefRate[5] * Normal[2]; /* nodalNormalAcceleration=Normal[0]*itNode->FastGetSolutionStepValue(ACCELERATION_X) + Normal[1]*itNode->FastGetSolutionStepValue(ACCELERATION_Y) + Normal[2]*itNode->FastGetSolutionStepValue(ACCELERATION_Z); */ /* nodalNormalAcceleration=Normal[0]*nodalAcceleration[0] + Normal[1]*nodalAcceleration[1] + Normal[2]*nodalAcceleration[2]; */ } // RHS_Contribution[0] += tauStab * (density*nodalNormalAcceleration - 4.0*deviatoricCoeff*nodalNormalProjDefRate/meanMeshSize) * nodalFreesurfaceArea; double accelerationContribution = 2.0 * density * nodalNormalAcceleration / meanMeshSize; double deviatoricContribution = 8.0 * deviatoricCoeff * nodalNormalProjDefRate / (meanMeshSize * meanMeshSize); RHS_Contribution[0] += 1.0 * tauStab * (accelerationContribution - deviatoricContribution) * nodalVolume; } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } KRATOS_CATCH("") } void BuildNodallyNotStabilized( 
typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl; //contributions to the continuity equation system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); Element::EquationIdVectorType EquationId; const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); const double timeInterval = CurrentProcessInfo[DELTA_TIME]; double deltaPressure = 0; /* #pragma omp parallel */ { ModelPart::NodeIterator NodesBegin; ModelPart::NodeIterator NodesEnd; OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd); for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode) { NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES); const unsigned int neighSize = neighb_nodes.size() + 1; if (neighSize > 1) { const double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME); noalias(LHS_Contribution) = ZeroMatrix(neighSize, neighSize); noalias(RHS_Contribution) = ZeroVector(neighSize); if (EquationId.size() != neighSize) EquationId.resize(neighSize, false); double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT); double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR); if (yieldShear > 0) { double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT); double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE); double exponent = -adaptiveExponent * equivalentStrainRate; if (equivalentStrainRate != 0) { deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent)); } if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0) { // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield deviatoricCoeff = adaptiveExponent * yieldShear; } } if (deviatoricCoeff > 
0.1) { deviatoricCoeff = 0.1; } double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS); deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1); LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff; RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff; RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume; const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE); EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId(); for (unsigned int i = 0; i < neighb_nodes.size(); i++) { EquationId[i + 1] = neighb_nodes[i].GetDof(PRESSURE, xDofPos).EquationId(); } #ifdef _OPENMP Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array); #else Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } } KRATOS_CATCH("") } void BuildAll( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart, TSystemMatrixType &A, TSystemVectorType &b) { KRATOS_TRY KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" 
                              << std::endl;

    //contributions to the continuity equation system
    LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
    LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
    Element::EquationIdVectorType EquationId;
    const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
    const double timeInterval = CurrentProcessInfo[DELTA_TIME];
    double deltaPressure = 0;

    // --- nodal part: 1x1 local system per node (compressibility terms only) ---
    /* #pragma omp parallel */
    // {
    ModelPart::NodeIterator NodesBegin;
    ModelPart::NodeIterator NodesEnd;
    OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);

    for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
    {
        NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
        // the node itself plus its neighbours; isolated nodes are skipped
        const unsigned int neighSize = neighb_nodes.size() + 1;

        if (neighSize > 1)
        {
            // if (LHS_Contribution.size1() != neighSize)
            //     LHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!!
            // if (RHS_Contribution.size() != neighSize)
            //     RHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!!
            // LHS_Contribution= ZeroMatrix(neighSize,neighSize);
            // RHS_Contribution= ZeroVector(neighSize);
            // if (EquationId.size() != neighSize)
            //     EquationId.resize(neighSize, false);

            // only the diagonal (master node) entry is assembled nodally here
            if (LHS_Contribution.size1() != 1)
                LHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!!
            if (RHS_Contribution.size() != 1)
                RHS_Contribution.resize(1, false); //false says not to preserve existing storage!!
            noalias(LHS_Contribution) = ZeroMatrix(1, 1);
            noalias(RHS_Contribution) = ZeroVector(1);
            if (EquationId.size() != 1)
                EquationId.resize(1, false);

            double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);

            if (nodalVolume > 0)
            { // in interface nodes not in contact with fluid elements the nodal volume is zero
                // Papanastasiou regularization of the Bingham viscosity when a yield shear is set
                double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
                double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
                if (yieldShear > 0)
                {
                    double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
                    double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
                    double exponent = -adaptiveExponent * equivalentStrainRate;
                    if (equivalentStrainRate != 0)
                    {
                        deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
                    }
                    if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
                    {
                        // for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
                        deviatoricCoeff = adaptiveExponent * yieldShear;
                    }
                }
                // cap (NOTE(review): deviatoricCoeff is not used below in this build — confirm)
                if (deviatoricCoeff > 0.1)
                {
                    deviatoricCoeff = 0.1;
                }

                double volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);

                deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);

                LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;

                RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;

                RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
            }

            const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
            EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();

#ifdef _OPENMP
            Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
            Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
        }
        //}
    }
    // }

    // --- elemental part: assemble every element's local system in parallel ---
    ElementsArrayType &pElements = rModelPart.Elements();
    int number_of_threads = ParallelUtilities::GetNumThreads();

#ifdef _OPENMP
    int A_size = A.size1();

    //creating an array of lock variables of the size of the system matrix
    std::vector<omp_lock_t> lock_array(A.size1());

    for (int i = 0; i < A_size; i++)
        omp_init_lock(&lock_array[i]);
#endif

    DenseVector<unsigned int> element_partition;
    CreatePartition(number_of_threads, pElements.size(), element_partition);

    if (this->GetEchoLevel() > 0)
    {
        KRATOS_WATCH(number_of_threads);
        KRATOS_WATCH(element_partition);
    }

#pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1)
    for (int k = 0; k < number_of_threads; k++)
    {
        //contributions to the system (thread-local scratch)
        LocalSystemMatrixType elementalLHS_Contribution = LocalSystemMatrixType(0, 0);
        LocalSystemVectorType elementalRHS_Contribution = LocalSystemVectorType(0);

        //vector containing the localization in the system of the different terms
        Element::EquationIdVectorType elementalEquationId;
        // shadows the function-scope CurrentProcessInfo on purpose (thread-local ref)
        const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
        typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
        typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];

        unsigned int pos = (rModelPart.Nodes().begin())->GetDofPosition(PRESSURE);

        // assemble all elements
        for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
        {
            //calculate elemental contribution
            (*it)->CalculateLocalSystem(elementalLHS_Contribution, elementalRHS_Contribution, CurrentProcessInfo);

            Geometry<Node<3>> &geom = (*it)->GetGeometry();
            if (elementalEquationId.size() != geom.size())
                elementalEquationId.resize(geom.size(), false);

            for (unsigned int i = 0; i < geom.size(); i++)
                elementalEquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId();

            //assemble the elemental contribution
#ifdef _OPENMP
            this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId, lock_array);
#else
            this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId);
#endif
        }
    }

#ifdef _OPENMP
    for (int i = 0; i < A_size; i++)
        omp_destroy_lock(&lock_array[i]);
#endif

    KRATOS_CATCH("")
}

/**
 * @brief This is a call to the linear system solver
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void SystemSolve(
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    KRATOS_TRY

    double norm_b;
    if (TSparseSpace::Size(b) != 0)
        norm_b = TSparseSpace::TwoNorm(b);
    else
        norm_b = 0.00;

    if (norm_b != 0.00)
    {
        //do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
        // zero RHS: the solution is trivially zero, skip the solver
        TSparseSpace::SetToZero(Dx);

    // Prints informations about the current time
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}

/**
 *@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 * @param rModelPart The model part of the problem to solve
 */
void SystemSolveWithPhysics(
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b,
    ModelPart &rModelPart)
{
    KRATOS_TRY

    double norm_b;
    if (TSparseSpace::Size(b) != 0)
        norm_b = TSparseSpace::TwoNorm(b);
    else
        norm_b = 0.00;

    if (norm_b != 0.00)
    {
        //provide physical data as needed
        if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
            BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);

        //do solve
        BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
    }
    else
    {
        TSparseSpace::SetToZero(Dx);
        KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!"
                                                                                                                               << std::endl;
    }

    // Prints informations about the current time
    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;

    KRATOS_CATCH("")
}

/**
 * @brief Function to perform the building and solving phase at the same time.
 * @details It is ideally the fastest and safer function to use when it is possible to solve
 * just after building
 * @param pScheme The integration scheme considered
 * @param rModelPart The model part of the problem to solve
 * @param A The LHS matrix
 * @param Dx The Unknowns vector
 * @param b The RHS vector
 */
void BuildAndSolve(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
    KRATOS_TRY

    Timer::Start("Build");

    /* boost::timer c_build_time; */

    // The alternative build strategies below were kept (commented) for experimentation;
    // the active strategy is BuildAll (nodal + elemental assembly).

    ///////////////////////////////// ALL NODAL /////////////////////////////////
    //BuildNodally(pScheme, rModelPart, A, b);
    ///////////////////////////////// ALL NODAL /////////////////////////////////

    // /////////////////////// NODAL + ELEMENTAL LAPLACIAN ///////////////////////
    //BuildNodallyUnlessLaplacian(pScheme, rModelPart, A, b);
    //Build(pScheme, rModelPart, A, b);
    // /////////////////////// NODAL + ELEMENTAL LAPLACIAN ///////////////////////

    //////////////// NODAL + ELEMENTAL VOLUMETRIC STABILIZED TERMS////////////////
    //BuildNodallyNoVolumetricStabilizedTerms(pScheme, rModelPart, A, b);
    //Build(pScheme, rModelPart, A, b);
    // /////////////////////// NODAL + ELEMENTAL LAPLACIAN ///////////////////////

    /////////////////////// NODAL + ELEMENTAL STABILIZATION //////////////////////
    // BuildNodallyNotStabilized(pScheme, rModelPart, A, b);
    // Build(pScheme, rModelPart, A, b);
    BuildAll(pScheme, rModelPart, A, b);
    /////////////////////// NODAL + ELEMENTAL STABILIZATION //////////////////////

    //////////////////////// ALL ELEMENTAL (FOR HYBRID) //////////////////////////
    //Build(pScheme, rModelPart, A, b);
    //////////////////////// ALL ELEMENTAL (FOR HYBRID) //////////////////////////

    Timer::Stop("Build");

    //         ApplyPointLoads(pScheme,rModelPart,b);

    // Does nothing...dirichlet conditions are naturally dealt with in defining the residual
    ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);

    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
                                                                                     << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    /* const double start_solve = OpenMPUtils::GetCurrentTime(); */
    Timer::Start("Solve");

    /* boost::timer c_solve_time; */
    SystemSolveWithPhysics(A, Dx, b, rModelPart);

    /* std::cout << "CONTINUITY EQ: solve_time : " << c_solve_time.elapsed() << std::endl; */

    Timer::Stop("Solve");
    /* const double stop_solve = OpenMPUtils::GetCurrentTime(); */

    KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
                                                                                     << "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;

    KRATOS_CATCH("")
}

// Purely elemental build of the continuity system (used by the hybrid strategies above).
void Build(
    typename TSchemeType::Pointer pScheme,
    ModelPart &r_model_part,
    TSystemMatrixType &A,
    TSystemVectorType &b) override
{
    KRATOS_TRY
    if (!pScheme)
        KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");

    //getting the elements from the model
    ElementsArrayType &pElements = r_model_part.Elements();

    // //getting the array of the conditions
    // ConditionsArrayType& ConditionsArray = r_model_part.Conditions();

    //resetting to zero the vector of reactions
    TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));

    //create a partition of the element array
    int number_of_threads = ParallelUtilities::GetNumThreads();

#ifdef _OPENMP
    int A_size = A.size1();

    //creating an array of lock variables of the size of the system matrix
    std::vector<omp_lock_t> lock_array(A.size1());

    for (int i = 0; i < A_size; i++)
        omp_init_lock(&lock_array[i]);
#endif

    DenseVector<unsigned int>
element_partition; CreatePartition(number_of_threads, pElements.size(), element_partition); if (this->GetEchoLevel() > 0) { KRATOS_WATCH(number_of_threads); KRATOS_WATCH(element_partition); } // double start_prod = OpenMPUtils::GetCurrentTime(); #pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1) for (int k = 0; k < number_of_threads; k++) { //contributions to the system LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0); LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0); //vector containing the localization in the system of the different //terms Element::EquationIdVectorType EquationId; const ProcessInfo &CurrentProcessInfo = r_model_part.GetProcessInfo(); typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k]; typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1]; unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(PRESSURE); // assemble all elements for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it) { //calculate elemental contribution (*it)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo); Geometry<Node<3>> &geom = (*it)->GetGeometry(); if (EquationId.size() != geom.size()) EquationId.resize(geom.size(), false); for (unsigned int i = 0; i < geom.size(); i++) EquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId(); //assemble the elemental contribution #ifdef _OPENMP this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array); #else this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId); #endif } } // if (this->GetEchoLevel() > 0) // { // double stop_prod = OpenMPUtils::GetCurrentTime(); // std::cout << "parallel building time: " << stop_prod - start_prod << std::endl; // } #ifdef _OPENMP for (int i = 0; i < A_size; i++) omp_destroy_lock(&lock_array[i]); #endif KRATOS_CATCH("") } /** * @brief Builds the list 
of the DofSets involved in the problem by "asking" to each element * and condition its Dofs. * @details The list of dofs is stores insde the BuilderAndSolver as it is closely connected to the * way the matrix and RHS are built * @param pScheme The integration scheme considered * @param rModelPart The model part of the problem to solve */ void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart &rModelPart) override { KRATOS_TRY; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl; //Gets the array of elements from the modeler ElementsArrayType &pElements = rModelPart.Elements(); const int nelements = static_cast<int>(pElements.size()); Element::DofsVectorType ElementalDofList; const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo(); unsigned int nthreads = ParallelUtilities::GetNumThreads(); // typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type; // typedef std::unordered_set < NodeType::DofType::Pointer, // DofPointerHasher, // DofPointerComparor, // allocator_type > set_type; #ifdef USE_GOOGLE_HASH typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type; #else typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type; #endif // std::vector<set_type> dofs_aux_list(nthreads); // std::vector<allocator_type> allocators(nthreads); for (int i = 0; i < static_cast<int>(nthreads); i++) { #ifdef USE_GOOGLE_HASH dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer()); #else // dofs_aux_list[i] = set_type( allocators[i]); dofs_aux_list[i].reserve(nelements); #endif } // #pragma omp parallel for firstprivate(nelements, ElementalDofList) for (int i = 0; i < static_cast<int>(nelements); ++i) { auto it_elem = pElements.begin() + i; const IndexType this_thread_id = OpenMPUtils::ThisThread(); // Gets list of Dof involved on every element 
pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo); dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); } // ConditionsArrayType& pConditions = rModelPart.Conditions(); // const int nconditions = static_cast<int>(pConditions.size()); // #pragma omp parallel for firstprivate(nconditions, ElementalDofList) // for (int i = 0; i < nconditions; i++) // { // typename ConditionsArrayType::iterator it = pConditions.begin() + i; // const unsigned int this_thread_id = OpenMPUtils::ThisThread(); // // gets list of Dof involved on every element // pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo); // dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end()); // } //here we do a reduction in a tree so to have everything on thread 0 unsigned int old_max = nthreads; unsigned int new_max = ceil(0.5 * static_cast<double>(old_max)); while (new_max >= 1 && new_max != old_max) { // //just for debugging // std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl; // for (int i = 0; i < new_max; i++) // { // if (i + new_max < old_max) // { // std::cout << i << " - " << i + new_max << std::endl; // } // } // std::cout << "********************" << std::endl; #pragma omp parallel for for (int i = 0; i < static_cast<int>(new_max); i++) { if (i + new_max < old_max) { dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end()); dofs_aux_list[i + new_max].clear(); } } old_max = new_max; new_max = ceil(0.5 * static_cast<double>(old_max)); } DofsArrayType Doftemp; BaseType::mDofSet = DofsArrayType(); Doftemp.reserve(dofs_aux_list[0].size()); for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++) { Doftemp.push_back((*it)); } Doftemp.Sort(); BaseType::mDofSet = Doftemp; // Throws an execption if there are no Degrees of freedom involved in the analysis KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of 
freedom!" << std::endl; BaseType::mDofSetIsInitialized = true; KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl; #ifdef _OPENMP if (mlock_array.size() != 0) { for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_destroy_lock(&mlock_array[i]); } mlock_array.resize(BaseType::mDofSet.size()); for (int i = 0; i < static_cast<int>(mlock_array.size()); i++) omp_init_lock(&mlock_array[i]); #endif // If reactions are to be calculated, we check if all the dofs have reactions defined // This is tobe done only in debug mode #ifdef KRATOS_DEBUG if (BaseType::GetCalculateReactionsFlag()) { for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) { KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl << "Node : " << dof_iterator->Id() << std::endl << "Dof : " << (*dof_iterator) << std::endl << "Not possible to calculate reactions." << std::endl; } } #endif KRATOS_CATCH(""); } /** * @brief Organises the dofset in order to speed up the building phase * @param rModelPart The model part of the problem to solve */ void SetUpSystem( ModelPart &rModelPart) override { // Set equation id for degrees of freedom // the free degrees of freedom are positioned at the beginning of the system, // while the fixed one are at the end (in opposite order). 
// // that means that if the EquationId is greater than "mEquationSystemSize" // the pointed degree of freedom is restrained // int free_id = 0; int fix_id = BaseType::mDofSet.size(); for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator) if (dof_iterator->IsFixed()) dof_iterator->SetEquationId(--fix_id); else dof_iterator->SetEquationId(free_id++); BaseType::mEquationSystemSize = fix_id; } //************************************************************************** //************************************************************************** void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme, TSystemMatrixPointerType &pA, TSystemVectorPointerType &pDx, TSystemVectorPointerType &pb, ModelPart &rModelPart) override { KRATOS_TRY /* boost::timer c_contruct_matrix; */ if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0)); pA.swap(pNewA); } if (pDx == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0)); pDx.swap(pNewDx); } if (pb == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0)); pb.swap(pNewb); } if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty matrix { TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0)); BaseType::mpReactionsVector.swap(pNewReactionsVector); } TSystemMatrixType &A = *pA; TSystemVectorType &Dx = *pDx; TSystemVectorType &b = *pb; //resizing the system vectors and matrix if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized { A.resize(BaseType::mEquationSystemSize, 
BaseType::mEquationSystemSize, false); ConstructMatrixStructure(pScheme, A, rModelPart); } else { if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize) { KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW"); KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl; A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true); ConstructMatrixStructure(pScheme, A, rModelPart); } } if (Dx.size() != BaseType::mEquationSystemSize) Dx.resize(BaseType::mEquationSystemSize, false); if (b.size() != BaseType::mEquationSystemSize) b.resize(BaseType::mEquationSystemSize, false); //if needed resize the vector for the calculation of reactions if (BaseType::mCalculateReactionsFlag == true) { unsigned int ReactionsVectorSize = BaseType::mDofSet.size(); if (BaseType::mpReactionsVector->size() != ReactionsVectorSize) BaseType::mpReactionsVector->resize(ReactionsVectorSize, false); } /* std::cout << "CONTINUITY EQ: contruct_matrix : " << c_contruct_matrix.elapsed() << std::endl; */ KRATOS_CATCH("") } //************************************************************************** //************************************************************************** /** * @brief Applies the dirichlet conditions. This operation may be very heavy or completely * unexpensive depending on the implementation choosen and on how the System Matrix is built. 
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
// NOTE(review): intentionally empty — presumably Dirichlet conditions need no
// extra treatment here because this elimination builder numbers fixed DOFs past
// mEquationSystemSize and never assembles their rows (see Assemble); confirm.
void ApplyDirichletConditions(
    typename TSchemeType::Pointer pScheme,
    ModelPart &rModelPart,
    TSystemMatrixType &A,
    TSystemVectorType &Dx,
    TSystemVectorType &b) override
{
}

/**
 * @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
 */
void Clear() override
{
    // Drop the DOF set; a fresh one is rebuilt on the next SetUpDofSet call.
    this->mDofSet = DofsArrayType();

    // Release the reactions vector if it was ever allocated.
    if (this->mpReactionsVector != NULL)
        TSparseSpace::Clear((this->mpReactionsVector));
    // this->mReactionsVector = TSystemVectorType();

    // Let the linear solver free its internal workspace as well.
    this->mpLinearSystemSolver->Clear();

    KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuity", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}

/**
 * @brief This function is designed to be called once to perform all the checks needed
 * on the input provided. Checks can be "expensive" as the function is designed
 * to catch user's errors.
* @param rModelPart The model part of the problem to solve * @return 0 all ok */ int Check(ModelPart &rModelPart) override { KRATOS_TRY return 0; KRATOS_CATCH(""); } ///@} ///@name Access ///@{ ///@} ///@name Inquiry ///@{ ///@} ///@name Friends ///@{ ///@} protected: ///@name Protected static Member Variables ///@{ ///@} ///@name Protected member Variables ///@{ ///@} ///@name Protected Operators ///@{ ///@} ///@name Protected Operations ///@{ void Assemble( TSystemMatrixType &A, TSystemVectorType &b, const LocalSystemMatrixType &LHS_Contribution, const LocalSystemVectorType &RHS_Contribution, const Element::EquationIdVectorType &EquationId #ifdef _OPENMP , std::vector<omp_lock_t> &lock_array #endif ) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { #ifdef _OPENMP omp_set_lock(&lock_array[i_global]); #endif b[i_global] += RHS_Contribution(i_local); for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) { A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } #ifdef _OPENMP omp_unset_lock(&lock_array[i_global]); #endif } //note that assembly on fixed rows is not performed here } } //************************************************************************** virtual void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme, TSystemMatrixType &A, ModelPart &rModelPart) { //filling with zero the matrix (creating the structure) Timer::Start("MatrixStructure"); const std::size_t equation_size = BaseType::mEquationSystemSize; std::vector<std::unordered_set<std::size_t>> indices(equation_size); #pragma omp parallel for firstprivate(equation_size) for (int iii = 0; iii < static_cast<int>(equation_size); iii++) { indices[iii].reserve(40); } Element::EquationIdVectorType ids(3, 0); #pragma omp 
parallel firstprivate(ids) { // The process info ProcessInfo &r_current_process_info = rModelPart.GetProcessInfo(); // We repeat the same declaration for each thead std::vector<std::unordered_set<std::size_t>> temp_indexes(equation_size); #pragma omp for for (int index = 0; index < static_cast<int>(equation_size); ++index) temp_indexes[index].reserve(30); // Getting the size of the array of elements from the model const int number_of_elements = static_cast<int>(rModelPart.Elements().size()); // Element initial iterator const auto el_begin = rModelPart.ElementsBegin(); // We iterate over the elements #pragma omp for schedule(guided, 512) nowait for (int i_elem = 0; i_elem < number_of_elements; ++i_elem) { auto it_elem = el_begin + i_elem; pScheme->EquationId(*it_elem, ids, r_current_process_info); for (auto &id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto &row_indices = temp_indexes[id_i]; for (auto &id_j : ids) if (id_j < BaseType::mEquationSystemSize) row_indices.insert(id_j); } } } // Getting the size of the array of the conditions const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size()); // Condition initial iterator const auto cond_begin = rModelPart.ConditionsBegin(); // We iterate over the conditions #pragma omp for schedule(guided, 512) nowait for (int i_cond = 0; i_cond < number_of_conditions; ++i_cond) { auto it_cond = cond_begin + i_cond; pScheme->EquationId(*it_cond, ids, r_current_process_info); for (auto &id_i : ids) { if (id_i < BaseType::mEquationSystemSize) { auto &row_indices = temp_indexes[id_i]; for (auto &id_j : ids) if (id_j < BaseType::mEquationSystemSize) row_indices.insert(id_j); } } } // Merging all the temporal indexes #pragma omp critical { for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i) { indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end()); } } } //count the row sizes unsigned int nnz = 0; for (unsigned int i = 0; i < indices.size(); i++) nnz += indices[i].size(); A = 
boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz); double *Avalues = A.value_data().begin(); std::size_t *Arow_indices = A.index1_data().begin(); std::size_t *Acol_indices = A.index2_data().begin(); //filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP! Arow_indices[0] = 0; for (int i = 0; i < static_cast<int>(A.size1()); i++) Arow_indices[i + 1] = Arow_indices[i] + indices[i].size(); #pragma omp parallel for for (int i = 0; i < static_cast<int>(A.size1()); i++) { const unsigned int row_begin = Arow_indices[i]; const unsigned int row_end = Arow_indices[i + 1]; unsigned int k = row_begin; for (auto it = indices[i].begin(); it != indices[i].end(); it++) { Acol_indices[k] = *it; Avalues[k] = 0.0; k++; } std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]); } A.set_filled(indices.size() + 1, nnz); Timer::Stop("MatrixStructure"); } void AssembleLHS( TSystemMatrixType &A, LocalSystemMatrixType &LHS_Contribution, Element::EquationIdVectorType &EquationId) { unsigned int local_size = LHS_Contribution.size1(); for (unsigned int i_local = 0; i_local < local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { unsigned int j_global = EquationId[j_local]; if (j_global < BaseType::mEquationSystemSize) A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Protected Access ///@{ ///@} ///@name Protected Inquiry ///@{ ///@} ///@name Protected LifeCycle ///@{ ///@} private: ///@name Static Member Variables ///@{ ///@} ///@name Member Variables ///@{ #ifdef _OPENMP std::vector<omp_lock_t> mlock_array; #endif ///@} ///@name Private Operators ///@{ ///@} ///@name Private Operations ///@{ inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate) { std::vector<std::size_t>::iterator i = v.begin(); std::vector<std::size_t>::iterator endit = v.end(); while 
// (tail of AddUnique) linear scan: advance until candidate is found or end is reached
(i != endit && (*i) != candidate)
{
    i++;
}
if (i == endit)
{
    v.push_back(candidate);
}
}

// Splits number_of_rows into number_of_threads contiguous chunks;
// partitions[t] .. partitions[t+1] is the half-open row range owned by thread t.
// The last chunk absorbs the remainder of the integer division.
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int> &partitions)
{
    partitions.resize(number_of_threads + 1);
    int partition_size = number_of_rows / number_of_threads;
    partitions[0] = 0;
    partitions[number_of_threads] = number_of_rows;
    for (unsigned int i = 1; i < number_of_threads; i++)
        partitions[i] = partitions[i - 1] + partition_size;
}

// Scatters a local RHS contribution into the global vector b.
// Free DOFs (EquationId < mEquationSystemSize) accumulate into b; when the
// reactions flag is set, fixed DOFs accumulate into the reactions vector
// instead. The "#pragma omp atomic" makes each scatter safe under
// concurrent assembly from several threads.
void AssembleRHS(
    TSystemVectorType &b,
    const LocalSystemVectorType &RHS_Contribution,
    const Element::EquationIdVectorType &EquationId)
{
    unsigned int local_size = RHS_Contribution.size();

    if (BaseType::mCalculateReactionsFlag == false)
    {
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize) //free dof
            {
                // ASSEMBLING THE SYSTEM VECTOR
                double &b_value = b[i_global];
                const double &rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
        }
    }
    else
    {
        TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector;
        for (unsigned int i_local = 0; i_local < local_size; i_local++)
        {
            const unsigned int i_global = EquationId[i_local];

            if (i_global < BaseType::mEquationSystemSize) //free dof
            {
                // ASSEMBLING THE SYSTEM VECTOR
                double &b_value = b[i_global];
                const double &rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
            else //fixed dof
            {
                // Fixed DOFs are numbered from mEquationSystemSize upward
                // (see SetUpSystem), so this offset indexes the reactions vector.
                double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
                const double &rhs_value = RHS_Contribution[i_local];

#pragma omp atomic
                b_value += rhs_value;
            }
        }
    }
}

//**************************************************************************

// Assembles full rows of free DOFs including fixed-DOF columns — note the
// j_global index is NOT bounds-checked here.
// NOTE(review): assumes A is sized to hold the fixed columns too; confirm
// against the callers that use this variant.
void AssembleLHS_CompleteOnFreeRows(
    TSystemMatrixType &A,
    LocalSystemMatrixType &LHS_Contribution,
    Element::EquationIdVectorType &EquationId)
{
    unsigned int local_size = LHS_Contribution.size1();
    for (unsigned int i_local = 0; i_local <
local_size; i_local++) { unsigned int i_global = EquationId[i_local]; if (i_global < BaseType::mEquationSystemSize) { for (unsigned int j_local = 0; j_local < local_size; j_local++) { int j_global = EquationId[j_local]; A(i_global, j_global) += LHS_Contribution(i_local, j_local); } } } } ///@} ///@name Private Operations ///@{ ///@} ///@name Private Access ///@{ ///@} ///@name Private Inquiry ///@{ ///@} ///@name Un accessible methods ///@{ ///@} }; /* Class NodalResidualBasedEliminationBuilderAndSolverContinuity */ ///@} ///@name Type Definitions ///@{ ///@} } /* namespace Kratos.*/ #endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER defined */
Parallelizer.h
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2010 Gael Guennebaud <gael.guennebaud@inria.fr> // // Eigen is free software; you can redistribute it and/or // modify it under the terms of the GNU Lesser General Public // License as published by the Free Software Foundation; either // version 3 of the License, or (at your option) any later version. // // Alternatively, you can redistribute it and/or // modify it under the terms of the GNU General Public License as // published by the Free Software Foundation; either version 2 of // the License, or (at your option) any later version. // // Eigen is distributed in the hope that it will be useful, but WITHOUT ANY // WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS // FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the // GNU General Public License for more details. // // You should have received a copy of the GNU Lesser General Public // License and a copy of the GNU General Public License along with // Eigen. If not, see <http://www.gnu.org/licenses/>. 
#ifndef EIGEN_PARALLELIZER_H
#define EIGEN_PARALLELIZER_H

namespace Eigen {

namespace internal {

/** \internal
  * Single access point to the thread-count setting: SetAction stores *v as the
  * user-requested maximum; GetAction reads it back, falling back to
  * omp_get_max_threads() when unset, or to 1 when OpenMP is disabled. */
inline void manage_multi_threading(Action action, int* v)
{
  // -1 means "not set by the user yet"; function-local static holds the state.
  static EIGEN_UNUSED int m_maxThreads = -1;

  if(action==SetAction)
  {
    eigen_internal_assert(v!=0);
    m_maxThreads = *v;
  }
  else if(action==GetAction)
  {
    eigen_internal_assert(v!=0);
    #ifdef EIGEN_HAS_OPENMP
    if(m_maxThreads>0)
      *v = m_maxThreads;
    else
      *v = omp_get_max_threads();
    #else
    *v = 1;
    #endif
  }
  else
  {
    eigen_internal_assert(false);
  }
}

/** \returns the max number of threads reserved for Eigen
  * \sa setNbThreads */
inline int nbThreads()
{
  int ret;
  manage_multi_threading(GetAction, &ret);
  return ret;
}

/** Sets the max number of threads reserved for Eigen
  * \sa nbThreads */
inline void setNbThreads(int v)
{
  manage_multi_threading(SetAction, &v);
}

// Per-worker bookkeeping for the parallel GEMM: the rhs slice this worker
// packs, plus volatile counters the workers use to hand packed data around.
template<typename Index> struct GemmParallelInfo
{
  GemmParallelInfo() : sync(-1), users(0), rhs_start(0), rhs_length(0) {}

  int volatile sync;
  int volatile users;

  Index rhs_start;
  Index rhs_length;
};

// Runs func over the rows x cols product, either sequentially or split across
// OpenMP threads. `transpose` indicates a row-major destination: the split is
// then done along columns instead of rows.
template<bool Condition, typename Functor, typename Index>
void parallelize_gemm(const Functor& func, Index rows, Index cols, bool transpose)
{
  // TODO when EIGEN_USE_BLAS is defined,
  // we should still enable OMP for other scalar types
#if !(defined (EIGEN_HAS_OPENMP)) || defined (EIGEN_USE_BLAS)
  // FIXME the transpose variable is only needed to properly split
  // the matrix product when multithreading is enabled. This is a temporary
  // fix to support row-major destination matrices. This whole
  // parallelizer mechanism has to be redesigned anyway.
  EIGEN_UNUSED_VARIABLE(transpose);
  func(0,rows, 0,cols);
#else
  // Dynamically check whether we should enable or disable OpenMP.
  // The conditions are:
  // - the max number of threads we can create is greater than 1
  // - we are not already in a parallel code
  // - the sizes are large enough

  // 1- are we already in a parallel session?
  // FIXME omp_get_num_threads()>1 only works for openmp, what if the user does not use openmp?
  if((!Condition) || (omp_get_num_threads()>1))
    return func(0,rows, 0,cols);

  Index size = transpose ? cols : rows;

  // 2- compute the maximal number of threads from the size of the product:
  // FIXME this has to be fine tuned
  Index max_threads = std::max<Index>(1,size / 32);

  // 3 - compute the number of threads we are going to use
  Index threads = std::min<Index>(nbThreads(), max_threads);

  if(threads==1)
    return func(0,rows, 0,cols);

  func.initParallelSession();

  if(transpose)
    std::swap(rows,cols);

  // Block sizes are rounded down to multiples of 4 (cols) / 8 (rows);
  // the last thread picks up whatever remains.
  Index blockCols = (cols / threads) & ~Index(0x3);
  Index blockRows = (rows / threads) & ~Index(0x7);

  GemmParallelInfo<Index>* info = new GemmParallelInfo<Index>[threads];

  #pragma omp parallel for schedule(static,1) num_threads(threads)
  for(Index i=0; i<threads; ++i)
  {
    Index r0 = i*blockRows;
    Index actualBlockRows = (i+1==threads) ? rows-r0 : blockRows;

    Index c0 = i*blockCols;
    Index actualBlockCols = (i+1==threads) ? cols-c0 : blockCols;

    // Publish this worker's rhs slice so the others can reuse its packing.
    info[i].rhs_start = c0;
    info[i].rhs_length = actualBlockCols;

    if(transpose)
      func(0, cols, r0, actualBlockRows, info);
    else
      func(r0, actualBlockRows, 0,cols, info);
  }

  delete[] info;
#endif
}

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PARALLELIZER_H
GB_unop__asinh_fp32_fp32.c
//------------------------------------------------------------------------------ // GB_unop: hard-coded functions for each built-in unary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated/ folder, do not edit it (auto-generated). #include "GB.h" #ifndef GBCOMPACT #include "GB_control.h" #include "GB_atomics.h" #include "GB_unop__include.h" // C=unop(A) is defined by the following types and operators: // op(A) function: GB_unop_apply__asinh_fp32_fp32 // op(A') function: GB_unop_tran__asinh_fp32_fp32 // C type: float // A type: float // cast: float cij = aij // unaryop: cij = asinhf (aij) #define GB_ATYPE \ float #define GB_CTYPE \ float // aij = Ax [pA] #define GB_GETA(aij,Ax,pA) \ float aij = Ax [pA] #define GB_CX(p) Cx [p] // unary operator #define GB_OP(z, x) \ z = asinhf (x) ; // casting #define GB_CAST(z, aij) \ float z = aij ; // cij = op (aij) #define GB_CAST_OP(pC,pA) \ { \ /* aij = Ax [pA] */ \ float aij = Ax [pA] ; \ /* Cx [pC] = op (cast (aij)) */ \ float z = aij ; \ Cx [pC] = asinhf (z) ; \ } // true if operator is the identity op with no typecasting #define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \ 0 // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_ASINH || GxB_NO_FP32) //------------------------------------------------------------------------------ // Cx = op (cast (Ax)): apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_apply__asinh_fp32_fp32 ( float *Cx, // Cx and Ax may be aliased const float *Ax, const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; if (Ab == NULL) { #if ( 
GB_OP_IS_IDENTITY_WITH_NO_TYPECAST ) GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ; #else #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { float aij = Ax [p] ; float z = aij ; Cx [p] = asinhf (z) ; } #endif } else { // bitmap case, no transpose; A->b already memcpy'd into C->b #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!Ab [p]) continue ; float aij = Ax [p] ; float z = aij ; Cx [p] = asinhf (z) ; } } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (cast (A')): transpose, typecast, and apply a unary operator //------------------------------------------------------------------------------ GrB_Info GB_unop_tran__asinh_fp32_fp32 ( GrB_Matrix C, const GrB_Matrix A, int64_t *GB_RESTRICT *Workspaces, const int64_t *GB_RESTRICT A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
singleConstruct.c
/* Minimal demonstration of "#pragma omp single": one thread performs the
   shared update, and the implicit barrier at the end of the single block
   makes the new value of x visible to every thread in the team. */
int main()
{
    int x = 10;

#pragma omp parallel
    {
        int local_copy = 2; /* private to each thread */

#pragma omp single
        {
            x += 10; /* executed by exactly one thread */
        }

        /* After the single's implicit barrier, all threads read x == 20. */
        local_copy = x;
        (void)local_copy;
    }

    x = 30;
    return 0;
}
jacobi-task-dep.c
# include "poisson.h"

/* #pragma omp task depend version of SWEEP.
   Performs itnew - itold Jacobi iterations of the Poisson solve on an
   nx-by-ny grid, creating one task per row; the depend clauses chain the
   copy (unew -> u) and update (u -> unew) phases row by row.
   NOTE(review): block_size is not used in this variant — confirm whether it
   is kept only for signature compatibility with the blocked versions. */
void sweep_task_dep (int nx, int ny, double dx, double dy, double *f_,
                     int itold, int itnew, double *u_, double *unew_, int block_size)
{
    int i;
    int it;
    int j;
    /* Reinterpret the flat buffers as pointers to whole nx-by-ny arrays
       (C99 variably-modified types), so (*u)[i][j] indexes the grid. */
    double (*f)[nx][ny] = (double (*)[nx][ny])f_;
    double (*u)[nx][ny] = (double (*)[nx][ny])u_;
    double (*unew)[nx][ny] = (double (*)[nx][ny])unew_;

#pragma omp parallel shared (u, unew, f) private (i, j, it) firstprivate(nx, ny, dx, dy, itold, itnew)
#pragma omp single
    {
        /* A single thread generates all tasks; the runtime orders their
           execution according to the depend clauses below. */
        for (it = itold + 1; it <= itnew; it++)
        {
            // Save the current estimate.
            for (i = 0; i < nx; i++)
            {
#pragma omp task shared(u, unew) firstprivate(i) private(j) depend(in: unew[i]) depend(out: u[i])
                for (j = 0; j < ny; j++)
                {
                    (*u)[i][j] = (*unew)[i][j];
                }
            }

            // Compute a new estimate.
            for (i = 0; i < nx; i++)
            {
                /* NOTE(review): for i == 0 and i == nx-1 the dependence items
                   u[i-1] / u[i+1] name addresses just outside the grid; they
                   serve only as dependence tags and are never dereferenced —
                   confirm this is the intended idiom here. */
#pragma omp task shared(u, unew, f) firstprivate(i, nx, ny, dx, dy) private(j) depend(in: f[i], u[i-1], u[i], u[i+1]) depend(out: unew[i])
                for (j = 0; j < ny; j++)
                {
                    /* Boundary rows/columns take the forcing value directly;
                       interior points take the 4-point Jacobi average. */
                    if (i == 0 || j == 0 || i == nx - 1 || j == ny - 1)
                    {
                        (*unew)[i][j] = (*f)[i][j];
                    }
                    else
                    {
                        (*unew)[i][j] = 0.25 * ((*u)[i-1][j] + (*u)[i][j+1]
                                              + (*u)[i][j-1] + (*u)[i+1][j]
                                              + (*f)[i][j] * dx * dy);
                    }
                }
            }
        }
    }
}
cofold.c
/* * minimum free energy * RNA secondary structure prediction * * c Ivo Hofacker, Chrisoph Flamm * original implementation by * Walter Fontana * * Vienna RNA package */ #ifdef HAVE_CONFIG_H #include "config.h" #endif #include <stdio.h> #include <stdlib.h> #include <math.h> #include <ctype.h> #include <string.h> #include <limits.h> #include "ViennaRNA/utils/basic.h" #include "ViennaRNA/utils/strings.h" #include "ViennaRNA/utils/structures.h" #include "ViennaRNA/params/default.h" #include "ViennaRNA/fold_vars.h" #include "ViennaRNA/params/basic.h" #include "ViennaRNA/subopt.h" #include "ViennaRNA/fold.h" #include "ViennaRNA/loops/all.h" #include "ViennaRNA/gquad.h" #include "ViennaRNA/alphabet.h" #include "ViennaRNA/cofold.h" #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY #ifdef _OPENMP #include <omp.h> #endif #endif #define MAXSECTORS 500 /* dimension for a backtrack array */ /* ################################# # GLOBAL VARIABLES # ################################# */ /* ################################# # PRIVATE VARIABLES # ################################# */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY /* some backward compatibility stuff */ PRIVATE int backward_compat = 0; PRIVATE vrna_fold_compound_t *backward_compat_compound = NULL; PRIVATE float mfe1, mfe2; /* minimum free energies of the monomers */ #ifdef _OPENMP #pragma omp threadprivate(mfe1, mfe2, backward_compat_compound, backward_compat) #endif #endif /* ################################# # PRIVATE FUNCTION DECLARATIONS # ################################# */ PRIVATE void backtrack(sect bt_stack[], vrna_bp_stack_t *bp_list, vrna_fold_compound_t *vc); PRIVATE int fill_arrays(vrna_fold_compound_t *vc, int zuker); PRIVATE void free_end(int *array, int i, int start, vrna_fold_compound_t *vc); PRIVATE void doubleseq(vrna_fold_compound_t *vc); /* do magic */ PRIVATE void halfseq(vrna_fold_compound_t *vc); /* undo magic */ #ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY /* wrappers for old API compatibility */ 
/* wrappers for old API compatibility (definitions appear later in the file) */
PRIVATE void wrap_array_export(int **f5_p, int **c_p, int **fML_p, int **fM1_p, int **fc_p, int **indx_p, char **ptype_p);

PRIVATE float wrap_cofold(const char *string, char *structure, vrna_param_t *parameters, int is_constrained);

PRIVATE SOLUTION *wrap_zukersubopt(const char *string, vrna_param_t *parameters);

#endif

/*
 #################################
 # BEGIN OF FUNCTION DEFINITIONS #
 #################################
 */

/* Computes the minimum free energy of the (co-)folded sequence in vc.
   If `structure` is non-NULL and backtracking is enabled, the dot-bracket
   string is written into it. Returns the energy in kcal/mol (the DP
   matrices store energies in units of 0.01 kcal/mol, hence the /100). */
PUBLIC float
vrna_mfe_dimer(vrna_fold_compound_t *vc,
               char *structure)
{
  int length, energy;
  char *s;
  sect bt_stack[MAXSECTORS]; /* stack of partial structures for backtracking */
  vrna_bp_stack_t *bp;

  length = (int)vc->length;

  vc->sequence_encoding[0] = vc->sequence_encoding2[0]; /* store length at pos. 0 in S1 too */

  if (!vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID)) {
    vrna_message_warning("vrna_mfe_dimer@cofold.c: Failed to prepare vrna_fold_compound");
    return (float)(INF / 100.);
  }

  /* call user-defined recursion status callback function */
  if (vc->stat_cb)
    vc->stat_cb(VRNA_STATUS_MFE_PRE, vc->auxdata);

  /* fill the DP matrices (zuker flag off) */
  energy = fill_arrays(vc, 0);

  /* call user-defined recursion status callback function */
  if (vc->stat_cb)
    vc->stat_cb(VRNA_STATUS_MFE_POST, vc->auxdata);

  if (structure && vc->params->model_details.backtrack) {
    /* add a guess of how many G's may be involved in a G quadruplex */
    bp = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (4 * (1 + length / 2)));

    backtrack(bt_stack, bp, vc);

    s = vrna_db_from_bp_stack(bp, length);
    strncpy(structure, s, length + 1);
    free(s);
    free(bp);
  }

  /* backtrack_type selects which matrix entry defines the reported energy:
     'C' = constrained pair (c matrix), 'M' = multiloop (fML), else f5 optimum */
  if (vc->params->model_details.backtrack_type == 'C')
    return (float)vc->matrices->c[vc->jindx[length] + 1] / 100.;
  else if (vc->params->model_details.backtrack_type == 'M')
    return (float)vc->matrices->fML[vc->jindx[length] + 1] / 100.;
  else
    return (float)energy / 100.;
}

PRIVATE int
fill_arrays(vrna_fold_compound_t *vc, int zuker)
{
  /* fill "c", "fML" and "f5" arrays and return optimal energy */
  unsigned int strands, *sn, *ss, *se;
  int i, j,
length, energy; int uniq_ML; int no_close, type, maxj, *indx; int *my_f5, *my_c, *my_fML, *my_fM1, *my_fc; int *cc, *cc1; /* auxilary arrays for canonical structures */ int *Fmi; /* holds row i of fML (avoids jumps in memory) */ int *DMLi; /* DMLi[j] holds MIN(fML[i,k]+fML[k+1,j]) */ int *DMLi1; /* MIN(fML[i+1,k]+fML[k+1,j]) */ int *DMLi2; /* MIN(fML[i+2,k]+fML[k+1,j]) */ int dangle_model, noGUclosure, noLP, hc_decompose, turn; char *ptype; unsigned char *hard_constraints; vrna_param_t *P; vrna_mx_mfe_t *matrices; vrna_hc_t *hc; length = (int)vc->length; ptype = vc->ptype; indx = vc->jindx; P = vc->params; dangle_model = P->model_details.dangles; noGUclosure = P->model_details.noGUclosure; noLP = P->model_details.noLP; uniq_ML = P->model_details.uniq_ML; strands = vc->strands; sn = vc->strand_number; ss = vc->strand_start; se = vc->strand_end; hc = vc->hc; hard_constraints = hc->mx; matrices = vc->matrices; my_f5 = matrices->f5; my_c = matrices->c; my_fML = matrices->fML; my_fM1 = matrices->fM1; my_fc = matrices->fc; turn = P->model_details.min_loop_size; /* allocate memory for all helper arrays */ cc = (int *)vrna_alloc(sizeof(int) * (length + 2)); cc1 = (int *)vrna_alloc(sizeof(int) * (length + 2)); Fmi = (int *)vrna_alloc(sizeof(int) * (length + 1)); DMLi = (int *)vrna_alloc(sizeof(int) * (length + 1)); DMLi1 = (int *)vrna_alloc(sizeof(int) * (length + 1)); DMLi2 = (int *)vrna_alloc(sizeof(int) * (length + 1)); /* hard code min_loop_size to 0, since we can not be sure yet that this is already the case */ turn = 0; for (j = 1; j <= length; j++) { Fmi[j] = DMLi[j] = DMLi1[j] = DMLi2[j] = INF; my_fc[j] = 0; } for (j = 1; j <= length; j++) for (i = 1; i <= j; i++) { my_c[indx[j] + i] = my_fML[indx[j] + i] = INF; if (uniq_ML) my_fM1[indx[j] + i] = INF; } for (i = length - turn - 1; i >= 1; i--) { /* i,j in [1..length] */ maxj = (zuker) ? 
(MIN2(i + se[0], length)) : length; for (j = i + turn + 1; j <= maxj; j++) { int ij; ij = indx[j] + i; type = vrna_get_ptype(ij, ptype); hc_decompose = hard_constraints[length * i + j]; energy = INF; no_close = (((type == 3) || (type == 4)) && noGUclosure); if (hc_decompose) { /* we have a pair */ int new_c = INF; if (!no_close) { /* check for hairpin loop */ energy = vrna_E_hp_loop(vc, i, j); new_c = MIN2(new_c, energy); /* check for multibranch loops */ energy = vrna_E_mb_loop_fast(vc, i, j, DMLi1, DMLi2); new_c = MIN2(new_c, energy); } if (dangle_model == 3) { /* coaxial stacking */ energy = vrna_E_mb_loop_stack(vc, i, j); new_c = MIN2(new_c, energy); } /* check for interior loops */ energy = vrna_E_int_loop(vc, i, j); new_c = MIN2(new_c, energy); /* remember stack energy for --noLP option */ if (noLP) { if ((sn[i] == sn[i + 1]) && (sn[j - 1] == sn[j])) { int stackEnergy = vrna_E_stack(vc, i, j); new_c = MIN2(new_c, cc1[j - 1] + stackEnergy); my_c[ij] = cc1[j - 1] + stackEnergy; } else { /* currently we don't allow stacking over the cut point */ my_c[ij] = FORBIDDEN; } cc[j] = new_c; } else { my_c[ij] = new_c; } } /* end >> if (pair) << */ else { my_c[ij] = INF; } /* done with c[i,j], now compute fML[i,j] */ /* free ends ? 
-----------------------------------------*/ my_fML[ij] = vrna_E_ml_stems_fast(vc, i, j, Fmi, DMLi); if (uniq_ML) /* compute fM1 for unique decomposition */ my_fM1[ij] = E_ml_rightmost_stem(i, j, vc); } if (i == se[0] + 1) for (j = i; j <= maxj; j++) free_end(my_fc, j, ss[1], vc); if (i <= se[0]) free_end(my_fc, i, se[0], vc); { int *FF; /* rotate the auxilliary arrays */ FF = DMLi2; DMLi2 = DMLi1; DMLi1 = DMLi; DMLi = FF; FF = cc1; cc1 = cc; cc = FF; for (j = 1; j <= maxj; j++) cc[j] = Fmi[j] = DMLi[j] = INF; } } /* calculate energies of 5' and 3' fragments */ for (i = 1; i <= length; i++) free_end(my_f5, i, 1, vc); if (strands > 1) { mfe1 = my_f5[se[0]]; mfe2 = my_fc[length]; /* add DuplexInit, check whether duplex*/ for (i = ss[1]; i <= length; i++) my_f5[i] = MIN2(my_f5[i] + P->DuplexInit, my_fc[i] + my_fc[1]); } energy = my_f5[length]; if (strands == 1) mfe1 = mfe2 = energy; /* clean up memory */ free(cc); free(cc1); free(Fmi); free(DMLi); free(DMLi1); free(DMLi2); return energy; } PRIVATE void backtrack_co(sect bt_stack[], vrna_bp_stack_t *bp_list, int s, int b, /* b=0: start new structure, b \ne 0: add to existing structure */ vrna_fold_compound_t *vc) { /*------------------------------------------------------------------ * trace back through the "c", "fc", "f5" and "fML" arrays to get the * base pairing list. No search for equivalent structures is done. * This is fast, since only few structure elements are recalculated. 
* ------------------------------------------------------------------*/ unsigned int *se; int i, j, ij, k, length, no_close, type; char *string = vc->sequence; vrna_param_t *P = vc->params; int *indx = vc->jindx; char *ptype = vc->ptype; int noLP = P->model_details.noLP; int noGUclosure = P->model_details.noGUclosure; char backtrack_type = P->model_details.backtrack_type; /* the folding matrices */ int *my_c; length = vc->length; my_c = vc->matrices->c; se = vc->strand_end; /* int b=0;*/ length = strlen(string); if (s == 0) { bt_stack[++s].i = 1; bt_stack[s].j = length; bt_stack[s].ml = (backtrack_type == 'M') ? 1 : ((backtrack_type == 'C') ? 2 : 0); } while (s > 0) { int ml, cij; int canonical = 1; /* (i,j) closes a canonical structure */ /* pop one element from stack */ i = bt_stack[s].i; j = bt_stack[s].j; ml = bt_stack[s--].ml; switch (ml) { /* backtrack in f5 */ case 0: { int p, q; if (vrna_BT_ext_loop_f5(vc, &j, &p, &q, bp_list, &b)) { if (j > 0) { bt_stack[++s].i = 1; bt_stack[s].j = j; bt_stack[s].ml = 0; } if (p > 0) { i = p; j = q; goto repeat1; } continue; } else { vrna_message_error("backtrack failed in f5, segment [%d,%d]\n", i, j); } } break; /* true multi-loop backtrack in fML */ case 1: { int p, q, comp1, comp2; if (vrna_BT_mb_loop_split(vc, &i, &j, &p, &q, &comp1, &comp2, bp_list, &b)) { if (i > 0) { bt_stack[++s].i = i; bt_stack[s].j = j; bt_stack[s].ml = comp1; } if (p > 0) { bt_stack[++s].i = p; bt_stack[s].j = q; bt_stack[s].ml = comp2; } continue; } else { vrna_message_error("backtrack failed in fML\n%s", string); } } break; case 2: bp_list[++b].i = i; bp_list[b].j = j; goto repeat1; /* backtrack fake-multi loop parts */ case 3: case 4: { int lower, k, p, q; p = i; q = j; lower = (i <= se[0]) ? 1 : 0; if (vrna_BT_mb_loop_fake(vc, &k, &i, &j, bp_list, &b)) { if (k > 0) { bt_stack[++s].i = (lower) ? k : p; bt_stack[s].j = (lower) ? 
q : k; bt_stack[s].ml = ml; } if (i > 0) goto repeat1; continue; } else { vrna_message_error("backtrack failed in fc\n%s", string); } } break; } /* end of switch(ml) */ repeat1: /*----- begin of "repeat:" -----*/ ij = indx[j] + i; if (canonical) cij = my_c[ij]; type = vrna_get_ptype(ij, ptype); if (noLP) { if (vrna_BT_stack(vc, &i, &j, &cij, bp_list, &b)) { canonical = 0; goto repeat1; } } canonical = 1; no_close = (((type == 3) || (type == 4)) && noGUclosure); if (no_close) { if (cij == FORBIDDEN) continue; } else { if (vrna_BT_hp_loop(vc, i, j, cij, bp_list, &b)) continue; } if (vrna_BT_int_loop(vc, &i, &j, cij, bp_list, &b)) { if (i < 0) continue; else goto repeat1; } /* (i.j) must close a fake or true multi-loop */ int comp1, comp2; if (vrna_BT_mb_loop(vc, &i, &j, &k, cij, &comp1, &comp2)) { bt_stack[++s].i = i; bt_stack[s].j = k; bt_stack[s].ml = comp1; bt_stack[++s].i = k + 1; bt_stack[s].j = j; bt_stack[s].ml = comp2; } else { vrna_message_error("backtracking failed in repeat"); } /* end of repeat: --------------------------------------------------*/ } /* end >> while (s>0) << */ bp_list[0].i = b; /* save the total number of base pairs */ } PRIVATE void free_end(int *array, int i, int start, vrna_fold_compound_t *vc) { unsigned int *sn; int inc, type, energy, en, length, j, left, right, dangle_model, with_gquad, *indx, *c, *ggg, turn; vrna_param_t *P; short *S1; char *ptype; unsigned char *hard_constraints; vrna_mx_mfe_t *matrices; vrna_hc_t *hc; vrna_sc_t *sc; P = vc->params; dangle_model = P->model_details.dangles; with_gquad = P->model_details.gquad; turn = P->model_details.min_loop_size; inc = (i > start) ? 
1 : -1; length = (int)vc->length; S1 = vc->sequence_encoding; ptype = vc->ptype; indx = vc->jindx; sn = vc->strand_number; matrices = vc->matrices; c = matrices->c; ggg = matrices->ggg; hc = vc->hc; sc = vc->sc; hard_constraints = hc->mx; if (hc->up_ext[i]) { if (i == start) array[i] = 0; else array[i] = array[i - inc]; if (sc) { if (sc->energy_up) array[i] += sc->energy_up[i][1]; if (sc->f) array[i] += sc->f(start, i, start, i - 1, VRNA_DECOMP_EXT_EXT, sc->data); } } else { array[i] = INF; } if (inc > 0) { left = start; right = i; } else { left = i; right = start; } /* hard code min_loop_size to 0, since we can not be sure yet that this is already the case */ turn = 0; for (j = start; inc * (i - j) > turn; j += inc) { int ii, jj; short si, sj; if (i > j) { ii = j; jj = i; } /* inc>0 */ else { ii = i; jj = j; } /* inc<0 */ if (hard_constraints[length * ii + jj] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP) { type = vrna_get_ptype(indx[jj] + ii, ptype); si = ((ii > 1) && (sn[ii - 1] == sn[ii])) ? S1[ii - 1] : -1; sj = ((jj < length) && (sn[jj] == sn[jj + 1])) ? 
S1[jj + 1] : -1; energy = c[indx[jj] + ii]; if ((sc) && (sc->f)) energy += sc->f(start, jj, ii - 1, ii, VRNA_DECOMP_EXT_EXT_STEM, sc->data); if (energy != INF) { switch (dangle_model) { case 0: if (array[j - inc] != INF) { en = array[j - inc] + energy + E_ExtLoop(type, -1, -1, P); array[i] = MIN2(array[i], en); } break; case 2: if (array[j - inc] != INF) { en = array[j - inc] + energy + E_ExtLoop(type, si, sj, P); array[i] = MIN2(array[i], en); } break; default: if (array[j - inc] != INF) { en = array[j - inc] + energy + E_ExtLoop(type, -1, -1, P); array[i] = MIN2(array[i], en); } if (inc > 0) { if (j > left) { if (hc->up_ext[ii - 1]) { if (array[j - 2] != INF) { en = array[j - 2] + energy + E_ExtLoop(type, si, -1, P); if (sc) if (sc->energy_up) en += sc->energy_up[ii - 1][1]; array[i] = MIN2(array[i], en); } } } } else if (j < right) { if (hc->up_ext[jj + 1]) { if (array[j + 2] != INF) { en = array[j + 2] + energy + E_ExtLoop(type, -1, sj, P); if (sc) if (sc->energy_up) en += sc->energy_up[jj + 1][1]; array[i] = MIN2(array[i], en); } } } break; } } } if (with_gquad) { if (sn[ii] == sn[jj]) if (array[j - inc] != INF) array[i] = MIN2(array[i], array[j - inc] + ggg[indx[jj] + ii]); } if (dangle_model % 2 == 1) { /* interval ends in a dangle (i.e. i-inc is paired) */ if (i > j) { ii = j; jj = i - 1; } /* inc>0 */ else { ii = i + 1; jj = j; } /* inc<0 */ if (!(hard_constraints[length * ii + jj] & VRNA_CONSTRAINT_CONTEXT_EXT_LOOP)) continue; type = vrna_get_ptype(indx[jj] + ii, ptype); si = (ii > left) && (sn[ii - 1] == sn[ii]) ? S1[ii - 1] : -1; sj = (jj < right) && (sn[jj] == sn[jj + 1]) ? 
S1[jj + 1] : -1; energy = c[indx[jj] + ii]; if (energy != INF) { if (inc > 0) { if (hc->up_ext[jj - 1]) { if (array[j - inc] != INF) { en = array[j - inc] + energy + E_ExtLoop(type, -1, sj, P); if (sc) if (sc->energy_up) en += sc->energy_up[jj + 1][1]; array[i] = MIN2(array[i], en); } } } else { if (hc->up_ext[ii - 1]) { if (array[j - inc] != INF) { en = array[j - inc] + energy + E_ExtLoop(type, si, -1, P); if (sc) if (sc->energy_up) en += sc->energy_up[ii - 1][1]; array[i] = MIN2(array[i], en); } } } if (j != start) { /* dangle_model on both sides */ if (hc->up_ext[jj - 1] && hc->up_ext[ii - 1]) { if (array[j - 2 * inc] != INF) { en = array[j - 2 * inc] + energy + E_ExtLoop(type, si, sj, P); if (sc) if (sc->energy_up) en += sc->energy_up[ii - 1][1] + sc->energy_up[jj + 1][1]; array[i] = MIN2(array[i], en); } } } } } } } PRIVATE void backtrack(sect bt_stack[], vrna_bp_stack_t *bp_list, vrna_fold_compound_t *vc) { /*routine to call backtrack_co from 1 to n, backtrack type??*/ backtrack_co(bt_stack, bp_list, 0, 0, vc); } PRIVATE void doubleseq(vrna_fold_compound_t *vc) { unsigned int length, i, s; length = vc->length; /* do some magic to re-use cofold code */ vc->sequence = vrna_realloc(vc->sequence, sizeof(char) * (2 * length + 2)); memcpy(vc->sequence + length, vc->sequence, sizeof(char) * length); vc->sequence[2 * length] = '\0'; vc->length = (unsigned int)strlen(vc->sequence); vc->cutpoint = length + 1; vc->strands = 2; free(vc->strand_number); vc->strand_number = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->length + 1)); for (s = i = 0; i <= vc->length; i++) { if (i == length + 1) s++; vc->strand_number[i] = s; } free(vc->strand_order); free(vc->strand_start); free(vc->strand_end); vc->strand_order = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->strands + 1)); vc->strand_start = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->strands + 1)); vc->strand_end = (unsigned int *)vrna_alloc(sizeof(unsigned int) * (vc->strands + 1)); 
vc->strand_order[0] = 0; vc->strand_order[1] = 1; vc->strand_start[0] = 1; vc->strand_end[0] = vc->strand_start[0] + length - 1; vc->strand_start[1] = vc->strand_end[0] + 1; vc->strand_end[1] = vc->strand_start[1] + length - 1; vc->sequence_encoding = vrna_realloc(vc->sequence_encoding, sizeof(short) * (vc->length + 2)); memcpy(vc->sequence_encoding + length + 1, vc->sequence_encoding + 1, sizeof(short) * length); vc->sequence_encoding[0] = vc->sequence_encoding[vc->length]; vc->sequence_encoding[vc->length + 1] = vc->sequence_encoding[1]; vc->sequence_encoding2 = vrna_realloc(vc->sequence_encoding2, sizeof(short) * (vc->length + 2)); memcpy(vc->sequence_encoding2 + length + 1, vc->sequence_encoding2 + 1, sizeof(short) * length); vc->sequence_encoding2[0] = vc->length; vc->sequence_encoding2[vc->length + 1] = 0; free(vc->ptype); vc->ptype = vrna_ptypes(vc->sequence_encoding2, &(vc->params->model_details)); free(vc->iindx); vc->iindx = vrna_idx_row_wise(vc->length); free(vc->jindx); vc->jindx = vrna_idx_col_wise(vc->length); vrna_hc_init(vc); /* add DP matrices */ vrna_mx_mfe_add(vc, VRNA_MX_DEFAULT, 0); } PRIVATE void halfseq(vrna_fold_compound_t *vc) { unsigned int halflength; halflength = vc->length / 2; vc->sequence = vrna_realloc(vc->sequence, sizeof(char) * (halflength + 1)); vc->sequence[halflength] = '\0'; vc->length = (unsigned int)strlen(vc->sequence); vc->cutpoint = -1; vc->strands = 1; vc->strand_number = (unsigned int *)vrna_realloc(vc->strand_number, sizeof(unsigned int) * (vc->length + 1)); vc->strand_order = (unsigned int *)vrna_realloc(vc->strand_order, sizeof(unsigned int) * (vc->strands + 1)); vc->strand_start = (unsigned int *)vrna_realloc(vc->strand_start, sizeof(unsigned int) * (vc->strands + 1)); vc->strand_end = (unsigned int *)vrna_realloc(vc->strand_end, sizeof(unsigned int) * (vc->strands + 1)); vc->sequence_encoding = vrna_realloc(vc->sequence_encoding, sizeof(short) * (vc->length + 2)); vc->sequence_encoding[0] = 
vc->sequence_encoding[vc->length]; vc->sequence_encoding[vc->length + 1] = vc->sequence_encoding[1]; vc->sequence_encoding2 = vrna_realloc(vc->sequence_encoding2, sizeof(short) * (vc->length + 2)); vc->sequence_encoding2[0] = vc->length; vc->sequence_encoding2[vc->length + 1] = 0; free(vc->ptype); vc->ptype = vrna_ptypes(vc->sequence_encoding2, &(vc->params->model_details)); free(vc->iindx); vc->iindx = vrna_idx_row_wise(vc->length); free(vc->jindx); vc->jindx = vrna_idx_col_wise(vc->length); vrna_hc_init(vc); /* add DP matrices */ vrna_mx_mfe_add(vc, VRNA_MX_DEFAULT, 0); } typedef struct { int i; int j; int e; int idxj; } zuker_pair; PRIVATE int comp_pair(const void *A, const void *B) { zuker_pair *x, *y; int ex, ey; x = (zuker_pair *)A; y = (zuker_pair *)B; ex = x->e; ey = y->e; if (ex > ey) return 1; if (ex < ey) return -1; return x->idxj + x->i - y->idxj + y->i; } PUBLIC SOLUTION * vrna_subopt_zuker(vrna_fold_compound_t *vc) { /* Compute zuker suboptimal. Here, we're abusing the cofold() code * "double" sequence, compute dimerarray entries, track back every base pair. 
* This is slightly wasteful compared to the normal solution */ char *structure, *mfestructure, **todo, *ptype; int i, j, counter, num_pairs, psize, p, *indx, *c, turn; unsigned int length, doublelength; float energy; SOLUTION *zukresults; vrna_bp_stack_t *bp_list; zuker_pair *pairlist; sect bt_stack[MAXSECTORS]; /* stack of partial structures for backtracking */ vrna_mx_mfe_t *matrices; vrna_md_t *md; md = &(vc->params->model_details); turn = md->min_loop_size; /* do some magic to re-use cofold code although vc is single sequence */ md->min_loop_size = 0; doubleseq(vc); if (!vrna_fold_compound_prepare(vc, VRNA_OPTION_MFE | VRNA_OPTION_HYBRID)) { vrna_message_warning("vrna_subopt_zuker@cofold.c: Failed to prepare vrna_fold_compound"); return NULL; } doublelength = vc->length; length = doublelength / 2; indx = vc->jindx; ptype = vc->ptype; matrices = vc->matrices; c = matrices->c; num_pairs = counter = 0; mfestructure = (char *)vrna_alloc((unsigned)doublelength + 1); structure = (char *)vrna_alloc((unsigned)doublelength + 1); zukresults = (SOLUTION *)vrna_alloc(((length * (length - 1)) / 2) * sizeof(SOLUTION)); mfestructure[0] = '\0'; /* store length at pos. 
0 */ vc->sequence_encoding[0] = vc->sequence_encoding2[0]; /* get mfe and do forward recursion */ (void)fill_arrays(vc, 1); psize = length; pairlist = (zuker_pair *)vrna_alloc(sizeof(zuker_pair) * (psize + 1)); bp_list = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (1 + length / 2)); todo = (char **)vrna_alloc(sizeof(char *) * (length + 1)); for (i = 1; i < length; i++) todo[i] = (char *)vrna_alloc(sizeof(char) * (length + 1)); /* Make a list of all base pairs */ for (i = 1; i < length; i++) { for (j = i + turn + 1 /*??*/; j <= length; j++) { if (ptype[indx[j] + i] == 0) continue; if (num_pairs >= psize) { psize = 1.2 * psize + 32; pairlist = vrna_realloc(pairlist, sizeof(zuker_pair) * (psize + 1)); } pairlist[num_pairs].i = i; pairlist[num_pairs].j = j; pairlist[num_pairs].e = c[indx[j] + i] + c[indx[i + length] + j]; pairlist[num_pairs++].idxj = indx[j]; todo[i][j] = 1; } } qsort(pairlist, num_pairs, sizeof(zuker_pair), comp_pair); for (p = 0; p < num_pairs; p++) { i = pairlist[p].i; j = pairlist[p].j; if (todo[i][j]) { int k; char *sz; bt_stack[1].i = i; bt_stack[1].j = j; bt_stack[1].ml = 2; backtrack_co(bt_stack, bp_list, 1, 0, vc); bt_stack[1].i = j; bt_stack[1].j = i + length; bt_stack[1].ml = 2; backtrack_co(bt_stack, bp_list, 1, bp_list[0].i, vc); energy = pairlist[p].e; sz = vrna_db_from_bp_stack(bp_list, length); zukresults[counter].energy = energy / 100.; zukresults[counter++].structure = sz; for (k = 1; k <= bp_list[0].i; k++) { /* mark all pairs in structure as done */ int x, y; x = bp_list[k].i; y = bp_list[k].j; if (x > length) x -= length; if (y > length) y -= length; if (x > y) { int temp; temp = x; x = y; y = temp; } todo[x][y] = 0; } } } /* clean up */ free(pairlist); for (i = 1; i < length; i++) free(todo[i]); free(todo); free(structure); free(mfestructure); free(bp_list); /* undo magic */ halfseq(vc); md->min_loop_size = turn; return zukresults; } /*###########################################*/ /*# deprecated functions below #*/ 
/*###########################################*/

#ifndef VRNA_DISABLE_BACKWARD_COMPATIBILITY

/* Export the DP matrices of the global backward-compatibility fold compound
 * through the given output pointers. No-op when no compound has been created
 * yet (the pointers are then left untouched). */
PRIVATE void
wrap_array_export(int **f5_p, int **c_p, int **fML_p, int **fM1_p, int **fc_p, int **indx_p, char **ptype_p)
{
  /* make the DP arrays available to routines such as subopt() */
  if (backward_compat_compound) {
    *f5_p    = backward_compat_compound->matrices->f5;
    *c_p     = backward_compat_compound->matrices->c;
    *fML_p   = backward_compat_compound->matrices->fML;
    *fM1_p   = backward_compat_compound->matrices->fM1;
    *fc_p    = backward_compat_compound->matrices->fc;
    *indx_p  = backward_compat_compound->jindx;
    *ptype_p = backward_compat_compound->ptype;
  }
}

/*--------------------------------------------------------------------------*/

/* Backward-compatible cofold() implementation: builds a fold compound from
 * the global-variable state (cut_point, temperature, fold_constrained),
 * computes the dimer MFE and, if requested, backtracks the structure into
 * 'structure'. The created compound is stashed in the global
 * backward_compat_compound (replacing and freeing any previous one).
 * Returns the MFE in kcal/mol. */
PRIVATE float
wrap_cofold(const char *string, char *structure, vrna_param_t *parameters, int is_constrained)
{
  unsigned int length;
  char *seq;
  vrna_fold_compound_t *vc;
  vrna_param_t *P;
  float mfe;

  vc = NULL;
  length = strlen(string);

#ifdef _OPENMP
  /* Explicitly turn off dynamic threads */
  omp_set_dynamic(0);
#endif

  /* we need the parameter structure for hard constraints */
  if (parameters) {
    P = vrna_params_copy(parameters);
  } else {
    vrna_md_t md;
    set_model_details(&md);
    md.temperature = temperature;
    P = vrna_params(&md);
  }

  P->model_details.min_loop_size = 0; /* set min loop length to 0 */

  /* dirty hack to reinsert the '&' according to the global variable 'cut_point' */
  seq = vrna_cut_point_insert(string, cut_point);

  /* get compound structure */
  vc = vrna_fold_compound(seq, &(P->model_details), 0);

  if (parameters) {
    /* replace params if necessary; vc takes ownership of P in this branch */
    free(vc->params);
    vc->params = P;
  } else {
    /* model details were copied into vc, local P no longer needed */
    free(P);
  }

  /* handle hard constraints in pseudo dot-bracket format if passed via simple interface */
  if (is_constrained && structure) {
    unsigned int constraint_options = 0;
    constraint_options |= VRNA_CONSTRAINT_DB | VRNA_CONSTRAINT_DB_PIPE | VRNA_CONSTRAINT_DB_DOT | VRNA_CONSTRAINT_DB_X | VRNA_CONSTRAINT_DB_ANG_BRACK | VRNA_CONSTRAINT_DB_RND_BRACK | VRNA_CONSTRAINT_DB_INTRAMOL | VRNA_CONSTRAINT_DB_INTERMOL;
    vrna_constraints_add(vc, (const char *)structure, constraint_options);
  }

  /* replace the global compound; the old one (if any) is freed */
  if (backward_compat_compound)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound = vc;
  backward_compat          = 1;

  /* cleanup */
  free(seq);

  /* call mfe_dimer without backtracing */
  mfe = vrna_mfe_dimer(vc, NULL);

  /* now we backtrace in a backward compatible way */
  if (structure && vc->params->model_details.backtrack) {
    char *s;
    sect bt_stack[MAXSECTORS];
    vrna_bp_stack_t *bp;

    bp = (vrna_bp_stack_t *)vrna_alloc(sizeof(vrna_bp_stack_t) * (4 * (1 + length / 2))); /* add a guess of how many G's may be involved in a G quadruplex */

    backtrack(bt_stack, bp, vc);

    s = vrna_db_from_bp_stack(bp, length);
    strncpy(structure, s, length + 1);
    free(s);

    /* the global base_pair list takes ownership of bp */
    if (base_pair)
      free(base_pair);

    base_pair = bp;
  }

  return mfe;
}

/* Backward-compatible zukersubopt() implementation: builds a fold compound
 * (stored in the global backward_compat_compound, like wrap_cofold above)
 * and delegates to vrna_subopt_zuker(). */
PRIVATE SOLUTION *
wrap_zukersubopt(const char *string, vrna_param_t *parameters)
{
  vrna_fold_compound_t *vc;
  vrna_param_t *P;

  vc = NULL;

#ifdef _OPENMP
  /* Explicitly turn off dynamic threads */
  omp_set_dynamic(0);
#endif

  /* we need the parameter structure for hard constraints */
  if (parameters) {
    P = vrna_params_copy(parameters);
  } else {
    vrna_md_t md;
    set_model_details(&md);
    md.temperature = temperature;
    P = vrna_params(&md);
  }

  /* get compound structure */
  vc = vrna_fold_compound(string, &(P->model_details), VRNA_OPTION_DEFAULT);

  if (parameters) {
    /* replace params if necessary */
    free(vc->params);
    vc->params = P;
  } else {
    free(P);
  }

  if (backward_compat_compound)
    vrna_fold_compound_free(backward_compat_compound);

  backward_compat_compound = vc;
  backward_compat          = 1;

  return vrna_subopt_zuker(vc);
}

/* Deprecated API stub: arrays are now managed by the fold compound, so there
 * is nothing to pre-allocate. Kept for link compatibility. */
PUBLIC void
initialize_cofold(int length)
{
  /* DO NOTHING */
}

/* Release the global backward-compatibility fold compound, if it was created
 * through this legacy interface. */
PUBLIC void
free_co_arrays(void)
{
  if (backward_compat_compound && backward_compat) {
    vrna_fold_compound_free(backward_compat_compound);
    backward_compat_compound = NULL;
    backward_compat          = 0;
  }
}
/*--------------------------------------------------------------------------*/

/* Deprecated: export the cofold DP arrays including the G-quadruplex matrix. */
PUBLIC void
export_cofold_arrays_gq(int **f5_p, int **c_p, int **fML_p, int **fM1_p, int **fc_p, int **ggg_p, int **indx_p, char **ptype_p)
{
  /* make the DP arrays available to routines such as subopt() */
  wrap_array_export(f5_p, c_p, fML_p, fM1_p, fc_p, indx_p, ptype_p);
  if (backward_compat_compound)
    *ggg_p = backward_compat_compound->matrices->ggg;
}

/* Deprecated: export the cofold DP arrays (without the G-quadruplex matrix). */
PUBLIC void
export_cofold_arrays(int **f5_p, int **c_p, int **fML_p, int **fM1_p, int **fc_p, int **indx_p, char **ptype_p)
{
  wrap_array_export(f5_p, c_p, fML_p, fM1_p, fc_p, indx_p, ptype_p);
}

/* Deprecated: global-state cofold; constraint flag taken from the global
 * 'fold_constrained'. */
PUBLIC float
cofold(const char *string, char *structure)
{
  return wrap_cofold(string, structure, NULL, fold_constrained);
}

/* Deprecated: cofold with explicit energy parameters. */
PUBLIC float
cofold_par(const char *string, char *structure, vrna_param_t *parameters, int is_constrained)
{
  return wrap_cofold(string, structure, parameters, is_constrained);
}

/* Deprecated: Zuker-style suboptimals with default parameters. */
PUBLIC SOLUTION *
zukersubopt(const char *string)
{
  return wrap_zukersubopt(string, NULL);
}

/* Deprecated: Zuker-style suboptimals with explicit energy parameters. */
PUBLIC SOLUTION *
zukersubopt_par(const char *string, vrna_param_t *parameters)
{
  return wrap_zukersubopt(string, parameters);
}

/* Deprecated: refresh the energy parameters of the global compound from the
 * current global model settings. */
PUBLIC void
update_cofold_params(void)
{
  vrna_fold_compound_t *v;

  if (backward_compat_compound && backward_compat) {
    vrna_md_t md;
    v = backward_compat_compound;

    if (v->params)
      free(v->params);

    set_model_details(&md);
    v->params = vrna_params(&md);
  }
}

/* Deprecated: refresh the energy parameters of the global compound, either
 * from a user-supplied parameter set or from the global model settings. */
PUBLIC void
update_cofold_params_par(vrna_param_t *parameters)
{
  vrna_fold_compound_t *v;

  if (backward_compat_compound && backward_compat) {
    v = backward_compat_compound;

    if (v->params)
      free(v->params);

    if (parameters) {
      v->params = vrna_params_copy(parameters);
    } else {
      vrna_md_t md;
      set_model_details(&md);
      md.temperature = temperature;
      v->params = vrna_params(&md);
    }
  }
}

/* Deprecated: read back the two monomer MFEs computed by the last cofold run
 * (file-scope globals mfe1/mfe2). */
PUBLIC void
get_monomere_mfes(float *e1, float *e2)
{
  /*exports monomere free energies*/
  *e1 = mfe1;
  *e2 = mfe2;
}

#endif
omp_target_debug.c
// RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 | %fcheck-aarch64-unknown-linux-gnu -allow-empty -check-prefix=DEBUG // RUN: %libomptarget-compile-aarch64-unknown-linux-gnu && env LIBOMPTARGET_DEBUG=0 %libomptarget-run-aarch64-unknown-linux-gnu 2>&1 | %fcheck-aarch64-unknown-linux-gnu -allow-empty -check-prefix=NDEBUG // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 | %fcheck-powerpc64-ibm-linux-gnu -allow-empty -check-prefix=DEBUG // RUN: %libomptarget-compile-powerpc64-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=0 %libomptarget-run-powerpc64-ibm-linux-gnu 2>&1 | %fcheck-powerpc64-ibm-linux-gnu -allow-empty -check-prefix=NDEBUG // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu -allow-empty -check-prefix=DEBUG // RUN: %libomptarget-compile-powerpc64le-ibm-linux-gnu && env LIBOMPTARGET_DEBUG=0 %libomptarget-run-powerpc64le-ibm-linux-gnu 2>&1 | %fcheck-powerpc64le-ibm-linux-gnu -allow-empty -check-prefix=NDEBUG // RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env LIBOMPTARGET_DEBUG=1 %libomptarget-run-x86_64-pc-linux-gnu 2>&1 | %fcheck-x86_64-pc-linux-gnu -allow-empty -check-prefix=DEBUG // RUN: %libomptarget-compile-x86_64-pc-linux-gnu && env LIBOMPTARGET_DEBUG=0 %libomptarget-run-x86_64-pc-linux-gnu 2>&1 | %fcheck-x86_64-pc-linux-gnu -allow-empty -check-prefix=NDEBUG // REQUIRES: libomptarget-debug int main(void) { #pragma omp target {} return 0; } // DEBUG: Libomptarget // NDEBUG-NOT: Libomptarget // NDEBUG-NOT: Target
SB_TV_core.c
/*
 * This work is part of the Core Imaging Library developed by
 * Visual Analytics and Imaging System Group of the Science Technology
 * Facilities Council, STFC
 *
 * Copyright 2017 Daniil Kazantsev
 * Copyright 2017 Srikanth Nagella, Edoardo Pasca
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * http://www.apache.org/licenses/LICENSE-2.0
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "SB_TV_core.h"

/* C-OMP implementation of Split Bregman - TV denoising-regularisation model (2D/3D) [1]
 *
 * Input Parameters:
 * 1. Noisy image/volume
 * 2. lambda - regularisation parameter
 * 3. Number of iterations [OPTIONAL parameter]
 * 4. epsilon - tolerance constant [OPTIONAL parameter]
 * 5. TV-type: 'iso' or 'l1' [OPTIONAL parameter]
 *
 * Output:
 * [1] Filtered/regularized image/volume
 * [2] Information vector which contains [iteration no., reached tolerance]
 *
 * [1]. Goldstein, T. and Osher, S., 2009. The split Bregman method for L1-regularized problems. SIAM journal on imaging sciences, 2(2), pp.323-343.
*/

/* Main entry point: Split Bregman TV denoising of a 2D image (dimZ == 1) or
 * 3D volume. Writes the regularized result into Output and fills
 * infovector[0] = iterations performed, infovector[1] = last residual norm.
 * Note: 'mu' is inverted on entry, so the caller passes the regularisation
 * parameter and this routine works with its reciprocal internally.
 * copyIm() is a project helper (defined elsewhere) — presumably a plain
 * element-wise copy of a dimX*dimY*dimZ array; confirm against its definition. */
float SB_TV_CPU_main(float *Input, float *Output, float *infovector, float mu, int iter, float epsil, int methodTV, int dimX, int dimY, int dimZ)
{
    int ll;
    long j, DimTotal;
    float re, re1, lambda;
    re = 0.0f; re1 = 0.0f;
    int count = 0;              /* counts consecutive residual checks below tolerance */
    mu = 1.0f/mu;               /* work with the reciprocal of the input parameter */
    lambda = 2.0f*mu;

    /* Bregman auxiliary fields: D* hold the split gradient variables,
     * B* the Bregman update variables; all zero-initialized by calloc. */
    float *Output_prev=NULL, *Dx=NULL, *Dy=NULL, *Bx=NULL, *By=NULL;
    DimTotal = (long)(dimX*dimY*dimZ);
    Output_prev = calloc(DimTotal, sizeof(float));
    Dx = calloc(DimTotal, sizeof(float));
    Dy = calloc(DimTotal, sizeof(float));
    Bx = calloc(DimTotal, sizeof(float));
    By = calloc(DimTotal, sizeof(float));
    /* NOTE(review): calloc results are not checked for NULL before use —
     * an allocation failure here would dereference NULL in the kernels. */

    if (dimZ == 1) {
        /* 2D case */
        copyIm(Input, Output, (long)(dimX), (long)(dimY), 1l); /*initialize */
        /* begin outer SB iterations */
        for(ll=0; ll<iter; ll++) {
            /* storing old estimate */
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);
            /* perform two GS iterations (normally 2 is enough for the convergence) */
            gauss_seidel2D(Output, Input, Output_prev, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda, mu);
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), 1l);
            /*GS iteration */
            gauss_seidel2D(Output, Input, Output_prev, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda, mu);
            /* TV-related step: methodTV == 1 selects anisotropic (l1) shrinkage,
             * anything else the isotropic shrinkage */
            if (methodTV == 1) updDxDy_shrinkAniso2D(Output, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda);
            else updDxDy_shrinkIso2D(Output, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY), lambda);
            /* update for Bregman variables */
            updBxBy2D(Output, Dx, Dy, Bx, By, (long)(dimX), (long)(dimY));
            /* check early stopping criteria if epsilon not equal zero;
             * the relative residual is only evaluated every 5th iteration */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                /* stop if the norm residual is less than the tolerance EPS */
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
    }
    else {
        /* 3D case: two extra auxiliary fields for the z-direction */
        float *Dz=NULL, *Bz=NULL;
        Dz = calloc(DimTotal, sizeof(float));
        Bz = calloc(DimTotal, sizeof(float));
        copyIm(Input, Output, (long)(dimX), (long)(dimY), (long)(dimZ)); /*initialize */
        /* begin outer SB iterations */
        for(ll=0; ll<iter; ll++) {
            /* storing old estimate */
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            /* perform two GS iterations (normally 2 is enough for the convergence) */
            gauss_seidel3D(Output, Input, Output_prev, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, mu);
            copyIm(Output, Output_prev, (long)(dimX), (long)(dimY), (long)(dimZ));
            /*GS iteration */
            gauss_seidel3D(Output, Input, Output_prev, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda, mu);
            /* TV-related step */
            if (methodTV == 1) updDxDyDz_shrinkAniso3D(Output, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda);
            else updDxDyDz_shrinkIso3D(Output, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ), lambda);
            /* update for Bregman variables */
            updBxByBz3D(Output, Dx, Dy, Dz, Bx, By, Bz, (long)(dimX), (long)(dimY), (long)(dimZ));
            /* check early stopping criteria if epsilon not equal zero */
            if ((epsil != 0.0f) && (ll % 5 == 0)) {
                re = 0.0f; re1 = 0.0f;
                for(j=0; j<DimTotal; j++) {
                    re += powf(Output[j] - Output_prev[j],2);
                    re1 += powf(Output[j],2);
                }
                re = sqrtf(re)/sqrtf(re1);
                /* stop if the norm residual is less than the tolerance EPS */
                if (re < epsil) count++;
                if (count > 3) break;
            }
        }
        free(Dz); free(Bz);
    }
    free(Output_prev); free(Dx); free(Dy); free(Bx); free(By);
    /*adding info into info_vector */
    infovector[0] = (float)(ll); /*iterations number (if stopped earlier based on tolerance)*/
    infovector[1] = re; /* reached tolerance */
    return 0;
}

/********************************************************************/
/***************************2D Functions*****************************/
/********************************************************************/

/* One parallel Gauss-Seidel-style sweep of the 2D sub-problem:
 * U[index] = (mu*A + lambda*(neighbour sum of U_prev + div of D - div of B)) / (mu + 4*lambda).
 * Reads only U_prev for neighbours, so iterations over j are independent and
 * safe to run under OpenMP. Returns the (meaningless) first element of U;
 * callers ignore the return value. */
float gauss_seidel2D(float *U, float *A, float *U_prev, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY, float lambda, float mu)
{
    float sum, normConst;
    long i,j,i1,i2,j1,j2,index;
    normConst = 1.0f/(mu + 4.0f*lambda);
#pragma omp parallel for shared(U) private(index,i,j,i1,i2,j1,j2,sum)
    for(j=0; j<dimY; j++) {
        /* symmetric boundary conditions (Neumann): reflect indices at the edges */
        j1 = j+1; if (j1 == dimY) j1 = j-1;
        j2 = j-1; if (j2 < 0) j2 = j+1;
        for(i=0; i<dimX; i++) {
            /* symmetric boundary conditions (Neumann) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            i2 = i-1; if (i2 < 0) i2 = i+1;
            index = j*dimX+i;
            /* backward differences of D minus B (divergence terms) */
            sum = Dx[j*dimX+i2] - Dx[index] + Dy[j2*dimX+i] - Dy[index] - Bx[j*dimX+i2] + Bx[index] - By[j2*dimX+i] + By[index];
            /* 4-neighbour sum of the previous estimate */
            sum += U_prev[j*dimX+i1] + U_prev[j*dimX+i2] + U_prev[j1*dimX+i] + U_prev[j2*dimX+i];
            sum *= lambda;
            sum += mu*A[index];
            U[index] = normConst*sum;
        }}
    return *U;
}

/* Anisotropic (component-wise) soft-thresholding of the gradient variables:
 * Dx/Dy <- shrink(grad(U) + B, 1/lambda), applied independently per axis. */
float updDxDy_shrinkAniso2D(float *U, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY, float lambda)
{
    long i,j,i1,j1,index;
    float val1, val11, val2, val22, denom_lam;
    denom_lam = 1.0f/lambda;   /* shrinkage threshold */
#pragma omp parallel for shared(U,denom_lam) private(index,i,j,i1,j1,val1,val11,val2,val22)
    for(j=0; j<dimY; j++) {
        j1 = j+1; if (j1 == dimY) j1 = j-1;
        for(i=0; i<dimX; i++) {
            /* symmetric boundary conditions (Neumann) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            index = j*dimX+i;
            /* forward differences of U plus Bregman variables */
            val1 = (U[j*dimX+i1] - U[index]) + Bx[index];
            val2 = (U[j1*dimX+i] - U[index]) + By[index];
            /* soft-threshold magnitudes */
            val11 = fabs(val1) - denom_lam; if (val11 < 0) val11 = 0;
            val22 = fabs(val2) - denom_lam; if (val22 < 0) val22 = 0;
            /* restore sign; zero input maps to zero */
            if (val1 !=0) Dx[index] = (val1/fabs(val1))*val11; else Dx[index] = 0;
            if (val2 !=0) Dy[index] = (val2/fabs(val2))*val22; else Dy[index] = 0;
        }}
    return 1;
}

/* Isotropic shrinkage: threshold the joint gradient magnitude
 * sqrt(valx^2 + valy^2) and scale both components by the shrunk magnitude. */
float updDxDy_shrinkIso2D(float *U, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY, float lambda)
{
    long i,j,i1,j1,index;
    float val1, val11, val2, denom, denom_lam;
    denom_lam = 1.0f/lambda;   /* shrinkage threshold */
#pragma omp parallel for shared(U,denom_lam) private(index,i,j,i1,j1,val1,val11,val2,denom)
    for(j=0; j<dimY; j++) {
        j1 = j+1; if (j1 == dimY) j1 = j-1;
        for(i=0; i<dimX; i++) {
            /* symmetric boundary conditions (Neumann) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            index = j*dimX+i;
            val1 = (U[j*dimX+i1] - U[index]) + Bx[index];
            val2 = (U[j1*dimX+i] - U[index]) + By[index];
            denom = sqrt(val1*val1 + val2*val2);
            val11 = (denom - denom_lam); if (val11 < 0) val11 = 0.0f;
            if (denom != 0.0f) {
                Dx[index] = val11*(val1/denom);
                Dy[index] = val11*(val2/denom);
            }
            else {
                Dx[index] = 0;
                Dy[index] = 0;
            }
        }}
    return 1;
}

/* Bregman variable update: B <- B + (grad(U) - D), per axis. */
float updBxBy2D(float *U, float *Dx, float *Dy, float *Bx, float *By, long dimX, long dimY)
{
    long i,j,i1,j1,index;
#pragma omp parallel for shared(U) private(index,i,j,i1,j1)
    for(j=0; j<dimY; j++) {
        j1 = j+1; if (j1 == dimY) j1 = j-1;
        for(i=0; i<dimX; i++) {
            /* symmetric boundary conditions (Neumann) */
            i1 = i+1; if (i1 == dimX) i1 = i-1;
            index = j*dimX+i;
            Bx[index] += (U[j*dimX+i1] - U[index]) - Dx[index];
            By[index] += (U[j1*dimX+i] - U[index]) - Dy[index];
        }}
    return 1;
}

/********************************************************************/
/***************************3D Functions*****************************/
/********************************************************************/
/*****************************************************************/

/* 3D analogue of gauss_seidel2D: 6-neighbour stencil, normalisation
 * constant 1/(mu + 6*lambda). Parallel over z-slices. */
float gauss_seidel3D(float *U, float *A, float *U_prev, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ, float lambda, float mu)
{
    float normConst, d_val, b_val, sum;
    long i,j,i1,i2,j1,j2,k,k1,k2,index;
    normConst = 1.0f/(mu + 6.0f*lambda);
#pragma omp parallel for shared(U) private(index,i,j,i1,i2,j1,j2,k,k1,k2,d_val,b_val,sum)
    for(k=0; k<dimZ; k++) {
        /* reflected (Neumann) indices in z */
        k1 = k+1; if (k1 == dimZ) k1 = k-1;
        k2 = k-1; if (k2 < 0) k2 = k+1;
        for(j=0; j<dimY; j++) {
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            j2 = j-1; if (j2 < 0) j2 = j+1;
            for(i=0; i<dimX; i++) {
                /* symmetric boundary conditions (Neumann) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                i2 = i-1; if (i2 < 0) i2 = i+1;
                index = (dimX*dimY)*k + j*dimX+i;
                /* divergence of D and of B (with opposite sign) */
                d_val = Dx[(dimX*dimY)*k + j*dimX+i2] - Dx[index] + Dy[(dimX*dimY)*k + j2*dimX+i] - Dy[index] + Dz[(dimX*dimY)*k2 + j*dimX+i] - Dz[index];
                b_val = -Bx[(dimX*dimY)*k + j*dimX+i2] + Bx[index] - By[(dimX*dimY)*k + j2*dimX+i] + By[index] - Bz[(dimX*dimY)*k2 + j*dimX+i] + Bz[index];
                sum = d_val + b_val;
                /* 6-neighbour sum of previous estimate */
                sum += U_prev[(dimX*dimY)*k + j*dimX+i1] + U_prev[(dimX*dimY)*k + j*dimX+i2] + U_prev[(dimX*dimY)*k + j1*dimX+i] + U_prev[(dimX*dimY)*k + j2*dimX+i] + U_prev[(dimX*dimY)*k1 + j*dimX+i] + U_prev[(dimX*dimY)*k2 + j*dimX+i];
                sum *= lambda;
                sum += mu*A[index];
                U[index] = normConst*sum;
            }}}
    return *U;
}

/* 3D anisotropic soft-thresholding: per-axis shrink of grad(U) + B. */
float updDxDyDz_shrinkAniso3D(float *U, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ, float lambda)
{
    long i,j,i1,j1,k,k1,index;
    float val1, val11, val2, val22, val3, val33, denom_lam;
    denom_lam = 1.0f/lambda;   /* shrinkage threshold */
#pragma omp parallel for shared(U,denom_lam) private(index,i,j,i1,j1,k,k1,val1,val11,val2,val22,val3,val33)
    for(k=0; k<dimZ; k++) {
        k1 = k+1; if (k1 == dimZ) k1 = k-1;
        for(j=0; j<dimY; j++) {
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neumann) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                val1 = (U[(dimX*dimY)*k + j*dimX+i1] - U[index]) + Bx[index];
                val2 = (U[(dimX*dimY)*k + j1*dimX+i] - U[index]) + By[index];
                val3 = (U[(dimX*dimY)*k1 + j*dimX+i] - U[index]) + Bz[index];
                val11 = fabs(val1) - denom_lam; if (val11 < 0.0f) val11 = 0.0f;
                val22 = fabs(val2) - denom_lam; if (val22 < 0.0f) val22 = 0.0f;
                val33 = fabs(val3) - denom_lam; if (val33 < 0.0f) val33 = 0.0f;
                if (val1 !=0.0f) Dx[index] = (val1/fabs(val1))*val11; else Dx[index] = 0.0f;
                if (val2 !=0.0f) Dy[index] = (val2/fabs(val2))*val22; else Dy[index] = 0.0f;
                if (val3 !=0.0f) Dz[index] = (val3/fabs(val3))*val33; else Dz[index] = 0.0f;
            }}}
    return 1;
}

/* 3D isotropic shrinkage on the joint gradient magnitude. */
float updDxDyDz_shrinkIso3D(float *U, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ, float lambda)
{
    long i,j,i1,j1,k,k1,index;
    float val1, val11, val2, val3, denom, denom_lam;
    denom_lam = 1.0f/lambda;   /* shrinkage threshold */
#pragma omp parallel for shared(U,denom_lam) private(index,denom,i,j,i1,j1,k,k1,val1,val11,val2,val3)
    for(k=0; k<dimZ; k++) {
        k1 = k+1; if (k1 == dimZ) k1 = k-1;
        for(j=0; j<dimY; j++) {
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            for(i=0; i<dimX; i++) {
                /* symmetric boundary conditions (Neumann) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                index = (dimX*dimY)*k + j*dimX+i;
                val1 = (U[(dimX*dimY)*k + j*dimX+i1] - U[index]) + Bx[index];
                val2 = (U[(dimX*dimY)*k + j1*dimX+i] - U[index]) + By[index];
                val3 = (U[(dimX*dimY)*k1 + j*dimX+i] - U[index]) + Bz[index];
                denom = sqrt(val1*val1 + val2*val2 + val3*val3);
                val11 = (denom - denom_lam); if (val11 < 0) val11 = 0.0f;
                if (denom != 0.0f) {
                    Dx[index] = val11*(val1/denom);
                    Dy[index] = val11*(val2/denom);
                    Dz[index] = val11*(val3/denom);
                }
                else {
                    Dx[index] = 0;
                    Dy[index] = 0;
                    Dz[index] = 0;
                }
            }}}
    return 1;
}

/* 3D Bregman variable update: B <- B + (grad(U) - D), per axis. */
float updBxByBz3D(float *U, float *Dx, float *Dy, float *Dz, float *Bx, float *By, float *Bz, long dimX, long dimY, long dimZ)
{
    long i,j,k,i1,j1,k1,index;
#pragma omp parallel for shared(U) private(index,i,j,k,i1,j1,k1)
    for(k=0; k<dimZ; k++) {
        k1 = k+1; if (k1 == dimZ) k1 = k-1;
        for(j=0; j<dimY; j++) {
            j1 = j+1; if (j1 == dimY) j1 = j-1;
            for(i=0; i<dimX; i++) {
                index = (dimX*dimY)*k + j*dimX+i;
                /* symmetric boundary conditions (Neumann) */
                i1 = i+1; if (i1 == dimX) i1 = i-1;
                Bx[index] += (U[(dimX*dimY)*k + j*dimX+i1] - U[index]) - Dx[index];
                By[index] += (U[(dimX*dimY)*k + j1*dimX+i] - U[index]) - Dy[index];
                Bz[index] += (U[(dimX*dimY)*k1 + j*dimX+i] - U[index]) - Dz[index];
            }}}
    return 1;
}
bml_export_csr_typed.c
#include "../../macros.h"
#include "../../typed.h"
#include "../bml_allocate.h"
#include "../bml_logger.h"
#include "../bml_types.h"
#include "bml_allocate_csr.h"
#include "bml_export_csr.h"
#include "bml_types_csr.h"

#include <complex.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#ifdef _OPENMP
#include <omp.h>
#endif

/** Convert a bml matrix into a dense matrix.
 *
 * \ingroup convert_group
 *
 * \param A The bml matrix
 * \param order Row- or column-major layout requested for the result
 * \return The dense matrix (caller owns the allocation)
 */
void *TYPED_FUNC(
    bml_export_to_dense_csr) (
    bml_matrix_csr_t * A,
    bml_dense_order_t order)
{
    int n = A->N_;
    REAL_T *dense = bml_allocate_memory(sizeof(REAL_T) * n * n);

    if (order == dense_row_major)
    {
        /* scatter each sparse row into the corresponding dense row */
#pragma omp parallel for shared(n, dense)
        for (int row = 0; row < n; row++)
        {
            int *row_cols = A->data_[row]->cols_;
            REAL_T *row_vals = (REAL_T *) A->data_[row]->vals_;
            const int nnz = A->data_[row]->NNZ_;
            for (int p = 0; p < nnz; p++)
            {
                dense[ROWMAJOR(row, row_cols[p], n, n)] = row_vals[p];
            }
        }
    }
    else if (order == dense_column_major)
    {
#pragma omp parallel for shared(n, dense)
        for (int row = 0; row < n; row++)
        {
            int *row_cols = A->data_[row]->cols_;
            REAL_T *row_vals = (REAL_T *) A->data_[row]->vals_;
            const int nnz = A->data_[row]->NNZ_;
            for (int p = 0; p < nnz; p++)
            {
                dense[COLMAJOR(row, row_cols[p], n, n)] = row_vals[p];
            }
        }
    }
    else
    {
        LOG_ERROR("unknown order\n");
    }
    return dense;
}
GB_unaryop__ainv_uint32_fp64.c
//------------------------------------------------------------------------------
// GB_unaryop:  hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------

// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com   See GraphBLAS/Doc/License.txt for license.

//------------------------------------------------------------------------------

// If this file is in the Generated/ folder, do not edit it (auto-generated).

#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"

// C=unop(A) is defined by the following types and operators:

// op(A)  function:  GB_unop__ainv_uint32_fp64
// op(A') function:  GB_tran__ainv_uint32_fp64

// C type:   uint32_t
// A type:   double
// cast:     uint32_t cij ; GB_CAST_UNSIGNED(cij,aij,32)
// unaryop:  cij = -aij

#define GB_ATYPE \
    double

#define GB_CTYPE \
    uint32_t

// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA)  \
    double aij = Ax [pA]

#define GB_CX(p) Cx [p]

// unary operator: additive inverse (negation)
#define GB_OP(z, x) \
    z = -x ;

// casting
// NOTE(review): GB_CAST_UNSIGNED is defined in GB.h; presumably it handles
// the double -> uint32_t conversion for out-of-range/NaN inputs -- confirm
// its semantics there before relying on them.
#define GB_CASTING(z, aij) \
    uint32_t z ; GB_CAST_UNSIGNED(z,aij,32) ;

// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA)  \
{ \
    /* aij = Ax [pA] */ \
    GB_GETA (aij, Ax, pA) ; \
    /* Cx [pC] = op (cast (aij)) */ \
    GB_CASTING (z, aij) ; \
    GB_OP (GB_CX (pC), z) ; \
}

// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
    (GxB_NO_AINV || GxB_NO_UINT32 || GxB_NO_FP64)

//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------

// Elementwise: Cx [p] = (uint32_t) (-(Ax [p])) for p in [0, anz).
GrB_Info GB_unop__ainv_uint32_fp64
(
    uint32_t *Cx,       // Cx and Ax may be aliased
    double *Ax,
    int64_t anz,
    int nthreads
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    int64_t p ;
    #pragma omp parallel for num_threads(nthreads) schedule(static)
    for (p = 0 ; p < anz ; p++)
    {
        GB_CAST_OP (p, p) ;
    }
    return (GrB_SUCCESS) ;
    #endif
}

//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------

// Body is generated by textual inclusion of GB_unaryop_transpose.c, which
// expands the GB_* macros defined above (phase 2 of the 2-phase transpose).
GrB_Info GB_tran__ainv_uint32_fp64
(
    GrB_Matrix C,
    const GrB_Matrix A,
    int64_t *GB_RESTRICT *Rowcounts,
    GBI_single_iterator Iter,
    const int64_t *GB_RESTRICT A_slice,
    int naslice
)
{
    #if GB_DISABLE
    return (GrB_NO_VALUE) ;
    #else
    #define GB_PHASE_2_OF_2
    #include "GB_unaryop_transpose.c"
    return (GrB_SUCCESS) ;
    #endif
}

#endif
primes.c
/*
 * primes.c: Example of prime numbers counting in OpenMP.
 *
 * (C) 2015 Mikhail Kurnosov <mkurnosov@gmail.com>
 */

#include <stdio.h>
#include <math.h>
#include <sys/time.h>
#include <omp.h>

const int a = 1;
const int b = 20000000;

/* wtime: wall-clock time in seconds (gettimeofday-based). */
double wtime()
{
    struct timeval t;
    gettimeofday(&t, NULL);
    return (double)t.tv_sec + (double)t.tv_usec * 1E-6;
}

/*
 * is_prime_number: Returns 1 if n is a prime number and 0 otherwise.
 * This function uses trial division primality test.
 * FIX: the loop bound uses (long long)i * i <= n instead of
 * "i <= sqrt(n) + 1", which removes the dependence on floating-point
 * rounding (and on libm) while testing exactly the same divisors.
 */
int is_prime_number(int n)
{
    for (int i = 2; (long long)i * i <= n; i++) {
        if (n % i == 0)
            return 0;
    }
    return (n > 1) ? 1 : 0;
}

/*
 * count_prime_numbers: number of primes in [a, b], serial version.
 * FIX: the original unconditionally counted '2' whenever a <= 2, so e.g.
 * count_prime_numbers(1, 1) returned 1; empty/sub-2 ranges now return 0.
 */
int count_prime_numbers(int a, int b)
{
    int nprimes = 0;

    /* No primes below 2, and nothing to do for an empty range */
    if (b < 2 || a > b)
        return 0;

    /* Count '2' as a prime number */
    if (a <= 2) {
        nprimes = 1;
        a = 2;
    }

    /* Shift 'a' to odd number */
    if (a % 2 == 0)
        a++;

    /* Loop over odd numbers: a, a + 2, a + 4, ... , b */
    for (int i = a; i <= b; i += 2) {
        if (is_prime_number(i))
            nprimes++;
    }
    return nprimes;
}

/*
 * count_prime_numbers_omp: parallel version of count_prime_numbers.
 * Each thread accumulates a private count (nloc) over a dynamically
 * scheduled chunk of odd numbers, then adds it atomically; 'nowait'
 * skips the implicit barrier after the worksharing loop since the
 * atomic update needs no ordering. Same edge-case fix as the serial
 * version; dead per-thread timing code removed.
 */
int count_prime_numbers_omp(int a, int b)
{
    int nprimes = 0;

    if (b < 2 || a > b)
        return 0;

    /* Count '2' as a prime number */
    if (a <= 2) {
        nprimes = 1;
        a = 2;
    }

    /* Shift 'a' to odd number */
    if (a % 2 == 0)
        a++;

#pragma omp parallel
    {
        int nloc = 0;
#pragma omp for schedule(dynamic,100) nowait
        for (int i = a; i <= b; i += 2) {
            if (is_prime_number(i))
                nloc++;
        }
#pragma omp atomic
        nprimes += nloc;
    }
    return nprimes;
}

/* Runs the serial count, prints the result, returns elapsed seconds. */
double run_serial()
{
    double t = wtime();
    int n = count_prime_numbers(a, b);
    t = wtime() - t;
    printf("Result (serial): %d\n", n);
    return t;
}

/* Runs the parallel count, prints the result, returns elapsed seconds. */
double run_parallel()
{
    double t = wtime();
    int n = count_prime_numbers_omp(a, b);
    t = wtime() - t;
    printf("Result (parallel): %d\n", n);
    return t;
}

int main(int argc, char **argv)
{
    printf("Count prime numbers on [%d, %d]\n", a, b);
    double tserial = run_serial();
    double tparallel = run_parallel();
    printf("Execution time (serial): %.6f\n", tserial);
    printf("Execution time (parallel): %.6f\n", tparallel);
    printf("Speedup: %.2f\n", tserial / tparallel);
    return 0;
}
omp_lock.c
#include <omp.h> #include <stdio.h> #define THREADS 512 #ifdef WAVE_SIZE #define WARPSIZE WAVE_SIZE #else #define WARPSIZE 64 #endif #pragma omp declare target omp_lock_t lock; #pragma omp end declare target int main() { if (WARPSIZE == 32) return 0; int error = 0; unsigned count = 0; // incremented within target region unsigned expected_count = 0; // incremented on host #pragma omp target omp_init_lock(&lock); // The lock implementation picks a thread from the warp to avoid the // deadlock that results if multiple threads try to CAS-loop at once // The lower/upper construct checks various active warp patterns const int edges[] = {0, 1, 32, 62, 63}; const int N = sizeof(edges) / sizeof(edges[0]); for (int l = 0; l < N; l++) { for (int u = 0; u < N; u++) { int lower = edges[l]; int upper = edges[u]; if (lower > upper) continue; expected_count += THREADS / WARPSIZE; #pragma omp target parallel num_threads(THREADS) map(tofrom : error, count) { int lane_id = omp_ext_get_lane_id(); if (lane_id >= lower && lane_id <= upper) { omp_set_lock(&lock); // mutex acts on a per warp basis if (omp_ext_get_lane_id() == lower) { // Increment once per warp count++; } if (!omp_test_lock(&lock)) { error = 1; } omp_unset_lock(&lock); } } } } #pragma omp target omp_destroy_lock(&lock); if (count != expected_count) { error = 1; } fprintf(stderr, "ec %d c %d\n", expected_count, count); return error; }
target_data.c
#pragma omp target data clause [clauses] structured-block
Mod-DRB024-simdtruedep-orig-yes.c
/* Copyright (c) 2017, Lawrence Livermore National Security, LLC. Produced at the Lawrence Livermore National Laboratory Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund, Markus Schordan, and Ian Karlin (email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov, schordan1@llnl.gov, karlin1@llnl.gov) LLNL-CODE-732144 All rights reserved. This file is part of DataRaceBench. For details, see https://github.com/LLNL/dataracebench. Please also see the LICENSE file for our additional BSD notice. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the disclaimer below. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the disclaimer (as noted below) in the documentation and/or other materials provided with the distribution. * Neither the name of the LLNS/LLNL nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/

/*
This one has data races due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@66:5 vs. a[i]@66:12
*/

/* NOTE(review): this is a DataRaceBench micro-benchmark -- the loop-carried
 * dependence race below is the point of the test and must NOT be "fixed". */
#include "omprace.h"
#include <omp.h>
#include <stdio.h>

int main(int argc, char* argv[])
{
    omprace_init();
    int i;
    int len=100;
    int a[100], b[100];

    /* a[i] = i, b[i] = i+1 */
    for (i=0;i<len;i++)
    {
        a[i]=i;
        b[i]=i+1;
    }

    /* Racy: iteration i reads a[i] while iteration i-1 writes it. */
#pragma omp parallel for schedule(dynamic,1)
    for (i=0;i<len-1;i++)
        a[i+1]=a[i]+b[i];

    for (i=0;i<len;i++)
        printf("i=%d a[%d]=%d\n",i,i,a[i]);
    omprace_fini();
    return 0;
}
open_mp.c
#include <stdio.h> #include <mpi.h> #include <math.h> #include <stdlib.h> #include <time.h> #include <unistd.h> #include <omp.h> #include "../funcs.h" #define ROW_SIZE 320 #define COLUMN_SIZE 320 #define REPEAT_TIMES 20 #define MAX_TIMES 500 #define TERMCHECK_TIMES 10 #define TERMINATION_CHECK int main() { MPI_Comm comm; int ndims, reorder, periods[2]; int * dim_size = NULL; int rank,size; int * cells = NULL; int * np_cells = NULL; int * temp = NULL; int i, j; int neighbours=0; int local_rows, local_columns; #ifdef TERMINATION_CHECK printf("MPI_Allreduce enabled (every 10 iterations)\n"); int not_duplicate=1, not_dead=1; int global_duplicate, global_dead; #endif /*coords*/ int coords[2]; int neighbour_coords[2]; /*neighbour ranks*/ int north_rank, south_rank, west_rank, east_rank; int north_west_rank,north_east_rank, south_west_rank, south_east_rank; /*requests for Isend/IRcv*/ MPI_Request ISReqs[8], IRReqs[8]; MPI_Status ISStatus[8], IRStatus[8]; /*variables used for calculating time*/ double local_start, local_finish, local_elapsed, elapsed; double local_overall_start, local_overall_finish, local_overall_elapsed, overall_elapsed; int n = 0; /*constructing mpi space*/ MPI_Init(NULL, NULL); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); /*setting values for MPI_Cart_create*/ ndims = 2; /* 2D matrix/grneighbour_id */ dim_size = Calculate_Dimensions(size); /* rows & columns */ periods[0] = 1; /* row periodic */ periods[1] = 1; /* column periodic */ reorder = 1; /* allows processes reordered for efficiency */ /*constructing cartesian matrix for processes(topology)*/ MPI_Cart_create(MPI_COMM_WORLD, ndims, dim_size, periods, reorder, &comm); /*we use cart_coords so we can know the coordinates of the process*/ MPI_Cart_coords(comm, rank, ndims, coords); /*constructing the cell array & next phase cell array*/ local_rows = ROW_SIZE/dim_size[0] + 2; local_columns = COLUMN_SIZE/dim_size[1] + 2; /*make a new datatype so we can pass columns to 
other processes easier*/ MPI_Datatype column; MPI_Type_vector(local_rows - 2, 1, local_columns, MPI_INT, &column); MPI_Type_commit(&column); if( (cells = malloc(local_rows * local_columns * sizeof(int))) == NULL ) perror_exit("Malloc failed(cells)"); if( (np_cells = malloc( local_rows * local_columns * sizeof(int))) == NULL ) perror_exit("Malloc failed(np_cells)"); srand(time(NULL) + rank); #pragma omp parallel for private(i,j) collapse(2) for(i=1; i<local_rows-1; i++) { for(j=1; j<local_columns-1; j++) { cells[local_columns*i +j] = rand() % 2; } } /*using cart rank we found the neighbours of our process*/ /*north neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[0] -= 1; MPI_Cart_rank(comm, neighbour_coords, &north_rank); /*south neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[0] += 1; MPI_Cart_rank(comm, neighbour_coords, &south_rank); /*east neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[1] += 1; MPI_Cart_rank(comm, neighbour_coords, &east_rank); /*west neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[1] -= 1; MPI_Cart_rank(comm, neighbour_coords, &west_rank); /*north west neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[0] -= 1; neighbour_coords[1] -= 1; MPI_Cart_rank(comm, neighbour_coords, &north_west_rank); /*north east neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[0] -= 1; neighbour_coords[1] += 1; MPI_Cart_rank(comm, neighbour_coords, &north_east_rank); /*south west neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[0] += 1; neighbour_coords[1] -= 1; MPI_Cart_rank(comm, neighbour_coords, &south_west_rank); /*south east neighbour*/ neighbour_coords[0] = coords[0]; neighbour_coords[1] = coords[1]; neighbour_coords[0] += 1; 
neighbour_coords[1] += 1; MPI_Cart_rank(comm, neighbour_coords, &south_east_rank); /*synchronize processes*/ MPI_Barrier(comm); /*================================================> start overall calculation time*/ local_overall_start = MPI_Wtime(); while( n < MAX_TIMES ) { /*===============================================> =start calculation time*/ local_start = MPI_Wtime(); /*send to the neighbours the appropriate columns and rows(non-blocking)*/ //MPI_Isend( const void *buf, int count, MPI_Datatype datatype, int dest, int tag, MPI_Comm comm, MPI_Request *request) MPI_Isend(&cells[local_columns + 1], local_columns - 2 , MPI_INT, north_rank, 0, comm, &ISReqs[0]); MPI_Isend(&cells[local_columns * (local_rows - 2) + 1], local_columns - 2, MPI_INT, south_rank, 1, comm, &ISReqs[1]); MPI_Isend(&cells[local_columns + 1], 1, column, west_rank, 2, comm, &ISReqs[2]); MPI_Isend(&cells[local_columns + (local_columns-2)], 1, column, east_rank, 3, comm, &ISReqs[3]); MPI_Isend(&cells[local_columns + 1], 1, MPI_INT, north_west_rank, 4, comm, &ISReqs[4]); MPI_Isend(&cells[local_columns + local_columns - 2], 1, MPI_INT, north_east_rank, 5, comm, &ISReqs[5]); MPI_Isend(&cells[local_columns * (local_rows-2) + 1], 1, MPI_INT, south_west_rank, 6, comm, &ISReqs[6]); MPI_Isend(&cells[local_columns * (local_rows-2) + local_columns -2], 1, MPI_INT, south_east_rank, 7, comm, &ISReqs[7]); /*receive from the neighbours the appropriate columns and rows*/ //int MPI_Irecv(void *buf, int count, MPI_Datatype datatype, int source, int tag, MPI_Comm comm, MPI_Request *request) MPI_Irecv(&cells[1], local_columns - 2 , MPI_INT, north_rank, 1, comm, &IRReqs[0]); MPI_Irecv(&cells[local_columns*(local_rows-1) + 1], local_columns - 2, MPI_INT, south_rank, 0, comm, &IRReqs[1]); MPI_Irecv(&cells[local_columns], 1, column, west_rank, 3, comm, &IRReqs[2]); MPI_Irecv(&cells[local_columns + local_columns -1], 1, column, east_rank, 2, comm, &IRReqs[3]); MPI_Irecv(&cells[0], 1, MPI_INT, north_west_rank, 7, comm, 
&IRReqs[4]); MPI_Irecv(&cells[local_columns - 1], 1, MPI_INT, north_east_rank, 6, comm, &IRReqs[5]); MPI_Irecv(&cells[local_columns * (local_rows-1)], 1, MPI_INT, south_west_rank, 5, comm, &IRReqs[6]); MPI_Irecv(&cells[local_columns * (local_rows-1) + (local_columns-1)], 1, MPI_INT, south_east_rank, 4, comm, &IRReqs[7]); /*calculate the next phase without the info from the neighbours*/ #pragma omp parallel for private(i,j) collapse(2) for(i=2; i<local_rows-2; i++) { for(j=2; j<local_columns-2; j++) { neighbours = Calculate_Neighbours(cells, local_columns, i, j); np_cells[local_columns * i + j] = Dead_Or_Alive(cells, local_columns, i, j, neighbours); neighbours=0; } } //------>openmp /*wait until we receive and send everything from/to neighbours*/ MPI_Waitall(8, ISReqs, ISStatus); MPI_Waitall(8, IRReqs, IRStatus); /*continue calculating with the info from the neighbours we received*/ /*calculating first and last row*/ #pragma omp parallel for private(j,neighbours) for(j=1; j<local_columns-1; j++) { neighbours = Calculate_Neighbours(cells, local_columns, 1, j); np_cells[local_columns + j] = Dead_Or_Alive(cells, local_columns, 1, j, neighbours); neighbours = 0; } #pragma omp parallel for private(j,neighbours) for(j=1; j<local_columns-1; j++) { neighbours = Calculate_Neighbours(cells, local_columns, local_rows-2, j); np_cells[local_columns*(local_rows - 2) + j] = Dead_Or_Alive(cells, local_columns, local_rows-2, j, neighbours); neighbours = 0; } /*calculating first and last column*/ #pragma omp parallel for private(i,neighbours) for(i=1; i<local_rows-1; i++) { neighbours = Calculate_Neighbours(cells, local_columns, i, 1); np_cells[local_columns*i + 1] = Dead_Or_Alive(cells, local_columns, i, 1, neighbours); neighbours = 0; } #pragma omp parallel for private(i,neighbours) for(i=1; i<local_rows-1; i++) { neighbours = Calculate_Neighbours(cells, local_columns, i, local_columns-2); np_cells[local_columns*i + local_columns - 2] = Dead_Or_Alive(cells, local_columns, i, 
local_columns-1, neighbours); neighbours = 0; } #ifdef TERMINATION_CHECK if( n % TERMCHECK_TIMES == 0) { not_duplicate = 0; not_dead = 0; /*compare current phase with the next one && check if everything is dead*/ #pragma omp parallel for private(i,j) collapse(2) for(i=1; i<local_rows-1; i++) { for(j=1; j<local_columns-1; j++) { if( cells[local_columns*i + j] != np_cells[local_columns*i + j] ) { #pragma omp critical not_duplicate = 1; } } } #pragma omp parallel for private(i,j) collapse(2) for(i=1; i<local_rows-1; i++) { for(j=1; j<local_columns-1; j++) { if( cells[local_columns*i + j] == ALIVE ) { #pragma omp critical not_dead = 1; } } } /*All reduce to check if everything is dead*/ MPI_Allreduce(&not_dead, &global_dead, 1, MPI_INT, MPI_MAX, comm); if( global_dead == 0 ) { if( rank == 0) printf("Every cell is dead!\n"); // free(cells); // cells = NULL; // free(np_cells); // np_cells = NULL; // MPI_Finalize(); // exit(EXIT_SUCCESS); } /*All reduce to check if next generation is the same as the current one*/ MPI_Allreduce(&not_duplicate, &global_duplicate, 1, MPI_INT, MPI_SUM, comm); if( global_duplicate == 0 ) { if( rank == 0 ) printf("Next generation is the same as the current one!\n"); // free(cells); // cells = NULL; // free(np_cells); // np_cells = NULL; // MPI_Finalize(); // exit(EXIT_SUCCESS); } } #endif /*===================================================> end calculation time*/ local_finish = MPI_Wtime(); local_elapsed += local_finish - local_start; if( n % REPEAT_TIMES == 0 ) { MPI_Reduce(&local_elapsed, &elapsed, 1, MPI_DOUBLE, MPI_MAX, 0, comm); local_elapsed=0; if( rank == 0 ) printf("->Elapsed time: %.10f seconds\n", elapsed); } /*swap next generation array with the current generation array*/ temp = cells; cells = np_cells; np_cells = temp; /*increment counter for loop */ n++; } /*================================================> end overall calculation time*/ local_overall_finish=MPI_Wtime(); local_overall_elapsed = local_overall_finish - 
local_overall_start; MPI_Reduce(&local_overall_elapsed, &overall_elapsed, 1, MPI_DOUBLE, MPI_MAX, 0, comm); if(rank==0) printf("->>Total time elapsed = %.10f seconds\n" , overall_elapsed); /*free allocated memory*/ free(cells); cells = NULL; free(np_cells); np_cells = NULL; MPI_Finalize(); exit(EXIT_SUCCESS); }
c_jacobi01.c
/* *********************************************************************** This program is part of the OpenMP Source Code Repository http://www.pcg.ull.es/ompscr/ e-mail: ompscr@etsii.ull.es Copyright (c) 2004, OmpSCR Group All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the University of La Laguna nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. FILE: c_jacobi01.c VERSION: 1.1 DATE: Oct 2004 AUTHORS: Author: Joseph Robicheaux, Kuck and Associates, Inc. (KAI), 1998 Modified: Sanjiv Shah, Kuck and Associates, Inc. 
(KAI), 1998 This version: Dieter an Mey, Aachen University (RWTH), 1999 - 2003 anmey@rz.rwth-aachen.de http://www.rwth-aachen.de/People/D.an.Mey.html COMMENTS TO: ompscr@etsii.ull.es DESCRIPTION: program to solve a finite difference discretization of Helmholtz equation : (d2/dx2)u + (d2/dy2)u - alpha u = f using Jacobi iterative method. COMMENTS: OpenMP version 1: two parallel regions with one parallel loop each, the naive approach. Directives are used in this code to achieve paralleism. All do loops are parallized with default 'static' scheduling. REFERENCES: http://www.rz.rwth-aachen.de/computing/hpc/prog/par/openmp/jacobi.html BASIC PRAGMAS: parallel for USAGE: ./c_jacobi01.par 5000 5000 0.8 1.0 1000 INPUT: n - grid dimension in x direction m - grid dimension in y direction alpha - Helmholtz constant (always greater than 0.0) tol - error tolerance for iterative solver relax - Successice over relaxation parameter mits - Maximum iterations for iterative solver OUTPUT: Residual and error u(n,m) - Dependent variable (solutions) f(n,m) - Right hand side function FILE FORMATS: - RESTRICTIONS: - REVISION HISTORY: **************************************************************************/ #include <stdio.h> #include <math.h> #include <stdlib.h> #include "OmpSCR.h" #define U(i,j) u[(i)*n+(j)] #define F(i,j) f[(i)*n+(j)] #define NUM_ARGS 6 #define NUM_TIMERS 1 int n, m, mits; double tol, relax, alpha; void jacobi (int n, int m, double dx, double dy, double alpha, double omega, double *u, double *f, double tol, int maxit ); /****************************************************** * Initializes data * Assumes exact solution is u(x,y) = (1-x^2)*(1-y^2) * ******************************************************/ void initialize( int n, int m, double alpha, double *dx, double *dy, double *u, double *f) { int i,j,xx,yy; *dx = 2.0 / (n-1); *dy = 2.0 / (m-1); /* Initilize initial condition and RHS */ for (j=0; j<m; j++){ for (i=0; i<n; i++){ xx = -1.0 + *dx * (i-1); yy = -1.0 + *dy 
* (j-1); U(j,i) = 0.0; F(j,i) = -alpha * (1.0 - xx*xx) * (1.0 - yy*yy) - 2.0 * (1.0 - xx*xx) - 2.0 * (1.0 - yy*yy); } } } /************************************************************ * Checks error between numerical and exact solution * ************************************************************/ void error_check( int n, int m, double alpha, double dx, double dy, double *u, double *f) { int i,j; double xx, yy, temp, error; dx = 2.0 / (n-1); dy = 2.0 / (n-2); error = 0.0; for (j=0; j<m; j++){ for (i=0; i<n; i++){ xx = -1.0 + dx * (i-1); yy = -1.0 + dy * (j-1); temp = U(j,i) - (1.0 - xx*xx) * (1.0 - yy*yy); error += temp*temp; } } error = sqrt(error)/(n*m); printf("Solution Error : %g\n", error); } int main(int argc, char **argv){ double *u, *f, dx, dy; double dt, mflops; int NUMTHREADS; char *PARAM_NAMES[NUM_ARGS] = {"Grid dimension: X dir =", "Grid dimension: Y dir =", "Helmhotlz constant =", "Successive over-relaxation parameter =", "error tolerance for iterative solver =", "Maximum iterations for solver ="}; char *TIMERS_NAMES[NUM_TIMERS] = {"Total_time"}; char *DEFAULT_VALUES[NUM_ARGS] = {"5000", "5000", "0.8", "1.0", "1e-7", "1000"}; NUMTHREADS = omp_get_max_threads(); OSCR_init (NUMTHREADS, "Jacobi Solver v1", "Use 'jacobi01' <n> <m> <alpha> <relax> <tol> <mits>", NUM_ARGS, PARAM_NAMES, DEFAULT_VALUES , NUM_TIMERS, NUM_TIMERS, TIMERS_NAMES, argc, argv); n = OSCR_getarg_int(1); m = OSCR_getarg_int(2); alpha = OSCR_getarg_double(3); relax = OSCR_getarg_double(4); tol = OSCR_getarg_double(5); mits = OSCR_getarg_int(6); printf("-> %d, %d, %g, %g, %g, %d\n", n, m, alpha, relax, tol, mits); u = (double *) OSCR_malloc(n*m*sizeof(double)); f = (double *) OSCR_malloc(n*m*sizeof(double)); /* arrays are allocated and initialzed */ initialize(n, m, alpha, &dx, &dy, u, f); /* Solve Helmholtz eqiation */ OSCR_timer_start(0); jacobi(n, m, dx, dy, alpha, relax, u,f, tol, mits); OSCR_timer_stop(0); dt = OSCR_timer_read(0); printf(" elapsed time : %12.6f\n", dt); mflops = 
(0.000001*mits*(m-2)*(n-2)*13) / dt; printf(" MFlops : %12.6g (%d, %d, %d, %g)\n",mflops, mits, m, n, dt); error_check(n, m, alpha, dx, dy, u, f); OSCR_report(1, TIMERS_NAMES); return 0; } /* subroutine jacobi (n,m,dx,dy,alpha,omega,u,f,tol,maxit) ****************************************************************** * Subroutine HelmholtzJ * Solves poisson equation on rectangular grid assuming : * (1) Uniform discretization in each direction, and * (2) Dirichlect boundary conditions * * Jacobi method is used in this routine * * Input : n,m Number of grid points in the X/Y directions * dx,dy Grid spacing in the X/Y directions * alpha Helmholtz eqn. coefficient * omega Relaxation factor * f(n,m) Right hand side function * u(n,m) Dependent variable/Solution * tol Tolerance for iterative solver * maxit Maximum number of iterations * * Output : u(n,m) - Solution ***************************************************************** */ void jacobi ( const int n, const int m, double dx, double dy, double alpha, double omega, double *u, double *f, double tol, int maxit ) { int i,j,k; double error, resid, ax, ay, b; double *uold; /* wegen Array-Kompatibilitaet, werden die Zeilen und Spalten (im Kopf) getauscht, zB uold[spalten_num][zeilen_num]; bzw. 
wir tuen so, als ob wir das gespiegelte Problem loesen wollen */ uold = (double *)OSCR_malloc(sizeof(double) * n *m); ax = 1.0/(dx * dx); /* X-direction coef */ ay = 1.0/(dy*dy); /* Y_direction coef */ b = -2.0/(dx*dx)-2.0/(dy*dy) - alpha; /* Central coeff */ error = 10.0 * tol; k = 1; while (k <= maxit && error > tol) { error = 0.0; /* copy new solution into old */ #pragma omp parallel for private(i) schedule(dynamic) for (j=0; j<m; j++) for (i=0; i<n; i++) uold[i + m*j] = u[i + m*j]; /* compute stencil, residual and update */ #pragma omp parallel for reduction(+:error) private(i,resid) schedule(dynamic) for (j=1; j<m-1; j++) for (i=1; i<n-1; i++){ resid =( ax * (uold[i-1 + m*j] + uold[i+1 + m*j]) + ay * (uold[i + m*(j-1)] + uold[i + m*(j+1)]) + b * uold[i + m*j] - f[i + m*j] ) / b; /* update solution */ u[i + m*j] = uold[i + m*j] - omega * resid; /* accumulate residual error */ error =error + resid*resid; } /* error check */ k++; error = sqrt(error) /(n*m); } /* while */ printf("Total Number of Iterations %d\n", k); printf("Residual %.15f\n\n", error); free(uold); }
ops.h
/******************************************************************************* * Copyright (c) 2015-2018 Skymind, Inc. * * This program and the accompanying materials are made available under the * terms of the Apache License, Version 2.0 which is available at * https://www.apache.org/licenses/LICENSE-2.0. * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the * License for the specific language governing permissions and limitations * under the License. * * SPDX-License-Identifier: Apache-2.0 ******************************************************************************/ #pragma once #ifndef OPS_H_ #define OPS_H_ #include <op_boilerplate.h> #include <array/DataTypeUtils.h> #include <helpers/shape.h> #include <vector> #include <Environment.h> #include <loops/summarystatsreduce.h> #include <loops/ReduceType.h> #define MIN_V 1e-12 #define MAX_FLOAT 1e37 #define MIN_FLOAT 1e-37 #define MAX_INT 2147483647 #define MIN_CUTFOFF -3.79297773665f #define FLOAT_MIN_NORMAL 1.17549435e-38 #define EPS 1e-5 #define AFFINITY close #define DOUBLE_PI_T T(2.0 * 3.14159265358979323846) #define DOUBLE_PI_X X(2.0 * 3.14159265358979323846) #define no_op_exec_special_any static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_bool static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_same static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong 
*tadOffsets) {} #define no_op_exec_special static const bool requiresSpecial = false; static void execSpecial(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, Z *extraParams, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #define no_op_exec_special_accumulation_long static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #define no_op_exec_special_accumulation_same static const bool requiresSpecialAccumulation = false; static void execSpecial(X *x, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfoBuffer, int *dimension, int dimensionLength, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffset){} #ifdef __CUDACC__ #define no_op_exec_special_any_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_bool_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, Z *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_same_cuda static __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeBuffer, X *result, Nd4jLong *resultShapeBuffer, X *extraParams, int *allocationPointer, X *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_cuda static __device__ void execSpecialCuda(X *dx, 
Nd4jLong *xShapeBuffer,Z *result, Nd4jLong *resultShapeBuffer,Z *extraParams, int *allocationPointer, Z *reductionPointer, Nd4jLong *tadShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_same_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, X *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, X *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_long_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, X *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #define no_op_exec_special_accumulation_cuda static inline __device__ void execSpecialCuda(X *dx, Nd4jLong *xShapeInfo, Z *extraParams, Z *result, Nd4jLong *resultShapeInfo, int *dimension, int dimensionLength, Z *reductionBuffer, Nd4jLong *tadOnlyShapeInfo, Nd4jLong *tadOffsets) {} #else // hacky fix for isnan/being being out of scope //#ifdef IOS //#define isinf(x) 0 // this isn't right. 
But std::isinf fails //#define isnan(x) 0 //#else //#define isnan std::isnan //#define isinf std::isinf //#endif #define no_op_exec_special_cuda #define no_op_exec_special_accumulation_cuda #define no_op_exec_special_accumulation_same_cuda #define no_op_exec_special_accumulation_long_cuda #define no_op_exec_special_any_cuda #define no_op_exec_special_bool_cuda #define no_op_exec_special_same_cuda #define no_op_exec_special_accumulation_same_cuda #endif #define SELU_ALPHA 1.6732632423543772848170429916717 #define SELU_LAMBDA 1.0507009873554804934193349852946 #ifdef _OPENMP #pragma omp declare reduction(maxTF : float,double,float16,bfloat16 : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=-MAX_FLOAT) #pragma omp declare reduction(minTF : float,double,float16,bfloat16 : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=MAX_FLOAT) #pragma omp declare reduction(maxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_max(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(minT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_min(omp_in, omp_out) )\ initializer (omp_priv=0) #pragma omp declare reduction(amaxT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_max(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(aminT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_min(nd4j::math::nd4j_abs(omp_in), nd4j::math::nd4j_abs(omp_out)) ) #pragma omp declare reduction(asumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = nd4j::math::nd4j_abs(omp_in) + 
nd4j::math::nd4j_abs(omp_out))\ initializer (omp_priv=0) #pragma omp declare reduction(sumT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = omp_in + omp_out)\ initializer (omp_priv=0) #pragma omp declare reduction(prodT : float,double,float16,bfloat16,int,Nd4jLong,Nd4jULong,int8_t,uint8_t,bool,int16_t,uint16_t,uint32_t : \ omp_out = omp_in * omp_out)\ initializer (omp_priv=1) #endif namespace functions { namespace indexreduce { template <typename T> struct IndexValue { T value; Nd4jLong index; _CUDA_HD IndexValue() = default; _CUDA_HD IndexValue(const T val, const Nd4jLong ind): index(ind), value(val) {} }; } namespace summarystats { template <typename T> class SummaryStatsData; } } namespace simdOps { template <typename X, typename Y, typename Z> class Add { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 + d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 + d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 + params[0]); } op_def static X startingValue() { return static_cast<X>(0.f); } }; template <typename X, typename Y> class NewAdd { public: op_def static X op(X d1, Y d2, X *params) { return d1 + d2; } }; template <typename X, typename Y, typename Z> class Subtract { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 - d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 - d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 - params[0]); } }; template <typename X, typename Y, typename Z> class SquaredSubtract { public: op_def static Z op(X d1, Y d2) { auto d = static_cast<Z>(d1 - d2); return d * d; } op_def static Z op(X d1, Y d2, Z *params) { auto d = static_cast<Z>(d1 - d2); return d * d; } op_def static Z op(X d1) { return d1; } 
// op for MetaOps op_def static Z op(X d1, Y *params) { auto d = static_cast<Z>(d1 - params[0]); return d * d; } }; template <typename X, typename Y, typename Z> class SquaredReverseSubtract { public: op_def static Z op(X d1, Y d2) { auto d = static_cast<Z>(d2 - d1); return d * d; } op_def static Z op(X d1, Y d2, Z *params) { auto d = static_cast<Z>(d2 - d1); return d * d; } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { auto d = static_cast<Z>(params[0] - d1); return d * d; } }; template <typename X, typename Y, typename Z> class ReverseSubtract { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 - d1); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 - d1); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] - d1); } }; template <typename X, typename Y, typename Z> class LogPoissonLossFull { public: op_def static Z op(X z, Y c) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz))); } op_def static Z op(X z, Y c, Z *params) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc + (zz * nd4j::math::nd4j_log<X, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz))); } op_def static Z op(X z) { auto zz = static_cast<Z>(z); return (zz * nd4j::math::nd4j_log<Y, Z>(z) - zz + static_cast<Z>(0.5f) * nd4j::math::nd4j_log<Z, Z>(static_cast<Z>(DOUBLE_PI_X) * zz)); } // op for MetaOps op_def static X op(X z, Y *params) { return (nd4j::math::nd4j_exp<X, X>(params[0]) - z * params[0] + (z * nd4j::math::nd4j_log<X, Z>(z) - z + static_cast<X>(0.5f) * nd4j::math::nd4j_log<X, Z>(DOUBLE_PI_X * z))); } }; template <typename X, typename Y, 
typename Z> class LogPoissonLoss { public: op_def static Z op(X z, Y c) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc); } op_def static Z op(X z, Y c, Z *params) { auto zz = static_cast<Z>(z); auto zc = static_cast<Z>(c); return (nd4j::math::nd4j_exp<Y, Z>(c) - zz * zc); } op_def static Z op(X z) { return static_cast<Z>(z); } // op for MetaOps op_def static Z op(X z, Y *params) { return (nd4j::math::nd4j_exp<Y, Z>(params[0]) - static_cast<Z>(z) * static_cast<Z>(params[0])); } }; template <typename X, typename Y, typename Z> class Multiply { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 * d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 * d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 * params[0]); } op_def static X startingValue() { return static_cast<X>(1.f); } }; template <typename X, typename Y, typename Z> class Divide { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d1 / d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d1 / d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1 / params[0]); } op_def static X startingValue() { return static_cast<X>(1); } }; template <typename X, typename Y, typename Z> class DivideNoNan { public: op_def static Z op(X d1, Y d2) { if (d2 == (Y)0) return (Z)0; return static_cast<Z>(d1 / d2); } op_def static Z op(X d1, Y d2, Z *params) { if (d2 == (Y)0) return (Z)0; return static_cast<Z>(d1 / d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { if (params[0] == (Y)0) return (Z)0; return static_cast<Z>(d1 / params[0]); } op_def static X startingValue() { return static_cast<X>(1); } }; template <typename X, typename Y, typename Z> class 
SafeDivide {
	public:
		// Division that maps x/0 to 0 instead of producing inf/NaN (floats)
		// or undefined behavior (integers).
		op_def static Z op(X d1, Y d2) {
			if(d2 == static_cast<Y>(0))
				return static_cast<Z>(0);
			return static_cast<Z>(d1 / d2);
		}

		op_def static Z op(X d1, Y d2, Z *params) {
			if(d2 == static_cast<Y>(0))
				return static_cast<Z>(0);
			return static_cast<Z>(d1 / d2);
		}

		// Unary form: identity (pass-through cast).
		op_def static Z op(X d1) {
			return static_cast<Z>(d1);
		}

		// op for MetaOps: scalar divisor comes in via params[0]
		op_def static Z op(X d1, Y *params) {
			if(params[0] == static_cast<Y>(0))
				return static_cast<Z>(0);
			return static_cast<Z>(d1 / params[0]);
		}
	};

	// Division rounded toward negative infinity (Python-style //).
	// NOTE(review): for integral X/Y the inner d1 / d2 already truncates
	// toward zero before the floor is applied — confirm intended semantics
	// for negative integer operands.
	template <typename X, typename Y, typename Z>
	class FloorDiv {
	public:
		op_def static Z op(X d1, Y d2) {
			return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
		}

		op_def static Z op(X d1, Y d2, Z *params) {
			return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / d2));
		}

		op_def static Z op(X d1) {
			return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1));
		}

		// op for MetaOps
		op_def static Z op(X d1, Y *params) {
			return nd4j::math::nd4j_floor<Z,Z>(static_cast<Z>(d1 / params[0]));
		}
	};

	// C-style truncating division: both operands are narrowed to int first,
	// so fractional parts are discarded before dividing.
	// NOTE(review): divisor 0 is not guarded here (unlike SafeDivide /
	// DivideNoNan) — integer division by zero is UB; presumably callers
	// guarantee a nonzero divisor. Verify.
	template <typename X, typename Y, typename Z>
	class TruncateDiv {
	public:
		op_def static Z op(X d1, Y d2) {
			auto i1 = static_cast<int>(d1);
			auto i2 = static_cast<int>(d2);
			return static_cast<Z>(i1 / i2);
		}

		op_def static Z op(X d1, Y d2, Z *params) {
			auto i1 = static_cast<int>(d1);
			auto i2 = static_cast<int>(d2);
			return static_cast<Z>(i1 / i2);
		}

		op_def static Z op(X d1) {
			return d1;
		}

		// op for MetaOps
		op_def static Z op(X d1, Y *params) {
			auto i1 = static_cast<int>(d1);
			auto i2 = static_cast<int>(params[0]);
			return static_cast<Z>(i1 / i2);
		}
	};

	// C-style truncating remainder on int-narrowed operands; sign follows
	// the dividend (like C's %). Same unguarded-zero caveat as TruncateDiv.
	template <typename X, typename Y, typename Z>
	class TruncateMod {
	public:
		op_def static Z op(X d1, Y d2) {
			auto i1 = static_cast<int>(d1);
			auto i2 = static_cast<int>(d2);
			return static_cast<Z>(i1 % i2);
		}

		op_def static Z op(X d1, Y d2, Z *params) {
			auto i1 = static_cast<int>(d1);
			auto i2 = static_cast<int>(d2);
			return static_cast<Z>(i1 % i2);
		}

		op_def static Z op(X d1) {
			return static_cast<Z>(d1);
		}

		// op for MetaOps
		op_def static Z op(X d1, Y *params) {
			auto i1 =
static_cast<int>(d1); auto i2 = static_cast<int>(params[0]); return static_cast<Z>(i1 % i2); } }; template<typename X, typename Y, typename Z> class Remainder { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_remainder<X, Y, Z>(d1, params[0]); } }; template <typename X, typename Y, typename Z> class FMod { public: op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return nd4j::math::nd4j_fmod<X, Y, Z>(d1, params[0]); } }; template <typename X, typename Y, typename Z> class FloorMod { public: op_def static Z op(X d1, Y d2) { auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); return (d1 < static_cast<X>(0)) == (d2 < static_cast<Y>(0)) ? m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2); } op_def static Z op(X d1, Y d2, Z *params) { auto m = nd4j::math::nd4j_fmod<X, Y, Z>(d1, d2); return (d1 < static_cast<X>(0.0f)) == (d2 < static_cast<Y>(0)) ? 
m : nd4j::math::nd4j_fmod<Z, Y, Z>(m + static_cast<Z>(d2), d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return op(d1, params[0]); } }; template <typename X, typename Y, typename Z> class ReverseDivide { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 / d1); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2 / d1); } op_def static Z op(X d1) { return static_cast<Z>(d1); } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(params[0] / d1); } }; template <typename X, typename Y, typename Z> class CopyPws { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); } }; template <typename X> class Copy { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1; } }; template <typename X, typename Y, typename Z> class Copy2 { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } op_def static Z op(X d1, Y *params) { return static_cast<Z>(d1); } }; template <typename X, typename Y, typename Z> class Axpy { public: op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2 + d1); } op_def static Z op(X d1, Y d2, Z *params) { auto alpha = params[0]; return alpha * static_cast<Z>(d1) + static_cast<Z>(d2); } op_def static Z op(X d1) { return static_cast<Z>(d1); } }; template <typename X, typename Z> class Assign { public: no_op_exec_special_any no_op_exec_special_any_cuda op_def static Z op(X d1, X *params) { return static_cast<Z>(d1); } }; template <typename X, typename Z> class And { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z 
op(X d1, X d2) { return d2 + d1; } op_def static Z op(X d1, X d2, X *params) { if (params != nullptr) { auto comp = params[0]; return d1 != comp && d2 != comp ? static_cast<Z>(1) : static_cast<Z>(0); } else { auto b1 = static_cast<bool>(d1); auto b2 = static_cast<bool>(d2); return (b1 && b2) ? static_cast<Z>(1) : static_cast<Z>(0); } } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, X *params) { return static_cast<Z>(119); } }; template <typename X> class IntOr { public: op_def static X op(X d1, X d2) { return d2 | d1; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class IntAnd { public: op_def static X op(X d1, X d2) { return d2 & d1; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class IntXor { public: op_def static X op(X d1, X d2) { return d2 ^ d1; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class ShiftLeft { public: op_def static X op(X d1, X d2) { return d1 << d2; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class ShiftRight { public: op_def static X op(X d1, X d2) { return d1 >> d2; } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class CyclicShiftLeft { public: op_def static X op(X d1, X d2) { return d1 << d2 | d1 >> ((sizeof(X) * 8) - d2); } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X> class CyclicShiftRight { public: op_def static X op(X d1, X d2) { return d1 >> d2 | d1 << ((sizeof(X) * 8) - d2); } op_def static X op(X d1, X d2, X *params) { return op(d1, d2); } }; template <typename X, typename Z> class Or { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z op(X d1, X d2) { return d2 + d1; } op_def static Z op(X d1, X d2, X *params) { if (params != nullptr) { auto comp = params[0]; return d1 != comp || d2 != comp ? 
static_cast<Z>(1) : static_cast<Z>(0); } else { auto b1 = static_cast<bool>(d1); auto b2 = static_cast<bool>(d2); return b1 || b2 ? static_cast<Z>(1) : static_cast<Z>(0); } } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, X *params) { return static_cast<Z>(119); } }; template <typename X, typename Z> class Xor { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z op(X d1, X d2) { return d2 + d1; } op_def static Z op(X d1, X d2, X *params) { if (params != nullptr) { auto comp = params[0]; return ((d1 == comp && d2 != comp) || (d1 != comp && d2 == comp)) ? static_cast<Z>(1) : static_cast<Z>(0); } else { auto b1 = static_cast<bool>(d1); auto b2 = static_cast<bool>(d2); return (!b1 && b2 )||(b1 && !b2) ? static_cast<Z>(1) : static_cast<Z>(0); } } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Z> class Not { public: no_op_exec_special_bool no_op_exec_special_bool_cuda op_def static Z op(X d1, X d2) { return static_cast<Z>(0); } op_def static Z op(X d1, X d2, X *params) { return d1 != d2 ? 
static_cast<Z>(1) : static_cast<Z>(0); } // this transform op should run only on boolean input op_def static Z op(X d1, X *params) { auto b1 = static_cast<bool>(d1); return !b1; } }; template <typename X, typename Y, typename Z> class LogicalNot { public: op_def static Z op(X d1, Y d2) { return !((int) d1 && (int) d2); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<X>(!(static_cast<int>(d1) && static_cast<int>(d2))); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<X>(119); } }; template <typename X, typename Y, typename Z> class LogicalXor { public: op_def static Z op(X d1, Y d2) { auto i1 = static_cast<int>(d1); auto i2 = static_cast<int>(d2); return (i1 | i2) &~ (i1 & i2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(119); } }; template <typename X, typename Y, typename Z> class LogicalAnd { public: op_def static Z op(X d1, Y d2) { return static_cast<int>(d1) & static_cast<int>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(Y d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<Z>(119); } }; template <typename X, typename Y, typename Z> class LogicalOr { public: op_def static Z op(X d1, Y d2) { return static_cast<int>(d1) | static_cast<int>(d2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1) { return d1; } // op for MetaOps op_def static Z op(X d1, Y *params) { return static_cast<X>(119); } }; template <typename X, typename Y, typename Z> class Mod { public: /* // just a optional note, feel free to remove later op_def static half op(half d1, half d2, half *params) { return __float2half(simdOps::Mod<float>::op(__half2float(d1), __half2float(d2), nullptr)); } */ op_def static Z op(X d1, Y d2) { return 
static_cast<int>(d1) % static_cast<int>(d2);
		}

		op_def static Z op(X d1, Y d2, Z *params) {
			return op(d1, d2);
		}

		// op for MetaOp
		op_def static Z op(X d1, Y *params) {
			return op(d1, params[0]);
		}
	};

	// Mod with operands swapped: d2 % d1 (both narrowed to int first).
	template <typename X, typename Y, typename Z>
	class ReverseMod {
	public:
		op_def static Z op(X d1, Y d2) {
			return static_cast<int>(d2) % static_cast<int>(d1);
		}

		op_def static Z op(X d1, Y d2, Z *params) {
			return op(d1, d2);
		}

		// op for MetaOp
		op_def static Z op(X d1, Y *params) {
			return op(d1, params[0]);
		}
	};

	/**
	 * Whether 2 elements in an array
	 * are epsilon equal: |d1 - d2| <= MIN_V (absolute tolerance only).
	 */
	template <typename X, typename Z>
	class Epsilon {
	public:
		op_def static Z op(X d1, X d2) {
			X diff = d1 - d2;
			X absDiff = nd4j::math::nd4j_abs<X>(diff);
			if (absDiff <= static_cast<X>(MIN_V))
				return static_cast<Z>(1);
			return static_cast<Z>(0);
		}

		op_def static Z op(X d1, X d2, X *params) {
			return op(d1, d2);
		}

		op_def static Z op(X d1, X *params) {
			return d1;
		}
	};

	// Exact equality comparison; result is 1/0 in type Z.
	template <typename X, typename Z>
	class EqualTo {
	public:
		op_def static Z op(X d1, X d2) {
			return d1 == d2;
		}

		op_def static Z op(X d1, X d2, X *params) {
			return op(d1, d2);
		}

		op_def static Z op(X d1, X *params) {
			return d1;
		}
	};

	// Exact inequality comparison.
	template <typename X, typename Z>
	class NotEqualTo {
	public:
		op_def static Z op(X d1, X d2) {
			return d1 != d2;
		}

		op_def static Z op(X d1, X d2, X *params) {
			return op(d1, d2);
		}

		op_def static Z op(X d1, X *params) {
			return d1;
		}
	};

	// d1 >= d2 comparison.
	template <typename X, typename Z>
	class GreaterThanOrEqual {
	public:
		op_def static Z op(X d1, X d2) {
			return d1 >= d2;
		}

		op_def static Z op(X d1, X d2, X *params) {
			return op(d1, d2);
		}

		// FIXME: this signature clashes with MetaOp stuff
		op_def static Z op(X d1, X *params) {
			return d1;
		}
	};

	// d1 > d2 comparison.
	template <typename X, typename Z>
	class GreaterThan {
	public:
		op_def static Z op(X d1, X d2) {
			return d1 > d2;
		}

		op_def static Z op(X d1, X d2, X *params) {
			return op(d1, d2);
		}

		// FIXME: this signature clashes with MetaOp stuff
		op_def static Z op(X d1, X *params) {
			return d1;
		}
	};

	template <typename X, typename
Z> class LessThan { public: op_def static Z op(X d1, X d2) { return d1 < d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } op_def static Z op(X d1, X *params) { return d1; } }; template <typename X, typename Z> class LessThanOrEqual { public: op_def static Z op(X d1, X d2) { return d1 <= d2; } op_def static Z op(X d1, X d2, X *params) { return op(d1, d2); } op_def static Z op(X d1, X *params) { return d1; } }; template <typename X> class Abs { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_abs<X>(d1); } }; template <typename X> class Ceiling { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_ceil<X,X>(d1); } }; template <typename X> class Cosine { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_cos<X,X>(d1); } }; template <typename X> class Exp { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_exp<X, X>(d1); } }; template <typename X> class HardTanhDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return ((d1 >= static_cast<X>(-1.f) && d1 <= static_cast<X>(1.f)) ? 
static_cast<X>(1.f) : static_cast<X>(0.f)); } }; template <typename X> class HardTanh { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 < static_cast<X>(-1)) return static_cast<X>(-1); else if (d1 > static_cast<X>(1)) return static_cast<X>(1); else return d1; } }; template <typename X> class Floor { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_floor<X,X>(d1); } }; template <typename X> class Log { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(d1); } }; template <typename X> class Log1p { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_log<X, X>(1 + d1); } }; template <typename X, typename Y, typename Z> class LogX { public: op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_log<X, Z>(d1) / nd4j::math::nd4j_log<Y, Z>(d2) ; } }; template <typename X> class StabilizeFP16 { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 <= static_cast<X>(0)) return static_cast<X>(nd4j::DataTypeUtils::min<float16>()); else return d1; } }; template <typename X> class StabilizeX { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { if (d1 <= static_cast<X>(0)) return nd4j::DataTypeUtils::min<X>(); else return d1; } }; template <typename X> class SpecialDerivative { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return d1 * (static_cast<X>(1.f) - d1); } }; template <typename X> class Neg { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return -d1; } }; template <typename X> class Erf { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X 
*params) { return nd4j::math::nd4j_erf<X,X>(d1); } }; template <typename X> class Erfc { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { return nd4j::math::nd4j_erfc<X,X>(d1); } }; template <typename X> class Reciprocal { public: no_op_exec_special_same no_op_exec_special_same_cuda // op_def static T op(T d1) { // return (T(1.0f) / d1); // } // op for MetaOps op_def static X op(X d1, X *params) { return (static_cast<X>(1) / d1); } }; template <typename X, typename Z> class Sqr { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)); } op_def static Z op(X d1) { return nd4j::math::nd4j_pow<X, X, Z>(d1, static_cast<X>(2)); } }; template <typename X, typename Y, typename Z> class RelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_re<X>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return op(d1, d2); } op_def static Z op(X d1) { return static_cast<Z>(0); } }; template <typename X, typename Y, typename Z> class BinaryRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { X threshold = params[0]; return nd4j::math::nd4j_re<X>(d1, d2) > threshold ? static_cast<Z>(1) : static_cast<Z>(0); } op_def static Z op(X d1) { return static_cast<Z>(0); } }; template <typename X, typename Y, typename Z> class BinaryMinimumAbsoluteRelativeError { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, X *params) { X d2 = params[0]; X thresholdRelative = params[1]; X thresholdAbsolute = params[2]; return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? 
static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0); } op_def static Z op(X d1, Y d2, Z *params) { X thresholdRelative = params[0]; X thresholdAbsolute = params[1]; return nd4j::math::nd4j_re<X>(d1, d2) > thresholdRelative ? (nd4j::math::nd4j_abs<X>(d1 - static_cast<X>(d2)) < thresholdAbsolute ? static_cast<Z>(0) : static_cast<Z>(1)) : static_cast<Z>(0); } op_def static Z op(X d1) { return static_cast<Z>(0); } }; template <typename X, typename Y, typename Z> class ReversePow { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(params[0], d1); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_pow<X, Y, Z>(d2, d1); } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Y, typename Z> class Pow { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return nd4j::math::nd4j_pow<X, X, Z>(d1, params[0]); } op_def static Z op(X d1, Y d2) { return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2); } op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_pow<X, Y, Z>(d1, d2); } op_def static Z op(X d1) { return d1; } }; template <typename X, typename Y, typename Z> class PowDerivative { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Z *params) { return params[0] * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(params[0]) - static_cast<Z>(1.f)); } op_def static Z op(X d1, Y d2) { return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f)); } op_def static Z op(X d1, Y d2, Z *params) { return static_cast<Z>(d2) * nd4j::math::nd4j_pow<X, Z, Z>(d1, static_cast<Z>(d2) - static_cast<Z>(1.f)); } op_def static Z op(X d1) { return d1; } }; template <typename X> class Round { public: no_op_exec_special_same no_op_exec_special_same_cuda op_def static X op(X d1, X *params) { 
return nd4j::math::nd4j_round<X,X>(d1);
		}
	};

	// Predicate/reduction op: element-wise "is NaN" test; as a reduction it
	// sums the per-element 1/0 flags (count of NaNs).
	template <typename X, typename Z>
	class IsNan {
	public:
		no_op_exec_special_bool
		no_op_exec_special_bool_cuda

		no_op_exec_special_accumulation
		no_op_exec_special_accumulation_cuda

		// NOTE(review): casts the 1/0 result to X although the declared
		// return type is Z (implicit conversion follows) — consider
		// static_cast<Z> for consistency with IsInf below.
		op_def static Z op(X d1, X *params) {
			return nd4j::math::nd4j_isnan(d1) ? static_cast<X>(1) : static_cast<X>(0);
		}

		// Reduction starts from 0 (no NaNs seen yet).
		op_def static X startingValue(const X *input) {
			return static_cast<X>(0);
		}

		// Partial results combine by addition (flag counting).
		op_def static Z merge(X old, X opOutput, X *extraParams) {
			return opOutput + old;
		}

		op_def static Z update(X old, X opOutput, X *extraParams) {
			return opOutput + old;
		}

		op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
			return reduction;
		}
	};

	// exp(d1) - 1.
	// NOTE(review): computed directly rather than via an expm1 primitive, so
	// precision for |d1| near 0 is limited by the subtraction.
	template <typename X>
	class Expm1 {
	public:
		no_op_exec_special_same
		no_op_exec_special_same_cuda

		op_def static X op(X d1, X *params) {
			return nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(1);
		}
	};

	// Predicate/reduction op: element-wise d1 > 0; reduction counts positives.
	template <typename X, typename Z>
	class IsPositive {
	public:
		no_op_exec_special_bool
		no_op_exec_special_bool_cuda

		no_op_exec_special_accumulation
		no_op_exec_special_accumulation_cuda

		op_def static Z op(X d1, X *params) {
			return d1 > (X)0.f;
		}

		op_def static X startingValue(const X *input) {
			return static_cast<X>(0);
		}

		op_def static Z merge(X old, X opOutput, X *extraParams) {
			return opOutput + old;
		}

		op_def static Z update(X old, X opOutput, X *extraParams) {
			return opOutput + old;
		}

		op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
			return reduction;
		}
	};

	// Predicate/reduction op: element-wise "is infinite" test.
	template <typename X, typename Z>
	class IsInf {
	public:
		no_op_exec_special_bool
		no_op_exec_special_bool_cuda

		no_op_exec_special_accumulation
		no_op_exec_special_accumulation_cuda

		op_def static Z op(X d1, X *params) {
			return nd4j::math::nd4j_isinf<X>(d1) ?
static_cast<Z>(1) : static_cast<Z>(0); } op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput + old; } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class IsInfOrNan{ public: no_op_exec_special_bool no_op_exec_special_bool_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) && old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction != static_cast<X>(0); } }; template <typename X, typename Z> class IsFinite { public: no_op_exec_special_bool no_op_exec_special_bool_cuda no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda op_def static Z op(X d1, X *params) { return nd4j::math::nd4j_isfin<X>(d1) ? static_cast<Z>(1) : static_cast<Z>(0); } op_def static X startingValue(const X *input) { return static_cast<X>(1); } op_def static Z merge(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? static_cast<Z>(0) : static_cast<Z>(1); } op_def static Z update(X old, X opOutput, X *extraParams) { return opOutput == static_cast<X>(0) || old == static_cast<X>(0) ? 
// (continuation of IsFinite::update / postProcess from the previous chunk)
static_cast<Z>(0) : static_cast<Z>(1); }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return reduction != static_cast<X>(0);
        }
    };

    // ClipByValue: clamp into [params[0], params[1]].
    template <typename X>
    class ClipByValue {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            if (d1 > params[1])
                return params[1];
            if (d1 < params[0])
                return params[0];
            return d1;
        }
    };

    // LstmClip: symmetric clamp into [-d2, d2] (cell-state clipping).
    template <typename X, typename Y, typename Z>
    class LstmClip {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            X _v = (X) d2;
            if (d1 > _v)
                return _v;
            else if (d1 < -_v)
                return -_v;
            else return d1;
        }
    };

    // Swish: x * sigmoid(x).
    template <typename X>
    class Swish {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 * nd4j::math::nd4j_sigmoid<X,X>(d1);
        }
    };

    // GELU (sigmoid approximation): x * sigmoid(1.702 * x).
    template <typename X>
    class GELU {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 * nd4j::math::nd4j_sigmoid<X,X>(static_cast<X>(1.702f) * d1);
        }
    };

    // PreciseGELU (tanh approximation):
    // 0.5 * x * (1 + tanh(sqrt(2/pi) * (x + 0.044715 * x^3))) — as coded below.
    template <typename X>
    class PreciseGELU {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto sp = nd4j::math::nd4j_sqrt<X, X>(static_cast<X>(2) / static_cast<X>(M_PI));
            auto xp = d1 + nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(0.044715) * d1, static_cast<X>(3));
            return (d1 / static_cast<X>(2)) * (static_cast<X>(1) + nd4j::math::nd4j_tanh<X, X>(sp * xp));
        }
    };

    // GELUDerivative: derivative of the sigmoid-approximated GELU.
    template <typename X>
    class GELUDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto x17 = static_cast<X>(1.702f) * d1;
            auto ep = nd4j::math::nd4j_pow<X,X,X>(static_cast<X>(M_E), x17);
            // (E^(1.702 x) (1. + E^(1.702 x) + 1.702 x))/(1. + E^(1.702 x))^2
            return (ep * (static_cast<X>(1.f) + ep + x17)) / nd4j::math::nd4j_pow<X, int, X>((static_cast<X>(1.f) + ep), 2);
        }
    };

    // PreciseGELUDerivative: derivative of the tanh-approximated GELU.
    template <typename X>
    class PreciseGELUDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto x79 = static_cast<X>(0.797885) * d1;
            auto x03 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0356774) * d1, 3);
            auto x39 = static_cast<X>(0.398942) * d1;
            auto x05 = nd4j::math::nd4j_pow<X, int, X>(static_cast<X>(0.0535161) * d1, 3);
            auto scz = nd4j::math::nd4j_sech<X, X>(x79 + x03);

            // 0.5 + (0.398942 x + 0.0535161 x^3) Sech[0.797885 x + 0.0356774 x^3]^2 + 0.5 Tanh[0.797885 x + 0.0356774 x^3]
            return static_cast<X>(0.5) + (x39 + x05) * (scz * scz) + static_cast<X>(0.5) * nd4j::math::nd4j_tanh<X, X>(x79 + x03);
        }
    };

    // SwishDerivative: d/dx (x * sigmoid(x)) = e^x (x + e^x + 1) / (e^x + 1)^2.
    template <typename X>
    class SwishDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            X ex = nd4j::math::nd4j_pow<X, X, X>(static_cast<X>(M_E), d1);
            return (ex * (d1 + ex + static_cast<X>(1.f))) / nd4j::math::nd4j_pow<X, X, X>((ex + static_cast<X>(1.f)) , static_cast<X>(2.f));
        }
    };

    // LogSigmoid: log(sigmoid(x)).
    template <typename X>
    class LogSigmoid {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_log<X, X>(nd4j::math::nd4j_sigmoid<X, X>(d1));
        }
    };

    // LogSigmoidDerivative: d/dx log(sigmoid(x)) = 1 / (e^x + 1).
    template <typename X>
    class LogSigmoidDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            X ex = nd4j::math::nd4j_pow<X, X, X>(M_E, d1);
            return static_cast<X>(1.f) / (ex + static_cast<X>(1.f));
        }
    };

    // Sigmoid: 1 / (1 + e^-x).
    template <typename X>
    class Sigmoid {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_sigmoid<X, X>(d1);
        }
    };

    // SigmoidDerivative: sigmoid(x) * (1 - sigmoid(x)).
    template <typename X>
    class SigmoidDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_sigmoidderivative<X, X>(d1);
        }
    };

    // HardSigmoid: clamp(0.2 * x + 0.5, 0, 1).
    template <typename X>
    class HardSigmoid {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_min<X>(static_cast<X>(1), nd4j::math::nd4j_max<X>(static_cast<X>(0), (static_cast<X>(0.2f)) * d1 + static_cast<X>(0.5f)));
        }
    };

    // HardSigmoidDerivative: 0.2 inside (-2.5, 2.5), 0 outside.
    template <typename X>
    class HardSigmoidDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 < static_cast<X>(-2.5f) || d1 > static_cast<X>(2.5f) ? static_cast<X>(0.f) : static_cast<X>(0.2f);
        }
    };

    /**
     * Scale to be between a min and max
     */
    template <typename X>
    class SetRange {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto min = params[0];
            auto max = params[1];
            // values already in range pass through unchanged
            if (static_cast<X>(d1) >= min && static_cast<X>(d1) <= max)
                return d1;
            // special-case unit range: squash through a sigmoid first
            if (min == static_cast<X>(0) && max == static_cast<X>(1)) {
                auto val = static_cast<X>(1) / (static_cast<X>(1) + nd4j::math::nd4j_exp<X, X>(-d1));
                return (nd4j::math::nd4j_floor<X,X>(val * (max - min)) + min);
            }

            return (nd4j::math::nd4j_floor<X,X>(d1 * (max - min)) + min);
        }
    };

    // Sin: elementwise sine.
    template <typename X>
    class Sin {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_sin<X,X>(d1);
        }
    };

    // Square: x * x.
    template <typename X>
    class Square {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 * d1;
        }
    };

    // Sqrt: square root.
    template <typename X, typename Z>
    class Sqrt {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) {
            return nd4j::math::nd4j_sqrt<X, Z>(d1);
        }
    };

    // RSqrt: reciprocal square root, 1 / sqrt(x).
    template <typename X, typename Z>
    class RSqrt {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Z *params) {
            return static_cast<Z>(1) / nd4j::math::nd4j_sqrt<X, Z>(d1);
        }
    };

    // Rint: round to nearest integer value (body continues in next chunk).
    template <typename X>
    class Rint {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda
// (continuation of class Rint from the previous chunk)
op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_rint<X,X>(d1);
        }
    };

    // SoftPlus: log(1 + e^x).
    template <typename X>
    class SoftPlus {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::softplus<X, X>(d1);
        }
    };

    // Sign: -1, 0 or +1 via the classic (x > 0) - (x < 0) trick.
    template <typename X>
    class Sign {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return (d1 > static_cast<X>(0)) - (d1 < static_cast<X>(0));
        }
    };

    // TimesOneMinus: x * (1 - x).
    template <typename X>
    class TimesOneMinus {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 * (static_cast<X>(1) - d1);
        }
    };

    // RationalTanh: 1.7159 * rational approximation of tanh(2x/3).
    template <typename X>
    class RationalTanh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            // keep 2/3 as runtime variable, to match precision
            auto dis = (static_cast<X>(2) / static_cast<X>(3)) * d1;

            auto tanh = nd4j::math::nd4j_sgn<X,X>(dis) * (static_cast<X>(1) - (static_cast<X>(1) / (static_cast<X>(1) + static_cast<X>(nd4j::math::nd4j_abs<X>(dis)) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4)) )));
            return static_cast<X>(1.7159f) * tanh;
        }
    };

    // RationalTanhDerivative: derivative of RationalTanh above.
    template <typename X>
    class RationalTanhDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            auto dis = (static_cast<X>(2.f) / static_cast<X>(3.f)) * d1;

            auto a = static_cast<X>(1.f) + nd4j::math::nd4j_abs<X>(dis) + nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(2.f)) + static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(4));

            auto tDeriv = (static_cast<X>(1.f) + nd4j::math::nd4j_sign<X,X>(dis) * (static_cast<X>(2.f) * dis + static_cast<X>(4.f) * static_cast<X>(1.41645f) * nd4j::math::nd4j_pow<X, X, X>(dis, static_cast<X>(3)))) / (a * a);

            return static_cast<X>(1.7159f) * (static_cast<X>(2.f) / static_cast<X>(3.f)) * tDeriv;
        }
    };

    // Tanh: hyperbolic tangent.
    template <typename X>
    class Tanh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_tanh<X, X>(d1);
        }
    };

    // RectifiedTanh: max(0, tanh(x)).
    template <typename X>
    class RectifiedTanh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_max<X>(static_cast<X>(0), nd4j::math::nd4j_tanh<X,X>(d1));
        }
    };

    // RectifiedTanhDerivative: tanh'(x) for x > 0, else 0.
    template <typename X>
    class RectifiedTanhDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 > static_cast<X>(0.f) ? nd4j::math::nd4j_tanhderivative<X,X>(d1) : static_cast<X>(0.f);
        }
    };

    // ATanh: inverse hyperbolic tangent.
    template <typename X>
    class ATanh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_atanh<X,X>(d1);
        }
    };

    // TanhDerivative: 1 - tanh(x)^2.
    template <typename X>
    class TanhDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_tanhderivative<X,X>(d1);
        }
    };

    // Cube: x^3.
    template <typename X>
    class Cube {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 * d1 * d1;
        }
    };

    // CubeDerivative: 3 * x^2.
    template <typename X>
    class CubeDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return static_cast<X>(3) * d1 * d1;
        }
    };

    // ACos: arc cosine.
    template <typename X>
    class ACos {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_acos<X, X>(d1);
        }
    };

    // ASinh: inverse hyperbolic sine.
    template <typename X>
    class ASinh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_asinh<X, X>(d1);
        }
    };

    // ASinhDerivative: 1 / sqrt(x^2 + 1).
    template <typename X>
    class ASinhDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(nd4j::math::nd4j_pow<X, X, X>(d1, static_cast<X>(2.f)) + static_cast<X>(1.f)));
        }
    };

    // ACosh: inverse hyperbolic cosine.
    template <typename X>
    class ACosh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_acosh<X, X>(d1);
        }
    };

    // ACoshDerivative: 1 / (sqrt(x - 1) * sqrt(x + 1)).
    template <typename X>
    class ACoshDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return static_cast<X>(1.f) / (nd4j::math::nd4j_sqrt<X, X>(d1 - static_cast<X>(1.f)) * nd4j::math::nd4j_sqrt<X, X>(d1 + static_cast<X>(1.f)));
        }
    };

    // Ones: constant 1 for every element.
    template <typename X>
    class Ones {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return static_cast<X>(1.0f);
        }
    };

    // SoftSign: x / (1 + |x|).
    template <typename X>
    class SoftSign {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_softsign<X, X>(d1);
        }
    };

    // SoftSignDerivative: 1 / (1 + |x|)^2.
    template <typename X>
    class SoftSignDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_softsignderivative<X,X>(d1);
        }
    };

    // MatchConditionBool: boolean predicate dispatcher; extraParams[0] is the
    // comparison value, [1] the epsilon, [2] the condition mode (switch below).
    template <typename X, typename Z>
    class MatchConditionBool {
    public:
        no_op_exec_special_bool
        no_op_exec_special_bool_cuda

        // this op return 1.0 if condition met, 0.0 otherwise
        op_def static Z op(X d1, X *extraParams) {
            X compare = extraParams[0];
            X eps = extraParams[1];

            auto mode = static_cast<int>(extraParams[2]);
            //nd4j_printf("value: %f; comp: %f; eps: %f; mode: %i;\n", d1, compare, eps, mode);

            switch (mode) {
                case 0: // equals
                    return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? true : false;
                case 1: // not equals
                    return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? true : false;
                case 2: // less_than
                    return d1 < compare ? true : false;
                case 3: // greater_than
                    return d1 > compare ? true : false;
                case 4: // less_or_equals_than
                    return d1 <= compare ? true : false;
                case 5: // greater_or_equals_than
                    return d1 >= compare ? true : false;
                case 6: // abs_less_than
                    return nd4j::math::nd4j_abs<X>(d1) < compare ?
// (continuation of MatchConditionBool::op from the previous chunk)
true : false;
                case 7: // abs_greater_than
                    return nd4j::math::nd4j_abs<X>(d1) > compare ? true : false;
                case 8: // is inf
                    return nd4j::math::nd4j_isinf(d1) ? true : false;
                case 9: // is nan
                    return nd4j::math::nd4j_isnan(d1) ? true : false;
                case 10:
                    return (d1 == compare) ? true : false;
                case 11:
                    return (d1 != compare) ? true : false;
                case 12: // abs_greater_or_equals_than
                    return nd4j::math::nd4j_abs<X>(d1) >= compare ? true : false;
                case 13: // abs_less_or_equals_than
                    return nd4j::math::nd4j_abs<X>(d1) <= compare ? true : false;
                case 14: // isFinite
                    return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1));
                case 15: // isInfinite
                    return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1);
                default:
                    printf("Undefined match condition: [%i]\n", mode);
            }

            // unknown mode: pass the input through (diagnostic printed above)
            return d1;
        }
    };

    // MatchCondition: counting form of MatchConditionBool — op yields 1/0 per
    // element and merge/update sum the matches across partial reductions.
    template <typename X, typename Z>
    class MatchCondition {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        no_op_exec_special_accumulation_long
        no_op_exec_special_accumulation_cuda

        op_def static Z startingValue(const X *input) {
            return static_cast<Z>(0);
        }

        op_def static Z merge(Z old, Z opOutput, X *extraParams) {
            return old + opOutput;
        }

        op_def static Z update(Z old, Z opOutput, X *extraParams) {
            return old + opOutput;
        }

        // this op return 1.0 if condition met, 0.0 otherwise
        op_def static Z op(X d1, X *extraParams) {
            X compare = extraParams[0];
            X eps = extraParams[1];

            auto mode = static_cast<int>(extraParams[2]);
            //printf("value: %f; comp: %f; eps: %f; mode: %i;\n", (float) d1, (float) compare, (float) eps, mode);

            switch (mode) {
                case 0: // equals
                    return nd4j::math::nd4j_abs<X>(d1 - compare) <= eps ? 1 : 0;
                case 1: // not equals
                    return nd4j::math::nd4j_abs<X>(d1 - compare) > eps ? 1 : 0;
                case 2: // less_than
                    return d1 < compare ? 1 : 0;
                case 3: // greater_than
                    return d1 > compare ? 1 : 0;
                case 4: // less_or_equals_than
                    return d1 <= compare ? 1 : 0;
                case 5: // greater_or_equals_than
                    return d1 >= compare ? 1 : 0;
                case 6: // abs_less_than
                    return nd4j::math::nd4j_abs<X>(d1) < compare ? 1 : 0;
                case 7: // abs_greater_than
                    return nd4j::math::nd4j_abs<X>(d1) > compare ? 1 : 0;
                case 8: // is inf
                    return nd4j::math::nd4j_isinf(d1) ? 1 : 0;
                case 9: // is nan
                    return nd4j::math::nd4j_isnan(d1) ? 1 : 0;
                case 10:
                    return (d1 == compare) ? 1 : 0;
                case 11:
                    return (d1 != compare) ? 1 : 0;
                case 12: // abs_greater_or_equals_than
                    return nd4j::math::nd4j_abs<X>(d1) >= compare ? 1 : 0;
                case 13: // abs_less_or_equals_than
                    return nd4j::math::nd4j_abs<X>(d1) <= compare ? 1 : 0;
                case 14: // isFinite
                    return !(nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1)) ? 1 : 0;
                case 15: // isInfinite
                    return nd4j::math::nd4j_isinf(d1) || nd4j::math::nd4j_isnan(d1) ? 1 : 0;
                default:
                    printf("Undefined match condition: [%i]\n", mode);
            }

            // unknown mode: pass the input through (diagnostic printed above)
            return d1;
        }

        op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
            return reduction;
        }
    };

    // ELU: exponential linear unit with alpha = d2.
    template <typename X, typename Y, typename Z>
    class ELU {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            return nd4j::math::nd4j_elu<X,Z>(d1, static_cast<X>(d2));
        }
    };

    // ELUDerivative: derivative of ELU with alpha = d2.
    template <typename X, typename Y, typename Z>
    class ELUDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            return nd4j::math::nd4j_eluderivative<X,Z>(d1, static_cast<X>(d2));
        }
    };

    // RELU: max(d1, d2) with d2 as the cutoff/threshold value.
    template <typename X, typename Y, typename Z>
    class RELU {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            auto xt = static_cast<Z>(d1);
            auto xf = static_cast<Z>(d2);
            return xt < xf ? xf : xt;
        }
    };

    // SXELogitsSmoother: label smoothing, d1*(1 - d2) + 0.5*d2.
    template <typename X, typename Y, typename Z>
    class SXELogitsSmoother {
    public:
        op_def static Z op(X d1, Y d2, Z *params) {
            return d1 * ((X)1.f - (X) d2) + (X)(0.5f) * (X) d2;
        }
    };

    // RELU6: RELU capped at 6.
    template <typename X, typename Y, typename Z>
    class RELU6 {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            auto relu = simdOps::RELU<X,Y,Z>::op(d1, d2, params);
            return relu < static_cast<Z>(6) ? relu : static_cast<Z>(6);
        }
    };

    // LeakyRELU: x for x >= 0, alpha * x otherwise (alpha = d2).
    template <typename X, typename Y, typename Z>
    class LeakyRELU {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            auto val = static_cast<Z>(d1);
            auto alpha = static_cast<Z>(d2);
            return val < 0.0f ? alpha * val : val;
        }
    };

    // SELU: scaled ELU with fixed SELU_LAMBDA / SELU_ALPHA constants.
    template <typename X>
    class SELU {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 > static_cast<X>(0.0f) ? static_cast<X>(SELU_LAMBDA) * static_cast<X>(d1) : static_cast<X>(SELU_LAMBDA) * (static_cast<X>(SELU_ALPHA) * nd4j::math::nd4j_exp<X, X>(d1) - static_cast<X>(SELU_ALPHA));
        }
    };

    // SELUDerivative (body cut mid-expression; continues in next chunk).
    template <typename X>
    class SELUDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1 > static_cast<X>(0.f) ?
// (continuation of SELUDerivative::op from the previous chunk)
static_cast<X>(SELU_LAMBDA) : static_cast<X>(SELU_ALPHA) * static_cast<X>(SELU_LAMBDA) * nd4j::math::nd4j_exp<X, X>(d1);
        }
    };

    // LeakyRELUDerivative: 1 for x >= 0, else alpha (= d2).
    template <typename X, typename Y, typename Z>
    class LeakyRELUDerivative {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            if (d1 >= static_cast<X>(0))
                return static_cast<Z>(1);
            else
                return static_cast<Z>(d2);
        }
    };

    // ASin: arc sine.
    template <typename X>
    class ASin {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_asin<X,X>(d1);
        }
    };

    // Sinh: hyperbolic sine.
    template <typename X>
    class Sinh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_sinh<X,X>(d1);
        }
    };

    // SinhDerivative: d/dx sinh(x) = cosh(x).
    template <typename X>
    class SinhDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_cosh<X, X>(d1);
        }
    };

    // Cosh: hyperbolic cosine.
    template <typename X>
    class Cosh {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_cosh<X,X>(d1);
        }
    };

    // Tan: tangent.
    template <typename X>
    class Tan {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_tan<X,X>(d1);
        }
    };

    // TanDerivative: 1 / cos(x)^2.
    template <typename X>
    class TanDerivative {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return static_cast<X>(1.f) / nd4j::math::nd4j_pow<X, X, X>(nd4j::math::nd4j_cos<X, X>(d1), static_cast<X>(2.0f));
        }
    };

    // ATan: arc tangent.
    template <typename X>
    class ATan {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return nd4j::math::nd4j_atan<X, X>(d1);
        }
    };

    // Atan2: NOTE(review) — the pairwise form forwards (d2, d1), i.e. the
    // second operand is the y-coordinate; presumably deliberate, verify against callers.
    template <typename X, typename Y, typename Z>
    class Atan2 {
    public:
        no_op_exec_special
        no_op_exec_special_cuda

        op_def static Z op(X d1, Y d2) {
            return nd4j::math::nd4j_atan2<X, Z>(d2, d1);
        }

        op_def static Z op(X d1, Y d2, Z *params) {
            return op(d1, d2);
        }

        // op for MetaOps
        op_def static Z op(X d1, Y *params) {
            return op(d1, params[0]);
        }
    };

    // Identity: pass-through.
    template <typename X>
    class Identity {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return d1;
        }
    };

    // Stabilize: clamp d1 * k into +-MIN_CUTFOFF range (scaled back by k).
    template <typename X>
    class Stabilize {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            X k = params[0];
            if (d1 * k > static_cast<X>(- MIN_CUTFOFF))
                return static_cast<X>(- MIN_CUTFOFF) / k;
            else if (d1 * k < static_cast<X>(MIN_CUTFOFF))
                return static_cast<X>(MIN_CUTFOFF) / k;
            return d1;
        }
    };

    // Step: heaviside step with threshold d2 — 1 if d1 > d2, else 0.
    template <typename X, typename Y, typename Z>
    class Step {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static Z op(X d1, Y d2, Z *params) {
            return (d1 > static_cast<X>(d2) ? static_cast<Z>(1) : static_cast<Z>(0));
        }
    };

    // OneMinus: 1 - x.
    template <typename X>
    class OneMinus {
    public:
        no_op_exec_special_same
        no_op_exec_special_same_cuda

        op_def static X op(X d1, X *params) {
            return static_cast<X>(1) - d1;
        }
    };

    // Sum: additive reduction (identity 0).
    template <typename X>
    class Sum {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0.0f);
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static X op(X d1, X *extraParams) {
            return d1;
        }

        op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return reduction;
        }
    };

    // ReduceSameBenchmarkOp: synthetic heavy per-element math for benchmarking;
    // summed like Sum. Not a meaningful numerical op.
    template <typename X>
    class ReduceSameBenchmarkOp {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0.0f);
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static X op(X d1, X *extraParams) {
            auto f1 = static_cast<float>(d1);
            return static_cast<X>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
                    + nd4j::math::nd4j_log<float,float>(f1) * nd4j::math::nd4j_sin<float,float>(f1)
                    / nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
                    * nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
                    - nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
        }

        op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return reduction;
        }
    };

    // ShannonEntropy: -sum(p^2 * log(p^2)) with p = element value.
    template <typename X, typename Z>
    class ShannonEntropy {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0);
        }

        op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, Z *extraParams) {
            auto p = d1 * d1;
            return static_cast<Z>(p) * nd4j::math::nd4j_log<X, Z>(p);
        }

        // negate: entropy is minus the accumulated sum
        op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
            return -reduction;
        }
    };

    // LogEntropy: log of the entropy, log(-sum(p * log(p))).
    template <typename X, typename Z>
    class LogEntropy {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0);
        }

        op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, Z *extraParams) {
            return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
        }

        op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
            //entropy is -sum(p(x) * log(p(x))); log entropy is log of this
            return nd4j::math::nd4j_log<Z, Z>(-reduction);
        }
    };

    // Entropy: -sum(p * log(p)) (body continues in next chunk).
    template <typename X, typename Z>
    class Entropy {
    public:
// (continuation of class Entropy from the previous chunk)
no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0);
        }

        op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, Z *extraParams) {
            return static_cast<Z>(d1) * nd4j::math::nd4j_log<X, Z>(d1);
        }

        op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
            return static_cast<Z>(-reduction); //entropy is -sum(p(x) * log(p(x)))
        }
    };

    // ASum: sum of absolute values.
    template <typename X>
    class ASum {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::ASUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0);
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
        }

        op_def static X op(X d1, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(d1);
        }

        op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(reduction);
        }
    };

    // CountNonZero: counts elements != 0.
    template <typename X, typename Z>
    class CountNonZero {
    public:
        no_op_exec_special_accumulation_long
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::ASUM;

        op_def static Z startingValue(const X *input) {
            return static_cast<Z>(0);
        }

        op_def static Z merge(Z old, Z opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(Z old, Z opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, X *extraParams) {
            return d1 == static_cast<X>(0.0f) ? static_cast<Z>(0.0f) : static_cast<Z>(1.0f);
        }

        op_def static Z postProcess(Z reduction, Nd4jLong n, X *extraParams) {
            return reduction;
        }
    };

    // CountZero: counts elements == 0.
    template <typename X, typename Z>
    class CountZero {
    public:
        no_op_exec_special_accumulation_long
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static Z startingValue(const X *input) {
            return static_cast<Z>(0.0f);
        }

        op_def static Z merge(Z old, Z opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(Z old, Z opOutput, X *extraParams) {
            return opOutput + old;
        }

        // NOTE(review): literals are cast to X then implicitly converted to Z.
        op_def static Z op(X d1, X *extraParams) {
            return d1 == static_cast<X>(0) ? static_cast<X>(1) : static_cast<X>(0);
        }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return static_cast<Z>(reduction);
        }
    };

    // Prod: multiplicative reduction (identity 1).
    template <typename X>
    class Prod {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(1);
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return opOutput * old;
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return opOutput * old;
        }

        op_def static X op(X d1, X *extraParams) {
            return d1;
        }

        op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return reduction;
        }
    };

    // Any: 1 if the summed elements are > 0, else 0.
    template <typename X, typename Z>
    class Any {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0.0f);
        }

        op_def static Z merge(X old, X opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(X old, X opOutput, X *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, X *extraParams) {
            return d1;
        }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0) ;
        }
    };

    // All: 1 only if the product of all elements stays > 0 (any zero kills it).
    template <typename X, typename Z>
    class All {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::PRODUCT;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(1);
        }

        op_def static Z merge(X old, X opOutput, X *extraParams) {
            return opOutput * old;
        }

        op_def static Z update(X old, X opOutput, X *extraParams) {
            return opOutput * old;
        }

        op_def static Z op(X d1, X *extraParams) {
            return d1;
        }

        op_def static Z postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return reduction > static_cast<X>(0) ? static_cast<Z>(1) : static_cast<Z>(0);
        }
    };

    // Mean: arithmetic mean — sum divided by element count in postProcess.
    template <typename X, typename Z>
    class Mean {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0);
        }

        op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, Z *extraParams) {
            return d1;
        }

        op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
            return reduction / (Z) n;
        }
    };

    // ReduceFloatBenchmarkOp: synthetic heavy per-element math for benchmarking
    // (op body cut mid-expression; continues in next chunk).
    template <typename X, typename Z>
    class ReduceFloatBenchmarkOp {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0);
        }

        op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z update(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, Z *extraParams) {
            auto f1 = static_cast<float>(d1);
            return static_cast<Z>(nd4j::math::nd4j_pow<float,float,float>(f1, 3)
                    + nd4j::math::nd4j_log<float,float>(f1) *
// (continuation of ReduceFloatBenchmarkOp::op from the previous chunk)
nd4j::math::nd4j_sin<float,float>(f1)
                    / nd4j::math::nd4j_tanh<float,float>(static_cast<float>(M_E) * static_cast<float>(M_PI) * f1)
                    * nd4j::math::nd4j_sqrt<float,float>(static_cast<float>(M_PI) / f1)
                    - nd4j::math::nd4j_atan<float,float>(static_cast<float>(M_E) / f1));
        }

        op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
            return (Z) reduction / (Z) n;
        }
    };

    // AMean: mean of absolute values.
    template <typename X, typename Z>
    class AMean {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0);
        }

        // NOTE(review): merge takes abs of partials with nd4j_abs<X> on Z values;
        // update just adds — asymmetry preserved as-is, verify intent upstream.
        op_def static Z merge(Z old, Z opOutput, Z *extraParams) {
            return nd4j::math::nd4j_abs<X>(opOutput) + nd4j::math::nd4j_abs<X>(old);
        }

        op_def static Z update(Z old, Z opOutput, Z *extraParams) {
            return opOutput + old;
        }

        op_def static Z op(X d1, Z *extraParams) {
            return nd4j::math::nd4j_abs<X>(d1);
        }

        op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
            return nd4j::math::nd4j_abs<Z>(reduction) / static_cast<Z>(n);
        }
    };

    // Max: maximum reduction (identity is -inf / lowest representable).
    template <typename X>
    class Max {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::MAX;

        op_def static X startingValue(const X *input) {
            return -nd4j::DataTypeUtils::infOrMax<X>();
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_max<X>(old, opOutput);
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_max<X>(opOutput, old);
        }

        op_def static X op(X d1, X d2, X *params) {
            return nd4j::math::nd4j_max<X>(d1, d2);
        }

        op_def static X op(X d1, X d2) {
            return nd4j::math::nd4j_max<X>(d1, d2);
        }

        // FIXME: this signature overlaps with MetaOp
        op_def static X op(X d1, X *extraParams) {
            return d1;
        }

        op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return reduction;
        }
    };

    // AMaxPairwise: pick the operand with larger absolute value (sign kept).
    template <typename X, typename Y, typename Z>
    class AMaxPairwise {
    public:
        op_def static Z op(X d1, Y d2, Z *params) {
            return op(d1, d2);
        }

        op_def static Z op(X d1, Y d2) {
            auto z1 = static_cast<Z>(d1);
            auto z2 = static_cast<Z>(d2);

            if (nd4j::math::nd4j_abs<Z>(z1) > nd4j::math::nd4j_abs<Z>(z2))
                return z1;
            else
                return z2;
        }
    };

    // AMinPairwise: pick the operand with smaller absolute value (sign kept).
    template <typename X, typename Y, typename Z>
    class AMinPairwise {
    public:
        op_def static Z op(X d1, Y d2, Z *params) {
            return op(d1, d2);
        }

        op_def static Z op(X d1, Y d2) {
            auto z1 = static_cast<Z>(d1);
            auto z2 = static_cast<Z>(d2);

            if (nd4j::math::nd4j_abs<Z>(z1) < nd4j::math::nd4j_abs<Z>(z2))
                return z1;
            else
                return z2;
        }
    };

    // MaxPairwise: elementwise maximum of two operands.
    template <typename X, typename Y, typename Z>
    class MaxPairwise {
    public:
        op_def static Z op(X d1, Y d2, Z *params) {
            return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
        }

        op_def static Z op(X d1, Y d2) {
            return nd4j::math::nd4j_max<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
        }
    };

    // MinPairwise: elementwise minimum of two operands.
    template <typename X, typename Y, typename Z>
    class MinPairwise {
    public:
        op_def static Z op(X d1, Y d2, Z *params) {
            return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
        }

        op_def static Z op(X d1, Y d2) {
            return nd4j::math::nd4j_min<Z>(static_cast<Z>(d1), static_cast<Z>(d2));
        }
    };

    // AMax: reduction to the element with the largest absolute value
    // (seeded from input[0]; postProcess returns its magnitude).
    template <typename X>
    class AMax {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::AMAX;

        op_def static X startingValue(const X *input) {
            return input[0];
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
        }

        op_def static X op(X d1, X d2, X *params) {
            return nd4j::math::nd4j_max<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
        }

        op_def static X op(X d1, X d2) {
            return nd4j::math::nd4j_abs<X>(d1) > nd4j::math::nd4j_abs<X>(d2) ? d1 : d2;
        }

        // FIXME: this signature overlaps with MetaOp
        op_def static X op(X d1, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(d1);
        }

        op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(reduction);
        }
    };

    // AMin: reduction to the smallest absolute value.
    template <typename X>
    class AMin {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::AMIN;

        op_def static X startingValue(const X *input) {
            return input[0];
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(old), nd4j::math::nd4j_abs<X>(opOutput));
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(opOutput), nd4j::math::nd4j_abs<X>(old));
        }

        op_def static X op(X d1, X d2, X *params) {
            return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
        }

        op_def static X op(X d1, X d2) {
            return nd4j::math::nd4j_min<X>(nd4j::math::nd4j_abs<X>(d1), nd4j::math::nd4j_abs<X>(d2));
        }

        // FIXME: this signature overlaps with MetaOp
        op_def static X op(X d1, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(d1);
        }

        op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) {
            return nd4j::math::nd4j_abs<X>(reduction);
        }
    };

    // Min: minimum reduction (class continues past this view; left open).
    template <typename X>
    class Min {
    public:
        no_op_exec_special_accumulation_same
        no_op_exec_special_accumulation_same_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::MIN;

        op_def static X startingValue(const X *input) {
            return nd4j::DataTypeUtils::infOrMax<X>();
        }

        op_def static X merge(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_min<X>(old, opOutput);
        }

        op_def static X update(X old, X opOutput, X *extraParams) {
            return nd4j::math::nd4j_min<X>(opOutput, old);
        }

        op_def static X op(X d1, X d2, X *params) {
            return nd4j::math::nd4j_min<X>(d1, d2);
        }
op_def static X op(X d1, X d2) { return nd4j::math::nd4j_min<X>(d1, d2); } // FIXME: this signature overlaps with MetaOp op_def static X op(X d1, X *extraParams) { return d1; } op_def static X postProcess(X reduction, Nd4jLong n, X *extraParams) { return reduction; } }; template <typename X, typename Z> class Norm1 { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(nd4j::math::nd4j_abs<X>(d1)); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return reduction; } }; template <typename X, typename Z> class Norm2 { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_sqrt<Z, Z>(reduction); } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1 * d1); } }; template <typename X, typename Z> class SquaredNorm { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + 
old; } op_def static Z op(X d1, Z *extraParams) { return static_cast<Z>(d1 * d1); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return reduction; } }; template <typename X, typename Z> class NormFrobenius { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { X v = nd4j::math::nd4j_abs<X>(d1); return static_cast<Z>(v * v); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_sqrt<Z, Z>(reduction); } }; template <typename X, typename Z> class NormP { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z op(X d1, Z *extraParams) { return nd4j::math::nd4j_pow<X, Z, Z>(nd4j::math::nd4j_abs<X>(d1), extraParams[0]); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) { return nd4j::math::nd4j_pow<Z, Z, Z>(reduction, static_cast<Z>(1.0f) / extraParams[0]); } }; template <typename X, typename Z> class NormMax { public: no_op_exec_special_accumulation no_op_exec_special_accumulation_cuda const static functions::ReduceType reduceType = functions::ReduceType::SUM; op_def static X startingValue(const X *input) { return static_cast<X>(0); } op_def static Z merge(Z old, Z opOutput, Z *extraParams) { return opOutput + old; } op_def static Z update(Z old, Z 
opOutput, Z *extraParams) {
            return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(old), nd4j::math::nd4j_abs<Z>(opOutput));
        }

        op_def static Z op(X d1, Z *extraParams) {
            return static_cast<Z>(d1);
        }

        // NOTE(review): both arguments are abs(reduction), so this reduces to
        // |reduction| — the duplicated operand looks like a leftover; confirm
        // intended semantics against the other Norm* ops before changing.
        op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParams) {
            return nd4j::math::nd4j_max<Z>(nd4j::math::nd4j_abs<Z>(reduction), nd4j::math::nd4j_abs<Z>(reduction));
        }
    };

    // Sample variance. op() accumulates squared deviations from a mean the
    // caller has already computed and placed in extraParams[0]; postProcess
    // divides by (n - 1), i.e. the bias-corrected estimator.
    template <typename X, typename Z>
    class Variance {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0.0f);
        }

        op_def static Z merge(X old, X opOutput, Z *extraParams) {
            return old + opOutput;
        }

        op_def static Z update(X old, X opOutput, Z *extraParams) {
            return old + opOutput;
        }

        // Squared deviation of d1 from the precomputed mean.
        op_def static X op(X d1, Z *extraParams) {
            X mean = static_cast<X>(extraParams[0]);
            X ret = d1 - mean;
            return ret * ret;
        }

        op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
            // T bias = extraParams[1];
            // return (reduction - (nd4j::math::nd4j_pow<T>(bias, static_cast<T>(2.0f)) / static_cast<T>(n))) / (n - 1)
            return static_cast<Z>(reduction) / static_cast<Z>(n - 1);
        }
    };

    /**
     * Standard deviation of a buffer
     */
    template <typename X, typename Z>
    class StandardDeviation {
    public:
        no_op_exec_special_accumulation
        no_op_exec_special_accumulation_cuda

        const static functions::ReduceType reduceType = functions::ReduceType::SUM;

        op_def static X startingValue(const X *input) {
            return static_cast<X>(0.0f);
        }

        op_def static Z merge(X old, X opOutput, Z *extraParams) {
            return old + opOutput;
        }

        op_def static Z update(X old, X opOutput, Z *extraParams) {
            return old + opOutput;
        }

        // Same accumulation as Variance: squared deviation from the
        // caller-supplied mean in extraParams[0].
        op_def static Z op(X d1, Z *extraParams) {
            X mean = extraParams[0];
            X ret = d1 - mean;
            return ret * ret;
        }

        // Delegates to Variance<X,Z>::postProcess, then takes the square root.
        op_def static Z postProcess(X reduction, Nd4jLong n, Z *extraParams) {
            Z ret = Variance<X,Z>::postProcess(reduction, n, extraParams);
            Z sqrtRet =
nd4j::math::nd4j_sqrt<X, Z>(ret); return sqrtRet; } }; template <typename X, typename Y> class CosineSimilarity { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1])); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(d1 * d1); extraParams[1] += static_cast<Y>(d2 * d2); return static_cast<Y>(d1 * d2); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],static_cast<Y>(d1 * d1)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1],static_cast<Y>(d2 * d2)); return static_cast<Y>(d1 * d2); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class JaccardDistance { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<X>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { // num / denom return (static_cast<Y>(1.0f)) - (extraParams[0] / extraParams[1]); } op_def static Y num(X d1, X d2) { return nd4j::math::nd4j_min<X>(d1, d2); } op_def 
static Y denom(X d1, X d2) { return nd4j::math::nd4j_max<X>(d1, d2); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(num(d1, d2)); extraParams[1] += static_cast<Y>(denom(d1, d2)); return static_cast<Y>(0.0f); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0],num(d1, d2)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], denom(d1, d2)); return static_cast<Y>(0.0f); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class SimpleHammingDistance { public: static const int extraParamsLen = 0; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return static_cast<Y>(reduction / n); } op_def static Y op(X d1, X d2, Y *extraParams) { return (d1 == d2) ? 
static_cast<Y>(0.0f) : static_cast<Y>(1.0f); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParams) { return op(d1, d2, extraParams); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; template <typename X, typename Y> class CosineDistance { public: static const int extraParamsLen = 2; op_def static X *generateExtraParams() { //T *extraParams = new T[2]; return nullptr; } op_def static void finalizeExtraParams(X *extraParams) { //delete[] extraParams; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParams) { return (static_cast<Y>(1.0f)) - (reduction / (nd4j::math::nd4j_sqrt<Y, Y>(extraParams[0]) * nd4j::math::nd4j_sqrt<Y, Y>(extraParams[1]))); } op_def static Y op(X d1, X d2, Y *extraParams) { extraParams[0] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d1) * nd4j::math::nd4j_abs<X>(d1)); extraParams[1] += static_cast<Y>(nd4j::math::nd4j_abs<X>(d2) * nd4j::math::nd4j_abs<X>(d2)); return (d1 * d2); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { extraParamsTotal[0] += extraParamsLocal[0]; extraParamsTotal[1] += extraParamsLocal[1]; } #ifdef __CUDACC__ static _CUDA_D inline Y opAtomic(X d1, X d2, Y *extraParams) { nd4j::math::atomics::nd4j_atomicAdd(&extraParams[0], nd4j::math::nd4j_abs<Y>(d1) * nd4j::math::nd4j_abs<Y>(d1)); nd4j::math::atomics::nd4j_atomicAdd(&extraParams[1], nd4j::math::nd4j_abs<Y>(d2) * nd4j::math::nd4j_abs<Y>(d2)); return (d1 * d2); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParams) { return old + opOutput; } op_def static Y merge(Y old, Y opOutput, Y *extraParams) { return update(old, opOutput, extraParams); } }; /** * Dot product between 2 
arrays */ template <typename X, typename Y> class Dot { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op //delete[] * extraParamsRef; } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, Y *extraParamsRef) { return static_cast<Y>(d1 * d2); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; /** * Op to check equality within arrays */ template <typename X, typename Z> class EqualsWithEps { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Z startingValue(const X *input) { return static_cast<Z>(0.0f); } op_def static Z postProcess(Z reduction, Nd4jLong n, Z *extraParamsRef) { return reduction; } op_def static Z op(X d1, X d2, Z *extraParamsRef) { double eps = nd4j::math::nd4j_abs<double>(extraParamsRef[2]); return static_cast<Z>(!nd4j::math::nd4j_eq<X>(d1, d2, eps)); } #ifdef __CUDACC__ __device__ static inline Z opAtomic(X d1, X d2, Z *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Z update(Z old, Z opOutput, Z *extraParamsRef) { return opOutput + old; } op_def static Z merge(X old, Z opOutput, Z *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Z *extraParamsTotal, Z *extraParamsLocal) {} }; template <typename X, 
typename Y> class EuclideanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return nd4j::math::nd4j_sqrt<Y, Y>(reduction); } op_def static Y op(X d1, X d2, Y *extraParamsRef) { X ret = d1 - d2; return static_cast<Y>(ret * ret); } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return opOutput + old; } op_def static Y merge(Y old, Y opOutput, Y *extraParamsRef) { return update(old, opOutput, extraParamsRef); } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) {} }; template <typename X, typename Y> class ManhattanDistance { public: static const int extraParamsLen = 0; op_def static X * generateExtraParams() { return nullptr; } op_def static void finalizeExtraParams(X *extraParamsRef) { //no-op } op_def static Y startingValue(const X *input) { return static_cast<Y>(0.0f); } op_def static Y postProcess(Y reduction, Nd4jLong n, Y *extraParamsRef) { return reduction; } op_def static Y op(X d1, X d2, Y *extraParamsRef) { return nd4j::math::nd4j_abs<X>(d1 - d2); } op_def static Y update(Y old, Y opOutput, Y *extraParamsRef) { return old + opOutput; } op_def static void aggregateExtraParams(Y *extraParamsTotal, Y *extraParamsLocal) { } #ifdef __CUDACC__ __device__ static inline Y opAtomic(X d1, X d2, Y *extraParamsRef) { return op(d1, d2, extraParamsRef); } #endif #ifndef __clang__ #pragma omp declare simd uniform(extraParamsRef) #endif op_def static Y merge(X old, X opOutput, X *extraParamsRef) { return update(old, opOutput, extraParamsRef); } }; template <typename X, typename Z> class IndexAbsoluteMax { public: 
static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return nd4j::math::nd4j_abs<X>(val); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value > old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) > nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(const X *input) { return 0; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class FirstIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X 
*extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); //printf("res: %f; oldIdx: %i; newIdx: %i\n", res, old.index, opOutput.index); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index > opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(const X *input) { return -nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index > f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X, typename Z> class LastIndex { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { #ifdef __CUDACC__ if (opOutput.index < 0) return old; #endif auto res = simdOps::MatchCondition<X,X>::op(opOutput.value, extraParams); if (res == static_cast<X>(0)) return old; if (old.index < 0) return opOutput; if (old.index < opOutput.index) return opOutput; return old; } static _CUDA_HD inline X startingValue(const X *input) { 
return -nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = -1; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.index < f2.index) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } }; template <typename X, typename Z> class IndexMax { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value > old.value) { return opOutput; } #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value > f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline X startingValue(const X *input) { return 
-nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class IndexAbsoluteMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(const X *input) { return nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { opOutput.value = nd4j::math::nd4j_abs<X>(opOutput.value); old.value = nd4j::math::nd4j_abs<X>(old.value); if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (nd4j::math::nd4j_abs<X>(f1.value) < nd4j::math::nd4j_abs<X>(f2.value)) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline 
functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class IndexMin { public: static _CUDA_HD inline functions::indexreduce::IndexValue<X> op( functions::indexreduce::IndexValue<X> val, X *extraParams) { return val; } static _CUDA_HD inline X startingValue(const X *input) { return nd4j::DataTypeUtils::infOrMax<X>(); } static _CUDA_HD inline functions::indexreduce::IndexValue<X> startingIndexValue(X *input) { functions::indexreduce::IndexValue<X> local; local.value = startingValue(input); local.index = 0; return local; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> update(functions::indexreduce::IndexValue<X> &old, functions::indexreduce::IndexValue<X> &opOutput, X *extraParams) { if (opOutput.value < old.value) return opOutput; #ifdef __CUDACC__ // workaround for cuda race condition at merge phase else if (opOutput.value == old.value && opOutput.index < old.index) return opOutput; #elif defined(__GNUC__) #endif return old; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> merge( functions::indexreduce::IndexValue<X> f1, functions::indexreduce::IndexValue<X> f2, X *extraParams) { if (f1.value < f2.value) return f2; return f1; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> postProcess( functions::indexreduce::IndexValue<X> reduction, int n, int xOffset, X *dx, int incx, X *extraParams, X *result) { return reduction; } static _CUDA_HD inline functions::indexreduce::IndexValue<X> op(functions::indexreduce::IndexValue<X> d1, functions::indexreduce::IndexValue<X> d2, X *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsVariance { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { Z ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) 
return static_cast<Z>(val.variance()); return ret; } return static_cast<Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X, typename Z> class SummaryStatsStandardDeviation { public: static _CUDA_HD inline Z getValue(const bool biasCorrected, functions::summarystats::SummaryStatsData<X> val) { if (biasCorrected) { auto ret = static_cast<Z>(val.varianceBiasCorrected()); if (ret < static_cast<Z>(0.0f)) return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); else return nd4j::math::nd4j_sqrt<double, Z>(ret); } return nd4j::math::nd4j_sqrt<double, Z>(val.variance()); } static _CUDA_HD inline functions::summarystats::SummaryStatsData<X> op(functions::summarystats::SummaryStatsData<X> d1, Z *extraParams) { return d1; } }; template <typename X> class DropOut { public: no_op_exec_special_same no_op_exec_special_same_cuda inline _CUDA_D static X op(X d1, X *params) { X prob = params[0]; #ifdef __CUDACC__ X length = params[1]; X tid = blockIdx.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= prob ? static_cast<X>(0.0f) : d1; } }; template <typename X, typename Y, typename Z> class DropOutInverted { public: no_op_exec_special no_op_exec_special_cuda #ifdef __CUDACC__ __device__ #endif inline static Z op(X d1, Y d2, Z *params) { Y prob = d2; #ifdef __CUDACC__ X length = params[1]; X tid = blockIdx.x * blockDim.x + threadIdx.x; X rnd = nd4j::math::nd4j_abs<X>(nd4j::math::nd4j_cos<X>(static_cast<X>(clock64()) * static_cast<X>(tid) + static_cast<X>(length) * static_cast<X>(tid))); #else X rnd = static_cast<X>(rand() / RAND_MAX); #endif return rnd >= static_cast<X>(prob) ? 
static_cast<Z>(0.0f) : reinterpret_cast<Z>(d1 / static_cast<X>(prob)); } }; template <typename X, typename Y, typename Z> class ReplaceNans { public: no_op_exec_special no_op_exec_special_cuda op_def static Z op(X d1, Y d2, Z *params) { return nd4j::math::nd4j_isnan(d1) ? static_cast<Z>(d2) : static_cast<Z>(d1) ; } }; // this op is used for conditional pairwise transforms only template <typename X, typename Y, typename Z> class CompareAndReplace{ public: // op definition for PairWise Transform op_def static Z op(X d1, Y d2, Z *params) { auto zd1 = static_cast<Z>(d1); auto zd2 = static_cast<Z>(d2); auto compare = params[0]; auto eps = params[2]; int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(zd1 - compare) <= eps) return zd2; else return zd1; else if (mode == 1) // not equals eps if (nd4j::math::nd4j_abs<Z>(zd1 - compare) > eps) return zd2; else return zd1; else if (mode == 2) // less_than eps if (zd1 < compare) return zd2; else return zd1; else if (mode ==3) // greater_than if (zd1 > compare) return zd2; else return zd1; else if (mode == 4) // less_or_equals_than if (zd1 <= compare) return zd2; else return zd1; else if (mode == 5) // greater_or_equals_than if (zd1 >= compare) return zd2; else return zd1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(zd1) < compare) return zd2; else return zd1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<Z>(zd1) > compare) return zd2; else return zd1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(zd1)) return zd2; else return zd1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(zd1)) return zd2; else return zd1; else if (mode == 10) if (zd1 == compare) return zd2; else return zd1; else if (mode == 11) if (zd1 != compare) return zd2; else return zd1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(zd1) >= compare) return zd2; else return zd1; else if (mode == 13) // abs_less_or_equals_than if 
(nd4j::math::nd4j_abs<Z>(zd1) <= compare) return zd2; else return zd1; else printf("Undefined boolean operation: [%i]\n", mode); return zd1; } }; template <typename X, typename Y, typename Z> class CompareAndSet { public: // op definition for PairWise Transform op_def static Z op(X dX, Y dY, Z *params) { auto d1 = static_cast<Z>(dX); auto d2 = static_cast<Z>(dY); auto compare = params[0]; auto eps = params[2]; auto mode = static_cast<int>(params[3]); if (mode == 0) // equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) <= eps) return d2; else return d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<Z>(d2 - compare) > eps) return d2; else return d1; else if (mode == 2) // less_than if (d2 < compare) return d2; else return d1; else if (mode ==3) // greater_than if (d2 > compare) return d2; else return d1; else if (mode == 4) // less_or_equals_than if (d2 <= compare) return d2; else return d1; else if (mode == 5) // greater_or_equals_than if (d2 >= compare) return d2; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<Z>(d2) < compare) return d2; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<Z>(d2) > compare) return d2; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d2)) return d2; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d2)) return d2; else return d1; else if (mode == 10) if (d2 == compare) return d2; else return d1; else if (mode == 11) if (d2 != compare) return d2; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) >= compare) return d2; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<Z>(d1) <= compare) return d2; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; template <typename X> class CompareAndSetTransform { public: no_op_exec_special_same no_op_exec_special_same_cuda // op definition for Transform 
op_def static X op(X d1, X *params) { auto compare = params[0]; auto set = params[1]; auto eps = params[2]; // with mode == 0 we do set if d1 equals to compare, and with mode == 1 - we go otherwise int mode = (int) params[3]; if (mode == 0) // equals if (nd4j::math::nd4j_abs<X>(d1 - compare) <= eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) <= eps ? set : d1; else if (mode == 1) // not equals if (nd4j::math::nd4j_abs<X>(d1 - compare) > eps) return set; else return d1; //return nd4j::math::nd4j_abs<T>(d1 - compare) > eps ? set : d1; else if (mode == 2) // less_than if (d1 < compare) return set; else return d1; else if (mode ==3) // greater_than if (d1 > compare) return set; else return d1; else if (mode == 4) // less_or_equals_than if (d1 <= compare) return set; else return d1; else if (mode == 5) // greater_or_equals_than if (d1 >= compare) return set; else return d1; else if (mode == 6) // abs_less_than if (nd4j::math::nd4j_abs<X>(d1) < compare) return set; else return d1; else if (mode == 7) // abs_greater_than if (nd4j::math::nd4j_abs<X>(d1) > compare) return set; else return d1; else if (mode == 8) // is inf if (nd4j::math::nd4j_isinf(d1)) return set; else return d1; else if (mode == 9) // is nan if (nd4j::math::nd4j_isnan(d1)) return set; else return d1; else if (mode == 10) if (d1 == compare) return set; else return d1; else if (mode == 11) if (d1 != compare) return set; else return d1; else if (mode == 12) // abs_greater_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) >= compare) return set; else return d1; else if (mode == 13) // abs_less_or_equals_than if (nd4j::math::nd4j_abs<X>(d1) <= compare) return set; else return d1; else printf("Undefined boolean operation: [%i]\n", mode); return d1; } }; } #endif
main.c
#include <stdio.h> #include <stdlib.h> #include <math.h> #include <string.h> #include <time.h> #include "omp.h" #include "functions.h" int main (int argc, char **argv) { int Nthreads = 1; omp_set_num_threads(Nthreads); //seed value for the randomizer double seed = clock(); //this will make your program run differently everytime //double seed = 0; //uncomment this and your program will behave the same everytime it's run srand(seed); //declare storage for an ElGamal cryptosytem unsigned int p, g, h, x; //begin with rank 0 getting user's input unsigned int n; printf("Enter a number of bits: "); fflush(stdout); char status = scanf("%u",&n); //make sure the input makes sense if ((n<9)||(n>31)) {//Updated bounds. 8 is no good (need to encode chars) printf("Unsupported bit size.\n"); return 0; } printf("\n"); //setup an ElGamal cryptosystem setupElGamal(n,&p,&g,&h,&x); int bufferSize = 1024; unsigned char *message = (unsigned char *) malloc(bufferSize*sizeof(unsigned char)); //populate the string with a message strcpy(message, "Hello, this is the message as a string."); printf("Message = \"%s\"\n", message); /* Q1.1 Finish this line */ unsigned int charsPerInt = sizeof(p) / sizeof(n); padString(message, charsPerInt); printf("Padded Message = \"%s\"\n", message); unsigned int Nchars = strlen(message); unsigned int Nints = strlen(message)/charsPerInt; //storage for message as elements of Z_p unsigned int *Zmessage = (unsigned int *) malloc(Nints*sizeof(unsigned int)); //storage for extra encryption coefficient unsigned int *a = (unsigned int *) malloc(Nints*sizeof(unsigned int)); // cast the string into an unsigned int array convertStringToZ(message, Nchars, Zmessage, Nints); //Encrypt the Zmessage with the ElGamal cyrptographic system ElGamalEncrypt(Zmessage,a,Nints,p,g,h); printf("The encrypted text is: "); for (unsigned int i=0;i<Nints;i++) { printf("(%u,%u) ", Zmessage[i], a[i]); } printf("]\n"); //Decrypt the Zmessage with the ElGamal cyrptographic system 
ElGamalDecrypt(Zmessage,a,Nints,p,x); convertZToString(Zmessage, Nints, message, Nchars); printf("Decrypted Message = \"%s\"\n", message); printf("\n"); //Suppose we don't know the secret key. Use OpenMP threads to try and find it in parallel printf("Using %d OpenMP threads to find the secret key...\n", Nthreads); /* Q2.3 Parallelize this loop with OpenMP */ double startTime = omp_get_wtime(); int stop = 0; #pragma omp parallel for for (unsigned int i=0;i<p-1;i++) { if (stop == 0) continue; #pragma omp critical { if (modExp(g,i+1,p)==h) { printf("Secret key found! x = %u \n", i+1); stop = 1; } } } double endTime = omp_get_wtime(); double totalTime = endTime-startTime; double work = (double) p; double throughput = work/totalTime; printf("Searching all keys took %g seconds, throughput was %g values tested per second.\n", totalTime, throughput); return 0; }
bugged5.c
/******************************************************************************
 * TASK: bugged5.c
 * DESCRIPTION:
 *   Using sections, two threads initialize their private arrays and then
 *   add their array into the neighbour's array.  The original exercise
 *   acquired the two locks in opposite orders in the two sections, which
 *   deadlocks; here each lock is released before the next one is taken.
 ******************************************************************************/
#include <omp.h>
#include <stdio.h>
#include <stdlib.h>

/* A lot of memory */
//#define N 1000000
#define N 100
#define PI 3.1415926535
#define DELTA .01415926535

int main (int argc, char *argv[])
{
    int nthreads, tid, i;
    float a[N], b[N];
    omp_lock_t locka, lockb;

    omp_init_lock(&locka);
    omp_init_lock(&lockb);

    for (i = 0; i < N; i++) {
        a[i] = 0;
        b[i] = 0;
    }

    #pragma omp parallel shared(a, b, nthreads, locka, lockb) private(tid, i)
    {
        tid = omp_get_thread_num();
        #pragma omp master
        {
            nthreads = omp_get_num_threads();
            printf("Number of threads = %d\n", nthreads);
        }
        printf("Thread %d starting...\n", tid);
        #pragma omp barrier

        #pragma omp sections nowait
        {
            #pragma omp section
            {
                omp_set_lock(&locka);
                printf("Thread %d updating a[]\n", tid);
                for (i = 0; i < N; i++)
                    a[i] += DELTA * i;
                /* release locka BEFORE taking lockb — this ordering is what
                   avoids the deadlock of the original version */
                omp_unset_lock(&locka);

                omp_set_lock(&lockb);
                printf("Thread %d updating b[]\n", tid);
                for (i = 0; i < N; i++)
                    b[i] += DELTA + i;
                omp_unset_lock(&lockb);
            }

            #pragma omp section
            {
                omp_set_lock(&lockb);
                printf("Thread %d updating b[]\n", tid);
                for (i = 0; i < N; i++)
                    b[i] += PI * i;
                /* release lockb BEFORE taking locka (see above) */
                omp_unset_lock(&lockb);

                omp_set_lock(&locka);
                printf("Thread %d adding b[] to a[]\n", tid);
                for (i = 0; i < N; i++)
                    a[i] += PI + i;
                omp_unset_lock(&locka);
            }
        }
    }

    /* FIX: destroy the locks once every thread is done with them */
    omp_destroy_lock(&locka);
    omp_destroy_lock(&lockb);

    printf("Sample results: %f %f %f %f\n", a[0], b[0], a[N-1], b[N-1]);
    return 0;
}
kmeans.c
/** @file kmeans.c ** @brief K-means - Declaration ** @author Andrea Vedaldi, David Novotny **/ /* Copyright (C) 2007-12 Andrea Vedaldi and Brian Fulkerson. Copyright (C) 2013 Andrea Vedaldi and David Novotny. All rights reserved. This file is part of the VLFeat library and is made available under the terms of the BSD license (see the COPYING file). */ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page kmeans K-means clustering @author Andrea Vedaldi @author David Novotny @tableofcontents <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @ref kmeans.h implements a number of algorithm for **K-means quantization**: Lloyd @cite{lloyd82least}, an accelerated version by Elkan @cite{elkan03using}, and a large scale algorithm based on Approximate Nearest Neighbors (ANN). All algorithms support @c float or @c double data and can use the $l^1$ or the $l^2$ distance for clustering. Furthermore, all algorithms can take advantage of multiple CPU cores. Please see @subpage kmeans-fundamentals for a technical description of K-means and of the algorithms implemented here. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-starting Getting started <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The goal of K-means is to partition a dataset into $K$ &ldquo;compact&rdquo; clusters. 
The following example demonstrates using @ref kmeans.h in the C programming language to partition @c numData @c float vectors into compute @c numCenters clusters using Lloyd's algorithm: @code #include <vl/kmeans.h> double energy ; double * centers ; // Use float data and the L2 distance for clustering KMeans * kmeans = vl_kmeans_new (VLDistanceL2, VL_TYPE_FLOAT) ; // Use Lloyd algorithm vl_kmeans_set_algorithm (kmeans, VlKMeansLloyd) ; // Initialize the cluster centers by randomly sampling the data vl_kmeans_init_centers_with_rand_data (kmeans, data, dimension, numData, numCenters) ; // Run at most 100 iterations of cluster refinement using Lloyd algorithm vl_kmeans_set_max_num_iterations (kmeans, 100) ; vl_kmeans_refine_centers (kmeans, data, numData) ; // Obtain the energy of the solution energy = vl_kmeans_get_energy(kmeans) ; // Obtain the cluster centers centers = vl_kmeans_get_centers(kmeans) ; @endcode Once the centers have been obtained, new data points can be assigned to clusters by using the ::vl_kmeans_quantize function: @code vl_uint32 * assignments = vl_malloc(sizeof(vl_uint32) * numData) ; float * distances = vl_malloc(sizeof(float) * numData) ; vl_kmeans_quantize(kmeans, assignments, distances, data, numData) ; @endcode Alternatively, one can directly assign new pointers to the closest centers, without bothering with a ::VlKMeans object. There are several considerations that may impact the performance of KMeans. First, since K-means is usually based local optimization algorithm, the **initialization method** is important. 
The following initialization methods are supported: Method | Function | Description ---------------|-----------------------------------------|----------------------------------------------- Random samples | ::vl_kmeans_init_centers_with_rand_data | Random data points K-means++ | ::vl_kmeans_init_centers_plus_plus | Random selection biased towards diversity Custom | ::vl_kmeans_set_centers | Choose centers (useful to run quantization only) See @ref kmeans-init for further details. The initialization methods use a randomized selection of the data points; the random number generator init is controlled by ::vl_rand_init. The second important choice is the **optimization algorithm**. The following optimization algorithms are supported: Algorithm | Symbol | See | Description ------------|------------------|-------------------|----------------------------------------------- Lloyd | ::VlKMeansLloyd | @ref kmeans-lloyd | Alternate EM-style optimization Elkan | ::VlKMeansElkan | @ref kmeans-elkan | A speedup using triangular inequalities ANN | ::VlKMeansANN | @ref kmeans-ann | A speedup using approximated nearest neighbors See the relative sections for further details. These algorithm are iterative, and stop when either a **maximum number of iterations** (::vl_kmeans_set_max_num_iterations) is reached, or when the energy changes sufficiently slowly in one iteration (::vl_kmeans_set_min_energy_variation). All the three algorithms support multithreaded computations. The number of threads used is usually controlled globally by ::vl_set_num_threads. 
**/ /** <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @page kmeans-fundamentals K-means fundamentals @tableofcontents <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> Given $n$ points $\bx_1,\dots,\bx_n \in \real^d$, the goal of K-means is find $K$ `centers` $\bc_1,\dots,\bc_m \in \real^d$ and `assignments` $q_1,\dots,q_n \in \{1,\dots,K\}$ of the points to the centers such that the sum of distances \[ E(\bc_1,\dots,\bc_k,q_1,\dots,q_n) = \sum_{i=1}^n \|\bx_i - \bc_{q_i} \|_p^p \] is minimized. $K$-means is obtained for the case $p=2$ ($l^2$ norm), because in this case the optimal centers are the means of the input vectors assigned to them. Here the generalization $p=1$ ($l^1$ norm) will also be considered. Up to normalization, the K-means objective $E$ is also the average reconstruction error if the original points are approximated with the cluster centers. Thus K-means is used not only to group the input points into cluster, but also to `quantize` their values. K-means is widely used in computer vision, for example in the construction of vocabularies of visual features (visual words). In these applications the number $n$ of points to cluster and/or the number $K$ of clusters is often large. Unfortunately, minimizing the objective $E$ is in general a difficult combinatorial problem, so locally optimal or approximated solutions are sought instead. The basic K-means algorithm alternate between re-estimating the centers and the assignments (@ref kmeans-lloyd). Combined with a good initialization strategy (@ref kmeans-init) and, potentially, by re-running the optimization from a number of randomized starting states, this algorithm may attain satisfactory solutions in practice. However, despite its simplicity, Lloyd's algorithm is often too slow. A good replacement is Elkan's algorithm (@ref kmeans-elkan), which uses the triangular inequality to cut down significantly the cost of Lloyd's algorithm. 
Since this algorithm is otherwise equivalent, it should often be preferred. For very large problems (millions of point to clusters and hundreds, thousands, or more clusters to find), even Elkan's algorithm is not sufficiently fast. In these cases, one can resort to a variant of Lloyd's algorithm that uses an approximated nearest neighbors routine (@ref kmeans-ann). <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-init Initialization methods <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> All the $K$-means algorithms considered here find locally optimal solutions; as such the way they are initialized is important. @ref kmeans.h supports the following initialization algorithms: @par Random data samples The simplest initialization method is to sample $K$ points at random from the input data and use them as initial values for the cluster centers. @par K-means++ @cite{arthur07k-means} proposes a randomized initialization of the centers which improves upon random selection. The first center $\bc_1$ is selected at random from the data points $\bx_1, \dots, \bx_n $ and the distance from this center to all points $\|\bx_i - \bc_1\|_p^p$ is computed. Then the second center $\bc_2$ is selected at random from the data points with probability proportional to the distance. The procedure is repeated to obtain the other centers by using the minimum distance to the centers collected so far. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-lloyd Lloyd's algorithm <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The most common K-means method is Lloyd's algorithm @cite{lloyd82least}. This algorithm is based on the observation that, while jointly optimizing clusters and assignment is difficult, optimizing one given the other is easy. Lloyd's algorithm alternates the steps: 1. **Quantization.** Each point $\bx_i$ is reassigned to the center $\bc_{q_j}$ closer to it. 
This requires finding for each point the closest among $K$ other points, which is potentially slow. 2. **Center estimation.** Each center $\bc_q$ is updated to minimize its average distances to the points assigned to it. It is easy to show that the best center is the mean or median of the points, respectively if the $l^2$ or $l^1$ norm is considered. A naive implementation of the assignment step requires $O(dnK)$ operations, where $d$ is the dimensionality of the data, $n$ the number of data points, and $K$ the number of centers. Updating the centers is much cheaper: $O(dn)$ operations suffice to compute the $K$ means and a slightly higher cost is required for the medians. Clearly, the bottleneck is the assignment computation, and this is what the other K-means algorithm try to improve. During the iterations, it can happen that a cluster becomes empty. In this case, K-means automatically **&ldquo;restarts&rdquo; the cluster** center by selecting a training point at random. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-elkan Elkan's algorithm <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> Elkan's algorithm @cite{elkan03using} is a variation of Lloyd alternate optimization algorithm (@ref kmeans-lloyd) that uses the triangular inequality to avoid many distance calculations when assigning points to clusters. While much faster than Lloyd, Elkan's method uses storage proportional to the umber of clusters by data points, which makes it unpractical for a very large number of clusters. The idea of this algorithm is that, if a center update does not move them much, then most of the point-to-center computations can be avoided when the point-to-center assignments are recomputed. To detect which distances need evaluation, the triangular inequality is used to lower and upper bound distances after a center update. Elkan algorithms uses two key observations. 
First, one has
\[
\|\bx_i - \bc_{q_i}\|_p \leq \|\bc - \bc_{q_i}\|_p / 2
\quad\Rightarrow\quad
\|\bx_i - \bc_{q_i}\|_p \leq \|\bx_i - \bc\|_p.
\]
Thus if the distance between $\bx_i$ and its current center
$\bc_{q_i}$ is less than half the distance of the center $\bc_{q_i}$
to another center $\bc$, then $\bc$ can be skipped when the new
assignment for $\bx_i$ is searched. Checking this requires keeping
track of all the inter-center distances, but centers are typically a
small fraction of the training data, so overall this can be a
significant saving. In particular, if this condition is satisfied for
all the centers $\bc \not= \bc_{q_i}$, the point $\bx_i$ can be
skipped completely. Furthermore, the condition can be tested also
based on an upper bound $UB_i$ of $\|\bx_i - \bc_{q_i}\|_p$.

Second, if a center $\bc$ is updated to $\hat{\bc}$, then the new
distance from $\bx$ to $\hat{\bc}$ is bounded from below and above by
the triangle inequality:
\[
\|\bx - \bc\|_p - \|\bc - \hat{\bc}\|_p
\leq
\|\bx - \hat{\bc}\|_p
\leq
\|\bx - \bc\|_p + \|\bc - \hat{\bc}\|_p.
\]
This allows to maintain an upper bound on the distance of $\bx_i$ to
its current center $\bc_{q_i}$ and a lower bound to any other center
$\bc$:
@f{align*}
UB_i & \leftarrow UB_i + \|\bc_{q_i} - \hat{\bc}_{q_i} \|_p \\
LB_i(\bc) & \leftarrow LB_i(\bc) - \|\bc -\hat \bc\|_p.
@f}

Thus the K-means algorithm becomes:

1. **Initialization.** Compute $LB_i(\bc) = \|\bx_i - \bc\|_p$ for all
points and centers. Find the current assignments $q_i$ and bounds
$UB_i$ by finding the closest centers to each point: $UB_i =
\min_{\bc} LB_i(\bc)$.
2. **Center estimation.**
  1. Recompute all the centers based on the new means; call the
  updated version $\hat{\bc}$.
  2. Update all the bounds based on the distance $\|\bc - \hat\bc\|_p$
  as explained above.
  3. Set $\bc \leftarrow \hat\bc$ for all the centers and go to the
  next iteration.
3. **Quantization.**
  1.
Skip any point $\bx_i$ such that $UB_i \leq \frac{1}{2} \|\bc_{q_i} - \bc\|_p$ for all centers $\bc \not= \bc_{q_i}$. 2. For each remaining point $\bx_i$ and center $\bc \not= \bc_{q_i}$: 1. Skip $\bc$ if \[ UB_i \leq \frac{1}{2} \| \bc_{q_i} - \bc \| \quad\text{or}\quad UB_i \leq LB_i(\bc). \] The first condition reflects the first observation above; the second uses the bounds to decide if $\bc$ can be closer than the current center $\bc_{q_i}$ to the point $\bx_i$. If the center cannot be skipped, continue as follows. 3. Skip $\bc$ if the condition above is satisfied after making the upper bound tight: \[ UB_i = LB_i(\bc_{q_i}) = \| \bx_i - \bc_{q_i} \|_p. \] Note that the latter calculation can be done only once for $\bx_i$. If the center cannot be skipped still, continue as follows. 4. Tighten the lower bound too: \[ LB_i(\bc) = \| \bx_i - \bc \|_p. \] At this point both $UB_i$ and $LB_i(\bc)$ are tight. If $LB_i < UB_i$, then the point $\bx_i$ should be reassigned to $\bc$. Update $q_i$ to the index of center $\bc$ and reset $UB_i = LB_i(\bc)$. <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> @section kmeans-ann ANN algorithm <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --> The *Approximate Nearest Neighbor* (ANN) K-means algorithm @cite{beis97shape} @cite{silpa-anan08optimised} @cite{muja09fast} is a variant of Lloyd's algorithm (@ref kmeans-lloyd) uses a best-bin-first randomized KD-tree algorithm to approximately (and quickly) find the closest cluster center to each point. The KD-tree implementation is based on @ref kdtree. The algorithm can be summarized as follows: 1. **Quantization.** Each point $\bx_i$ is reassigned to the center $\bc_{q_j}$ closer to it. This starts by indexing the $K$ centers by a KD-tree and then using the latter to quickly find the closest center for every training point. The search is approximated to further improve speed. 
This opens up the possibility that a data point may receive an assignment that is *worse* than the current one. This is avoided by checking that the new assignment estimated by using ANN is an improvement; otherwise the old assignment is kept. 2. **Center estimation.** Each center $\bc_q$ is updated to minimize its average distances to the points assigned to it. It is easy to show that the best center is the mean or median of the points, respectively if the $l^2$ or $l^1$ norm is considered. The key is to trade-off carefully the speedup obtained by using the ANN algorithm and the loss in accuracy when retrieving neighbors. Due to the curse of dimensionality, KD-trees become less effective for higher dimensional data, so that the search cost, which in the best case is logarithmic with this data structure, may become effectively linear. This is somehow mitigated by the fact that new a new KD-tree is computed at each iteration, reducing the likelihood that points may get stuck with sub-optimal assignments. Experiments with the quantization of 128-dimensional SIFT features show that the ANN algorithm may use one quarter of the comparisons of Elkan's while retaining a similar solution accuracy. */ #include "kmeans.h" #include "generic.h" #include "mathop.h" #include <string.h> #ifdef _OPENMP #include <omp.h> #endif /* ================================================================ */ #ifndef VL_KMEANS_INSTANTIATING /** ------------------------------------------------------------------ ** @brief Reset state ** ** The function reset the state of the KMeans object. It deletes ** any stored centers, releasing the corresponding memory. This ** cancels the effect of seeding or setting the centers, but ** does not change the other configuration parameters. 
**/

VL_EXPORT void
vl_kmeans_reset (VlKMeans * self)
{
  self->numCenters = 0 ;
  self->dimension = 0 ;

  /* release any stored centers and the inter-center distance table */
  if (self->centers) vl_free(self->centers) ;
  if (self->centerDistances) vl_free(self->centerDistances) ;

  self->centers = NULL ;
  self->centerDistances = NULL ;
}

/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object
 ** @param dataType type of data (::VL_TYPE_FLOAT or ::VL_TYPE_DOUBLE)
 ** @param distance distance.
 ** @return new KMeans object instance.
 **/

VL_EXPORT VlKMeans *
vl_kmeans_new (vl_type dataType,
               VlVectorComparisonType distance)
{
  /* vl_calloc zeroes every field; the assignments below set the
     non-zero defaults */
  VlKMeans * self = vl_calloc(1, sizeof(VlKMeans)) ;

  self->algorithm = VlKMeansLloyd ;
  self->distance = distance ;
  self->dataType = dataType ;
  self->verbosity = 0 ;
  self->maxNumIterations = 100 ;
  self->minEnergyVariation = 1e-4 ;
  self->numRepetitions = 1 ;
  self->centers = NULL ;
  self->centerDistances = NULL ;
  self->numTrees = 3;
  self->maxNumComparisons = 100;

  /* harmless here (centers are NULL); guarantees a clean initial state */
  vl_kmeans_reset (self) ;
  return self ;
}

/** ------------------------------------------------------------------
 ** @brief Create a new KMeans object by copy
 ** @param kmeans KMeans object to copy.
 ** @return new copy.
**/

VL_EXPORT VlKMeans *
vl_kmeans_new_copy (VlKMeans const * kmeans)
{
  /* NOTE: vl_malloc, not vl_calloc — every field must be assigned below */
  VlKMeans * self = vl_malloc(sizeof(VlKMeans)) ;

  self->algorithm = kmeans->algorithm ;
  self->distance = kmeans->distance ;
  self->dataType = kmeans->dataType ;

  self->verbosity = kmeans->verbosity ;
  self->maxNumIterations = kmeans->maxNumIterations ;
  /* FIX: minEnergyVariation was not copied, leaving this field of the
     vl_malloc'd struct indeterminate in the copy */
  self->minEnergyVariation = kmeans->minEnergyVariation ;
  self->numRepetitions = kmeans->numRepetitions ;

  self->dimension = kmeans->dimension ;
  self->numCenters = kmeans->numCenters ;
  self->centers = NULL ;
  self->centerDistances = NULL ;

  self->numTrees = kmeans->numTrees;
  self->maxNumComparisons = kmeans->maxNumComparisons;

  /* deep-copy the centers, if any */
  if (kmeans->centers) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->dimension * self->numCenters ;
    self->centers = vl_malloc(dataSize) ;
    memcpy (self->centers, kmeans->centers, dataSize) ;
  }

  /* deep-copy the inter-center distance table, if any */
  if (kmeans->centerDistances) {
    vl_size dataSize = vl_get_type_size(self->dataType) * self->numCenters * self->numCenters ;
    self->centerDistances = vl_malloc(dataSize) ;
    memcpy (self->centerDistances, kmeans->centerDistances, dataSize) ;
  }

  return self ;
}

/** ------------------------------------------------------------------
 ** @brief Deletes a KMeans object
 ** @param self KMeans object instance.
 **
 ** The function deletes the KMeans object instance created
 ** by ::vl_kmeans_new.
**/

VL_EXPORT void
vl_kmeans_delete (VlKMeans * self)
{
  /* free centers/distance tables first, then the object itself */
  vl_kmeans_reset (self) ;
  vl_free (self) ;
}

/* a helper structure: views one data dimension through a permutation,
   used by the qsort instantiation below */
typedef struct _VlKMeansSortWrapper {
  vl_uint32 * permutation ;
  void const * data ;
  vl_size stride ;
} VlKMeansSortWrapper ;

/* ---------------------------------------------------------------- */
/* Instantiate shuffle algorithm */

#define VL_SHUFFLE_type vl_uindex
#define VL_SHUFFLE_prefix _vl_kmeans
#include "shuffle-def.h"

/* #ifdef VL_KMEANS_INSTANTIATING */
#endif

/* ================================================================ */
#ifdef VL_KMEANS_INSTANTIATING

/* ---------------------------------------------------------------- */
/*                                                      Set centers */
/* ---------------------------------------------------------------- */

/* Store a user-provided set of centers (useful to run quantization only). */
static void
VL_XCAT(_vl_kmeans_set_centers_, SFX)
(VlKMeans * self,
 TYPE const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;
  memcpy ((TYPE*)self->centers, centers, sizeof(TYPE) * dimension * numCenters) ;
}

/* ---------------------------------------------------------------- */
/*                                                   Random seeding */
/* ---------------------------------------------------------------- */

/* Initialize the centers by picking numCenters distinct data points at
   random (duplicates are rejected while enough candidates remain). */
static void
VL_XCAT(_vl_kmeans_init_centers_with_rand_data_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex i, j, k ;
  VlRand * rand = vl_get_rand () ;

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  {
    vl_uindex * perm = vl_malloc (sizeof(vl_uindex) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
    VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
    VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif
    TYPE * distances = vl_malloc (sizeof(TYPE) * numCenters) ;

    /* get a random permutation of the data point */
    for (i = 0 ; i < numData ; ++i) perm[i] = i ;
    _vl_kmeans_shuffle (perm, numData, rand) ;

    for (k = 0, i = 0 ; k < numCenters ; ++ i) {

      /* compare the next data point to all centers collected so far
         to detect duplicates (if there are enough left) */
      if (numCenters - k < numData - i) {
        vl_bool duplicateDetected = VL_FALSE ;
        VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distances,
                                                              dimension,
                                                              data + dimension * perm[i], 1,
                                                              (TYPE*)self->centers, k,
                                                              distFn) ;
        for (j = 0 ; j < k ; ++j) {
          duplicateDetected |= (distances[j] == 0) ;
        }
        if (duplicateDetected) continue ;
      }

      /* ok, it is not a duplicate so we can accept it! */
      memcpy ((TYPE*)self->centers + dimension * k,
              data + dimension * perm[i],
              sizeof(TYPE) * dimension) ;
      k ++ ;
    }
    vl_free(distances) ;
    vl_free(perm) ;
  }
}

/* ---------------------------------------------------------------- */
/*                                                 kmeans++ seeding */
/* ---------------------------------------------------------------- */

/* K-means++ initialization: each new center is sampled with probability
   proportional to its distance from the centers chosen so far. */
static void
VL_XCAT(_vl_kmeans_init_centers_plus_plus_, SFX)
(VlKMeans * self,
 TYPE const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_uindex x, c ;
  VlRand * rand = vl_get_rand () ;
  TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ;
  TYPE * minDistances = vl_malloc (sizeof(TYPE) * numData) ;
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

  self->dimension = dimension ;
  self->numCenters = numCenters ;
  self->centers = vl_malloc (sizeof(TYPE) * dimension * numCenters) ;

  for (x = 0 ; x < numData ; ++x) {
    minDistances[x] = (TYPE) VL_INFINITY_D ;
  }

  /* select the first point at random */
  x = vl_rand_uindex (rand, numData) ;
  c = 0 ;
  while (1) {
    TYPE energy = 0 ;
    TYPE acc = 0 ;
    TYPE thresh = (TYPE) vl_rand_real1 (rand) ;

    /* append the chosen point x as center number c */
    memcpy ((TYPE*)self->centers + c * dimension,
            data + x * dimension,
            sizeof(TYPE) * dimension) ;

    c ++ ;
    if (c == numCenters) break ;

    /* distances from the newest center to every data point */
    VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)
    (distances,
     dimension,
     (TYPE*)self->centers + (c - 1) * dimension, 1,
     data, numData,
     distFn) ;

    /* fold into the running minimum distance and total energy */
    for (x = 0 ; x < numData ; ++x) {
      minDistances[x] = VL_MIN(minDistances[x], distances[x]) ;
      energy += minDistances[x] ;
    }

    /* sample the next center proportionally to minDistances */
    for (x = 0 ; x < numData - 1 ; ++x) {
      acc += minDistances[x] ;
      if (acc >= thresh * energy) break ;
    }
  }

  vl_free(distances) ;
  vl_free(minDistances) ;
}

/* ---------------------------------------------------------------- */
/*                                                     Quantization */
/* ---------------------------------------------------------------- */

/* Assign every data point to its closest center (exhaustive search);
   optionally returns the distance to that center. */
static void
VL_XCAT(_vl_kmeans_quantize_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,
 TYPE * distances,
 TYPE const * data,
 vl_size numData)
{
  vl_index i ;

#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

#ifdef _OPENMP
#pragma omp parallel \
            shared(self, distances, assignments, numData, distFn, data) \
            num_threads(vl_get_max_threads())
#endif
  {
    /* vl_malloc cannot be used here if mapped to MATLAB malloc */
    TYPE * distanceToCenters = malloc(sizeof(TYPE) * self->numCenters) ;

#ifdef _OPENMP
#pragma omp for
#endif
    for (i = 0 ; i < (signed)numData ; ++i) {
      vl_uindex k ;
      TYPE bestDistance = (TYPE) VL_INFINITY_D ;
      VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(distanceToCenters,
                                                            self->dimension,
                                                            data + self->dimension * i, 1,
                                                            (TYPE*)self->centers, self->numCenters,
                                                            distFn) ;
      for (k = 0 ; k < self->numCenters ; ++k) {
        if (distanceToCenters[k] < bestDistance) {
          bestDistance = distanceToCenters[k] ;
          assignments[i] = (vl_uint32)k ;
        }
      }
      if (distances) distances[i] = bestDistance ;
    }

    free(distanceToCenters) ;
  }
}

/* ---------------------------------------------------------------- */
/*                                                 ANN quantization */
/* ---------------------------------------------------------------- */

/* Approximate assignment: index the centers with a KD-forest and query it
   for each point.  When `update` is set, the ANN result is accepted only
   if it improves on the point's current assignment. */
static void
VL_XCAT(_vl_kmeans_quantize_ann_, SFX)
(VlKMeans * self,
 vl_uint32 * assignments,
 TYPE * distances,
 TYPE const * data,
 vl_size numData,
 vl_bool update)
{
#if (FLT == VL_TYPE_FLOAT)
  VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ;
#else
  VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ;
#endif

  VlKDForest * forest = vl_kdforest_new(self->dataType,self->dimension,self->numTrees, self->distance) ;
  vl_kdforest_set_max_num_comparisons(forest,self->maxNumComparisons);
  vl_kdforest_set_thresholding_method(forest,VL_KDTREE_MEDIAN);
  vl_kdforest_build(forest,self->numCenters,self->centers);

  //note #pragma omp parallel default(none)
#ifdef _OPENMP
#pragma omp parallel \
            num_threads(vl_get_max_threads()) \
            shared(self, forest, update, assignments, distances, data, numData, distFn)
#endif
  {
    VlKDForestNeighbor neighbor ;
    VlKDForestSearcher * searcher ;
    vl_index x;

    /* searcher creation mutates the forest — serialize it */
#ifdef _OPENMP
#pragma omp critical
#endif
    searcher = vl_kdforest_new_searcher (forest) ;

#ifdef _OPENMP
#pragma omp for
#endif
    for(x = 0 ; x < (signed)numData ; ++x) {
      vl_kdforestsearcher_query (searcher, &neighbor, 1, (TYPE const *) (data + x*self->dimension));

      if (distances) {
        if(!update) {
          distances[x] = (TYPE) neighbor.distance;
          assignments[x] = (vl_uint32) neighbor.index ;
        } else {
          /* keep the previous assignment if the ANN answer is worse */
          TYPE prevDist = (TYPE) distFn(self->dimension,
                                        data + self->dimension * x,
                                        (TYPE*)self->centers + self->dimension *assignments[x]);
          if (prevDist > (TYPE) neighbor.distance) {
            distances[x] = (TYPE) neighbor.distance ;
            assignments[x] = (vl_uint32) neighbor.index ;
          } else {
            distances[x] = prevDist ;
          }
        }
      } else {
        assignments[x] = (vl_uint32) neighbor.index ;
      }
    } /* end for */
  } /* end of parallel region */

  vl_kdforest_delete(forest);
}

/* ---------------------------------------------------------------- */
/*                                                 Helper functions */
/* ---------------------------------------------------------------- */

/* The sorting routine is used to find increasing
permutation of each * data dimension. This is used to quickly find the median for l1 * distance clustering. */ VL_INLINE TYPE VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp) (VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB) { return ((TYPE*)array->data) [array->permutation[indexA] * array->stride] - ((TYPE*)array->data) [array->permutation[indexB] * array->stride] ; } VL_INLINE void VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap) (VlKMeansSortWrapper * array, vl_uindex indexA, vl_uindex indexB) { vl_uint32 tmp = array->permutation[indexA] ; array->permutation[indexA] = array->permutation[indexB] ; array->permutation[indexB] = tmp ; } #define VL_QSORT_prefix VL_XCAT3(_vl_kmeans_, SFX, _qsort) #define VL_QSORT_array VlKMeansSortWrapper* #define VL_QSORT_cmp VL_XCAT3(_vl_kmeans_, SFX, _qsort_cmp) #define VL_QSORT_swap VL_XCAT3(_vl_kmeans_, SFX, _qsort_swap) #include "qsort-def.h" static void VL_XCAT(_vl_kmeans_sort_data_helper_, SFX) (VlKMeans * self, vl_uint32 * permutations, TYPE const * data, vl_size numData) { vl_uindex d, x ; for (d = 0 ; d < self->dimension ; ++d) { VlKMeansSortWrapper array ; array.permutation = permutations + d * numData ; array.data = data + d ; array.stride = self->dimension ; for (x = 0 ; x < numData ; ++x) { array.permutation[x] = (vl_uint32)x ; } VL_XCAT3(_vl_kmeans_, SFX, _qsort_sort)(&array, numData) ; } } /* ---------------------------------------------------------------- */ /* Lloyd refinement */ /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size c, d, x, iteration ; double previousEnergy = VL_INFINITY_D ; double initialEnergy = VL_INFINITY_D ; double energy ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; vl_uint32 * permutations = NULL ; vl_size * 
numSeenSoFar = NULL ; VlRand * rand = vl_get_rand () ; vl_size totNumRestartedCenters = 0 ; vl_size numRestartedCenters = 0 ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) { /* assign data to cluters */ VL_XCAT(_vl_kmeans_quantize_, SFX)(self, assignments, distances, data, numData) ; /* compute energy */ energy = 0 ; for (x = 0 ; x < numData ; ++x) energy += distances[x] ; if (self->verbosity) { VL_PRINTF("kmeans: Lloyd iter %d: energy = %g\n", iteration, energy) ; } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: Lloyd terminating because maximum number of iterations reached\n") ; } break ; } if (energy == previousEnergy) { if (self->verbosity) { VL_PRINTF("kmeans: Lloyd terminating because the algorithm fully converged\n") ; } break ; } if (iteration == 0) { initialEnergy = energy ; } else { double eps = (previousEnergy - energy) / (initialEnergy - energy) ; if (eps < self->minEnergyVariation) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ; } break ; } } /* begin next iteration */ previousEnergy = energy ; /* update clusters */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < numData ; ++x) { clusterMasses[assignments[x]] ++ ; } numRestartedCenters = 0 ; switch (self->distance) { case VlDistanceL2: memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < numData ; ++x) { TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < 
self->numCenters ; ++c) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * self->dimension] ; } numSeenSoFar[c] ++ ; } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } } break ; default: abort(); } /* done compute centers */ totNumRestartedCenters += numRestartedCenters ; if (self->verbosity && numRestartedCenters) { VL_PRINTF("kmeans: Lloyd iter %d: restarted %d centers\n", iteration, numRestartedCenters) ; } } /* next Lloyd iteration */ if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; return energy ; } static double VL_XCAT(_vl_kmeans_update_center_distances_, SFX) (VlKMeans * self) { #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif if (! 
self->centerDistances) { self->centerDistances = vl_malloc (sizeof(TYPE) * self->numCenters * self->numCenters) ; } VL_XCAT(vl_eval_vector_comparison_on_all_pairs_, SFX)(self->centerDistances, self->dimension, self->centers, self->numCenters, NULL, 0, distFn) ; return self->numCenters * (self->numCenters - 1) / 2 ; } static double VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size c, d, x, iteration ; double initialEnergy = VL_INFINITY_D ; double previousEnergy = VL_INFINITY_D ; double energy ; vl_uint32 * permutations = NULL ; vl_size * numSeenSoFar = NULL ; VlRand * rand = vl_get_rand () ; vl_size totNumRestartedCenters = 0 ; vl_size numRestartedCenters = 0 ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } for (energy = VL_INFINITY_D, iteration = 0; 1 ; ++ iteration) { /* assign data to cluters */ VL_XCAT(_vl_kmeans_quantize_ann_, SFX)(self, assignments, distances, data, numData, iteration > 0) ; /* compute energy */ energy = 0 ; for (x = 0 ; x < numData ; ++x) energy += distances[x] ; if (self->verbosity) { VL_PRINTF("kmeans: ANN iter %d: energy = %g\n", iteration, energy) ; } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the maximum number of iterations has been reached\n") ; } break ; } if (energy == previousEnergy) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the algorithm fully converged\n") ; } break ; } if (iteration == 0) { initialEnergy = energy ; } else { double eps = (previousEnergy - 
energy) / (initialEnergy - energy) ; if (eps < self->minEnergyVariation) { if (self->verbosity) { VL_PRINTF("kmeans: ANN terminating because the energy relative variation was less than %f\n", self->minEnergyVariation) ; } break ; } } /* begin next iteration */ previousEnergy = energy ; /* update clusters */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < numData ; ++x) { clusterMasses[assignments[x]] ++ ; } numRestartedCenters = 0 ; switch (self->distance) { case VlDistanceL2: memset(self->centers, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < numData ; ++x) { TYPE * cpt = (TYPE*)self->centers + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { ((TYPE*)self->centers) [d + c * self->dimension] = data [d + perm[x] * self->dimension] ; } numSeenSoFar[c] ++ ; } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = (TYPE*)self->centers + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } } break ; default: VL_PRINT("bad distance set: %d\n",self->distance); abort(); } 
/* done compute centers */ totNumRestartedCenters += numRestartedCenters ; if (self->verbosity && numRestartedCenters) { VL_PRINTF("kmeans: ANN iter %d: restarted %d centers\n", iteration, numRestartedCenters) ; } } if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; return energy ; } /* ---------------------------------------------------------------- */ /* Elkan refinement */ /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { vl_size d, iteration ; vl_index x ; vl_uint32 c, j ; vl_bool allDone ; TYPE * distances = vl_malloc (sizeof(TYPE) * numData) ; vl_uint32 * assignments = vl_malloc (sizeof(vl_uint32) * numData) ; vl_size * clusterMasses = vl_malloc (sizeof(vl_size) * numData) ; VlRand * rand = vl_get_rand () ; #if (FLT == VL_TYPE_FLOAT) VlFloatVectorComparisonFunction distFn = vl_get_vector_comparison_function_f(self->distance) ; #else VlDoubleVectorComparisonFunction distFn = vl_get_vector_comparison_function_d(self->distance) ; #endif TYPE * nextCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ; TYPE * pointToClosestCenterUB = vl_malloc (sizeof(TYPE) * numData) ; vl_bool * pointToClosestCenterUBIsStrict = vl_malloc (sizeof(vl_bool) * numData) ; TYPE * pointToCenterLB = vl_malloc (sizeof(TYPE) * numData * self->numCenters) ; TYPE * newCenters = vl_malloc(sizeof(TYPE) * self->dimension * self->numCenters) ; TYPE * centerToNewCenterDistances = vl_malloc (sizeof(TYPE) * self->numCenters) ; vl_uint32 * permutations = NULL ; vl_size * numSeenSoFar = NULL ; double energy ; vl_size totDistanceComputationsToInit = 0 ; vl_size totDistanceComputationsToRefreshUB = 0 ; vl_size totDistanceComputationsToRefreshLB = 0 ; vl_size totDistanceComputationsToRefreshCenterDistances = 0 ; vl_size 
totDistanceComputationsToNewCenters = 0 ; vl_size totDistanceComputationsToFinalize = 0 ; vl_size totNumRestartedCenters = 0 ; if (self->distance == VlDistanceL1) { permutations = vl_malloc(sizeof(vl_uint32) * numData * self->dimension) ; numSeenSoFar = vl_malloc(sizeof(vl_size) * self->numCenters) ; VL_XCAT(_vl_kmeans_sort_data_helper_, SFX)(self, permutations, data, numData) ; } /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Initialization */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* An iteration is: get_new_centers + reassign + get_energy. This counts as iteration 0, where get_new_centers is assumed to be performed before calling the train function by the initialization function */ /* update distances between centers */ totDistanceComputationsToInit += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ; /* assigmen points to the initial centers and initialize bounds */ memset(pointToCenterLB, 0, sizeof(TYPE) * self->numCenters * numData) ; for (x = 0 ; x < (signed)numData ; ++x) { TYPE distance ; /* do the first center */ assignments[x] = 0 ; distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + 0) ; pointToClosestCenterUB[x] = distance ; pointToClosestCenterUBIsStrict[x] = VL_TRUE ; pointToCenterLB[0 + x * self->numCenters] = distance ; totDistanceComputationsToInit += 1 ; /* do other centers */ for (c = 1 ; c < self->numCenters ; ++c) { /* Can skip if the center assigned so far is twice as close as its distance to the center under consideration */ if (((self->distance == VlDistanceL1) ? 
2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + assignments[x] * self->numCenters]) { continue ; } distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + c * self->dimension) ; pointToCenterLB[c + x * self->numCenters] = distance ; totDistanceComputationsToInit += 1 ; if (distance < pointToClosestCenterUB[x]) { pointToClosestCenterUB[x] = distance ; assignments[x] = c ; } } } /* compute UB on energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++x) { energy += pointToClosestCenterUB[x] ; } if (self->verbosity) { VL_PRINTF("kmeans: Elkan iter 0: energy = %g, dist. calc. = %d\n", energy, totDistanceComputationsToInit) ; } /* #define SANITY*/ #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies after initial assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f\n", cc, xx, a, b) ; } } } #endif /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Iterations */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ for (iteration = 1 ; 1; ++iteration) { vl_size numDistanceComputationsToRefreshUB = 0 ; vl_size numDistanceComputationsToRefreshLB = 0 ; vl_size numDistanceComputationsToRefreshCenterDistances = 0 ; vl_size numDistanceComputationsToNewCenters = 0 ; vl_size numRestartedCenters = 0 ; /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Compute new centers */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ memset(clusterMasses, 0, sizeof(vl_size) * numData) ; for (x = 0 ; x < (signed)numData ; ++x) { 
clusterMasses[assignments[x]] ++ ; } switch (self->distance) { case VlDistanceL2: memset(newCenters, 0, sizeof(TYPE) * self->dimension * self->numCenters) ; for (x = 0 ; x < (signed)numData ; ++x) { TYPE * cpt = newCenters + assignments[x] * self->dimension ; TYPE const * xpt = data + x * self->dimension ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] += xpt[d] ; } } for (c = 0 ; c < self->numCenters ; ++c) { TYPE * cpt = newCenters + c * self->dimension ; if (clusterMasses[c] > 0) { TYPE mass = clusterMasses[c] ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] /= mass ; } } else { /* restart the center */ vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; case VlDistanceL1: for (d = 0 ; d < self->dimension ; ++d) { vl_uint32 * perm = permutations + d * numData ; memset(numSeenSoFar, 0, sizeof(vl_size) * self->numCenters) ; for (x = 0; x < (signed)numData ; ++x) { c = assignments[perm[x]] ; if (2 * numSeenSoFar[c] < clusterMasses[c]) { newCenters [d + c * self->dimension] = data [d + perm[x] * self->dimension] ; } numSeenSoFar[c] ++ ; } } /* restart the centers as required */ for (c = 0 ; c < self->numCenters ; ++c) { if (clusterMasses[c] == 0) { TYPE * cpt = newCenters + c * self->dimension ; vl_uindex x = vl_rand_uindex(rand, numData) ; numRestartedCenters ++ ; for (d = 0 ; d < self->dimension ; ++d) { cpt[d] = data[x * self->dimension + d] ; } } } break ; default: abort(); } /* done compute centers */ /* compute the distance from the old centers to the new centers */ for (c = 0 ; c < self->numCenters ; ++c) { TYPE distance = distFn(self->dimension, newCenters + c * self->dimension, (TYPE*)self->centers + c * self->dimension) ; centerToNewCenterDistances[c] = distance ; numDistanceComputationsToNewCenters += 1 ; } /* make the new centers current */ { TYPE * tmp = self->centers ; self->centers = newCenters ; newCenters = tmp ; } /* 
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Reassign points to a centers */ /* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ /* Update distances between centers. */ numDistanceComputationsToRefreshCenterDistances += VL_XCAT(_vl_kmeans_update_center_distances_, SFX)(self) ; for (c = 0 ; c < self->numCenters ; ++c) { nextCenterDistances[c] = (TYPE) VL_INFINITY_D ; for (j = 0 ; j < self->numCenters ; ++j) { if (j == c) continue ; nextCenterDistances[c] = VL_MIN(nextCenterDistances[c], ((TYPE*)self->centerDistances) [j + c * self->numCenters]) ; } } /* Update upper bounds on point-to-closest-center distances based on the center variation. */ for (x = 0 ; x < (signed)numData ; ++x) { TYPE a = pointToClosestCenterUB[x] ; TYPE b = centerToNewCenterDistances[assignments[x]] ; if (self->distance == VlDistanceL1) { pointToClosestCenterUB[x] = a + b ; } else { #if (FLT == VL_TYPE_FLOAT) TYPE sqrtab = sqrtf (a * b) ; #else TYPE sqrtab = sqrt (a * b) ; #endif pointToClosestCenterUB[x] = a + b + 2.0 * sqrtab ; } pointToClosestCenterUBIsStrict[x] = VL_FALSE ; } /* Update lower bounds on point-to-center distances based on the center variation. 
*/ #if defined(_OPENMP) #pragma omp parallel for default(shared) private(x,c) num_threads(vl_get_max_threads()) #endif for (x = 0 ; x < (signed)numData ; ++x) { for (c = 0 ; c < self->numCenters ; ++c) { TYPE a = pointToCenterLB[c + x * self->numCenters] ; TYPE b = centerToNewCenterDistances[c] ; if (a < b) { pointToCenterLB[c + x * self->numCenters] = 0 ; } else { if (self->distance == VlDistanceL1) { pointToCenterLB[c + x * self->numCenters] = a - b ; } else { #if (FLT == VL_TYPE_FLOAT) TYPE sqrtab = sqrtf (a * b) ; #else TYPE sqrtab = sqrt (a * b) ; #endif pointToCenterLB[c + x * self->numCenters] = a + b - 2.0 * sqrtab ; } } } } #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies before assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ; } } } #endif /* Scan the data and do the reassignments. Use the bounds to skip as many point-to-center distance calculations as possible. */ allDone = VL_TRUE ; #if defined(_OPENMP) #pragma omp parallel for \ shared(self,numData, \ pointToClosestCenterUB,pointToCenterLB, \ nextCenterDistances,pointToClosestCenterUBIsStrict, \ assignments,data,distFn,allDone) \ private(c,x) \ reduction(+:numDistanceComputationsToRefreshUB,numDistanceComputationsToRefreshLB) \ num_threads(vl_get_max_threads()) #endif for (x = 0 ; x < (signed)numData ; ++ x) { /* A point x sticks with its current center assignmets[x] the UB to d(x, c[assigmnets[x]]) is not larger than half the distance of c[assigments[x]] to any other center c. */ if (((self->distance == VlDistanceL1) ? 
2.0 : 4.0) * pointToClosestCenterUB[x] <= nextCenterDistances[assignments[x]]) { continue ; } for (c = 0 ; c < self->numCenters ; ++c) { vl_uint32 cx = assignments[x] ; TYPE distance ; /* The point is not reassigned to a given center c if either: 0 - c is already the assigned center 1 - The UB of d(x, c[assignments[x]]) is smaller than half the distance of c[assigments[x]] to c, OR 2 - The UB of d(x, c[assignmets[x]]) is smaller than the LB of the distance of x to c. */ if (cx == c) { continue ; } if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + cx * self->numCenters]) { continue ; } if (pointToClosestCenterUB[x] <= pointToCenterLB [c + x * self->numCenters]) { continue ; } /* If the UB is loose, try recomputing it and test again */ if (! pointToClosestCenterUBIsStrict[x]) { distance = distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension * cx) ; pointToClosestCenterUB[x] = distance ; pointToClosestCenterUBIsStrict[x] = VL_TRUE ; pointToCenterLB[cx + x * self->numCenters] = distance ; numDistanceComputationsToRefreshUB += 1 ; if (((self->distance == VlDistanceL1) ? 2.0 : 4.0) * pointToClosestCenterUB[x] <= ((TYPE*)self->centerDistances) [c + cx * self->numCenters]) { continue ; } if (pointToClosestCenterUB[x] <= pointToCenterLB [c + x * self->numCenters]) { continue ; } } /* Now the UB is strict (equal to d(x, assignments[x])), but we still could not exclude that x should be reassigned to c. 
We therefore compute the distance, update the LB, and check if a reassigmnet must be made */ distance = distFn(self->dimension, data + x * self->dimension, (TYPE*)self->centers + c * self->dimension) ; numDistanceComputationsToRefreshLB += 1 ; pointToCenterLB[c + x * self->numCenters] = distance ; if (distance < pointToClosestCenterUB[x]) { assignments[x] = c ; pointToClosestCenterUB[x] = distance ; allDone = VL_FALSE ; /* the UB strict flag is already set here */ } } /* assign center */ } /* next data point */ totDistanceComputationsToRefreshUB += numDistanceComputationsToRefreshUB ; totDistanceComputationsToRefreshLB += numDistanceComputationsToRefreshLB ; totDistanceComputationsToRefreshCenterDistances += numDistanceComputationsToRefreshCenterDistances ; totDistanceComputationsToNewCenters += numDistanceComputationsToNewCenters ; totNumRestartedCenters += numRestartedCenters ; #ifdef SANITY { int xx ; int cc ; TYPE tol = 1e-5 ; VL_PRINTF("inconsistencies after assignments:\n"); for (xx = 0 ; xx < numData ; ++xx) { for (cc = 0 ; cc < self->numCenters ; ++cc) { TYPE a = pointToCenterLB[cc + xx * self->numCenters] ; TYPE b = distFn(self->dimension, data + self->dimension * xx, (TYPE*)self->centers + self->dimension * cc) ; if (cc == assignments[xx]) { TYPE z = pointToClosestCenterUB[xx] ; if (z+tol<b) VL_PRINTF("UB %d %d = %f < %f\n", cc, xx, z, b) ; } if (a>b+tol) VL_PRINTF("LB %d %d = %f > %f (assign = %d)\n", cc, xx, a, b, assignments[xx]) ; } } } #endif /* compute UB on energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++x) { energy += pointToClosestCenterUB[x] ; } if (self->verbosity) { vl_size numDistanceComputations = numDistanceComputationsToRefreshUB + numDistanceComputationsToRefreshLB + numDistanceComputationsToRefreshCenterDistances + numDistanceComputationsToNewCenters ; VL_PRINTF("kmeans: Elkan iter %d: energy <= %g, dist. calc. 
= %d\n", iteration, energy, numDistanceComputations) ; if (numRestartedCenters) { VL_PRINTF("kmeans: Elkan iter %d: restarted %d centers\n", iteration, energy, numRestartedCenters) ; } if (self->verbosity > 1) { VL_PRINTF("kmeans: Elkan iter %d: total dist. calc. per type: " "UB: %.1f%% (%d), LB: %.1f%% (%d), " "intra_center: %.1f%% (%d), " "new_center: %.1f%% (%d)\n", iteration, 100.0 * numDistanceComputationsToRefreshUB / numDistanceComputations, numDistanceComputationsToRefreshUB, 100.0 *numDistanceComputationsToRefreshLB / numDistanceComputations, numDistanceComputationsToRefreshLB, 100.0 * numDistanceComputationsToRefreshCenterDistances / numDistanceComputations, numDistanceComputationsToRefreshCenterDistances, 100.0 * numDistanceComputationsToNewCenters / numDistanceComputations, numDistanceComputationsToNewCenters) ; } } /* check termination conditions */ if (iteration >= self->maxNumIterations) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because maximum number of iterations reached\n") ; } break ; } if (allDone) { if (self->verbosity) { VL_PRINTF("kmeans: Elkan terminating because the algorithm fully converged\n") ; } break ; } } /* next Elkan iteration */ /* compute true energy */ energy = 0 ; for (x = 0 ; x < (signed)numData ; ++ x) { vl_uindex cx = assignments [x] ; energy += distFn(self->dimension, data + self->dimension * x, (TYPE*)self->centers + self->dimension * cx) ; totDistanceComputationsToFinalize += 1 ; } { vl_size totDistanceComputations = totDistanceComputationsToInit + totDistanceComputationsToRefreshUB + totDistanceComputationsToRefreshLB + totDistanceComputationsToRefreshCenterDistances + totDistanceComputationsToNewCenters + totDistanceComputationsToFinalize ; double saving = (double)totDistanceComputations / (iteration * self->numCenters * numData) ; if (self->verbosity) { VL_PRINTF("kmeans: Elkan: total dist. 
calc.: %d (%.2f %% of Lloyd)\n", totDistanceComputations, saving * 100.0) ; if (totNumRestartedCenters) { VL_PRINTF("kmeans: Elkan: there have been %d restarts\n", totNumRestartedCenters) ; } } if (self->verbosity > 1) { VL_PRINTF("kmeans: Elkan: total dist. calc. per type: " "init: %.1f%% (%d), UB: %.1f%% (%d), LB: %.1f%% (%d), " "intra_center: %.1f%% (%d), " "new_center: %.1f%% (%d), " "finalize: %.1f%% (%d)\n", 100.0 * totDistanceComputationsToInit / totDistanceComputations, totDistanceComputationsToInit, 100.0 * totDistanceComputationsToRefreshUB / totDistanceComputations, totDistanceComputationsToRefreshUB, 100.0 *totDistanceComputationsToRefreshLB / totDistanceComputations, totDistanceComputationsToRefreshLB, 100.0 * totDistanceComputationsToRefreshCenterDistances / totDistanceComputations, totDistanceComputationsToRefreshCenterDistances, 100.0 * totDistanceComputationsToNewCenters / totDistanceComputations, totDistanceComputationsToNewCenters, 100.0 * totDistanceComputationsToFinalize / totDistanceComputations, totDistanceComputationsToFinalize) ; } } if (permutations) { vl_free(permutations) ; } if (numSeenSoFar) { vl_free(numSeenSoFar) ; } vl_free(distances) ; vl_free(assignments) ; vl_free(clusterMasses) ; vl_free(nextCenterDistances) ; vl_free(pointToClosestCenterUB) ; vl_free(pointToClosestCenterUBIsStrict) ; vl_free(pointToCenterLB) ; vl_free(newCenters) ; vl_free(centerToNewCenterDistances) ; return energy ; } /* ---------------------------------------------------------------- */ static double VL_XCAT(_vl_kmeans_refine_centers_, SFX) (VlKMeans * self, TYPE const * data, vl_size numData) { switch (self->algorithm) { case VlKMeansLloyd: return VL_XCAT(_vl_kmeans_refine_centers_lloyd_, SFX)(self, data, numData) ; break ; case VlKMeansElkan: return VL_XCAT(_vl_kmeans_refine_centers_elkan_, SFX)(self, data, numData) ; break ; case VlKMeansANN: return VL_XCAT(_vl_kmeans_refine_centers_ann_, SFX)(self, data, numData) ; break ; default: abort() ; } } /* 
VL_KMEANS_INSTANTIATING */

#else

/* Instantiate the templated implementation above once per numeric type:
   this file re-includes itself with FLT/TYPE/SFX set first for float
   (suffix _f) and then for double (suffix _d). */
#ifndef __DOXYGEN__
#define FLT VL_TYPE_FLOAT
#define TYPE float
#define SFX f
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"

#define FLT VL_TYPE_DOUBLE
#define TYPE double
#define SFX d
#define VL_KMEANS_INSTANTIATING
#include "kmeans.c"
#endif

/* VL_KMEANS_INSTANTIATING */
#endif

/* ================================================================ */
#ifndef VL_KMEANS_INSTANTIATING

/** ------------------------------------------------------------------
 ** @brief Set centers
 ** @param self KMeans object.
 ** @param centers centers to copy.
 ** @param dimension data dimension.
 ** @param numCenters number of centers.
 **/

VL_EXPORT void
vl_kmeans_set_centers
(VlKMeans * self,
 void const * centers,
 vl_size dimension,
 vl_size numCenters)
{
  vl_kmeans_reset (self) ;

  /* dispatch on the numeric type the object was configured with */
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_set_centers_f
      (self, (float const *)centers, dimension, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_set_centers_d
      (self, (double const *)centers, dimension, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief init centers by randomly sampling data
 ** @param self KMeans object.
 ** @param data data to sample from.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of centers.
 **
 ** The function inits the KMeans centers by randomly sampling
 ** the data @a data.
**/

VL_EXPORT void
vl_kmeans_init_centers_with_rand_data
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_kmeans_reset (self) ;

  /* dispatch on the numeric type the object was configured with */
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_init_centers_with_rand_data_f
      (self, (float const *)data, dimension, numData, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_init_centers_with_rand_data_d
      (self, (double const *)data, dimension, numData, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Seed centers by the KMeans++ algorithm
 ** @param self KMeans object.
 ** @param data data to sample from.
 ** @param dimension data dimension.
 ** @param numData number of data points.
 ** @param numCenters number of centers.
 **/

VL_EXPORT void
vl_kmeans_init_centers_plus_plus
(VlKMeans * self,
 void const * data,
 vl_size dimension,
 vl_size numData,
 vl_size numCenters)
{
  vl_kmeans_reset (self) ;

  /* dispatch on the numeric type the object was configured with */
  switch (self->dataType) {
    case VL_TYPE_FLOAT :
      _vl_kmeans_init_centers_plus_plus_f
      (self, (float const *)data, dimension, numData, numCenters) ;
      break ;
    case VL_TYPE_DOUBLE :
      _vl_kmeans_init_centers_plus_plus_d
      (self, (double const *)data, dimension, numData, numCenters) ;
      break ;
    default:
      abort() ;
  }
}

/** ------------------------------------------------------------------
 ** @brief Quantize data
 ** @param self KMeans object.
 ** @param assignments data to closest center assignments (output).
 ** @param distances data to closest center distance (output).
 ** @param data data to quantize.
 ** @param numData number of data points to quantize.
**/ VL_EXPORT void vl_kmeans_quantize (VlKMeans * self, vl_uint32 * assignments, void * distances, void const * data, vl_size numData) { switch (self->dataType) { case VL_TYPE_FLOAT : _vl_kmeans_quantize_f (self, assignments, distances, (float const *)data, numData) ; break ; case VL_TYPE_DOUBLE : _vl_kmeans_quantize_d (self, assignments, distances, (double const *)data, numData) ; break ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Quantize data using approximate nearest neighbours (ANN). ** @param self KMeans object. ** @param assignments data to centers assignments (output). ** @param distances data to closes center distance (output) ** @param data data to quantize. ** @param numData number of data points. ** @param update choose wether to update current assignments. ** ** The function uses an ANN procedure to compute the approximate ** nearest neighbours of the input data point. ** ** Setting @a update to ::VL_TRUE will cause the algorithm ** to *update existing assignments*. This means that each ** element of @a assignments and @a distances is updated ony if the ** ANN procedure can find a better assignment of the existing one. **/ VL_EXPORT void vl_kmeans_quantize_ann (VlKMeans * self, vl_uint32 * assignments, void * distances, void const * data, vl_size numData, vl_bool update) { switch (self->dataType) { case VL_TYPE_FLOAT : _vl_kmeans_quantize_ann_f (self, assignments, distances, (float const *)data, numData, update) ; break ; case VL_TYPE_DOUBLE : _vl_kmeans_quantize_ann_d (self, assignments, distances, (double const *)data, numData, update) ; break ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Refine center locations. ** @param self KMeans object. ** @param data data to quantize. ** @param numData number of data points. ** @return K-means energy at the end of optimization. 
** ** The function calls the underlying K-means quantization algorithm ** (@ref VlKMeansAlgorithm) to quantize the specified data @a data. ** The function assumes that the cluster centers have already ** been assigned by using one of the seeding functions, or by ** setting them. **/ VL_EXPORT double vl_kmeans_refine_centers (VlKMeans * self, void const * data, vl_size numData) { assert (self->centers) ; switch (self->dataType) { case VL_TYPE_FLOAT : return _vl_kmeans_refine_centers_f (self, (float const *)data, numData) ; case VL_TYPE_DOUBLE : return _vl_kmeans_refine_centers_d (self, (double const *)data, numData) ; default: abort() ; } } /** ------------------------------------------------------------------ ** @brief Cluster data. ** @param self KMeans object. ** @param data data to quantize. ** @param dimension data dimension. ** @param numData number of data points. ** @param numCenters number of clusters. ** @return K-means energy at the end of optimization. ** ** The function initializes the centers by using the initialization ** algorithm set by ::vl_kmeans_set_initialization and refines them ** by the quantization algorithm set by ::vl_kmeans_set_algorithm. ** The process is repeated one or more times (see ** ::vl_kmeans_set_num_repetitions) and the resutl with smaller ** energy is retained. 
**/ VL_EXPORT double vl_kmeans_cluster (VlKMeans * self, void const * data, vl_size dimension, vl_size numData, vl_size numCenters) { vl_uindex repetition ; double bestEnergy = VL_INFINITY_D ; void * bestCenters = NULL ; for (repetition = 0 ; repetition < self->numRepetitions ; ++ repetition) { double energy ; double timeRef ; if (self->verbosity) { VL_PRINTF("kmeans: repetition %d of %d\n", repetition + 1, self->numRepetitions) ; } timeRef = vl_get_cpu_time() ; switch (self->initialization) { case VlKMeansRandomSelection : vl_kmeans_init_centers_with_rand_data (self, data, dimension, numData, numCenters) ; break ; case VlKMeansPlusPlus : vl_kmeans_init_centers_plus_plus (self, data, dimension, numData, numCenters) ; break ; default: abort() ; } if (self->verbosity) { VL_PRINTF("kmeans: K-means initialized in %.2f s\n", vl_get_cpu_time() - timeRef) ; } timeRef = vl_get_cpu_time () ; energy = vl_kmeans_refine_centers (self, data, numData) ; if (self->verbosity) { VL_PRINTF("kmeans: K-means terminated in %.2f s with energy %g\n", vl_get_cpu_time() - timeRef, energy) ; } /* copy centers to output if current solution is optimal */ /* check repetition == 0 as well in case energy = NaN, which */ /* can happen if the data contain NaNs */ if (energy < bestEnergy || repetition == 0) { void * temp ; bestEnergy = energy ; if (bestCenters == NULL) { bestCenters = vl_malloc(vl_get_type_size(self->dataType) * self->dimension * self->numCenters) ; } /* swap buffers */ temp = bestCenters ; bestCenters = self->centers ; self->centers = temp ; } /* better energy */ } /* next repetition */ vl_free (self->centers) ; self->centers = bestCenters ; return bestEnergy ; } /* VL_KMEANS_INSTANTIATING */ #endif #undef SFX #undef TYPE #undef FLT #undef VL_KMEANS_INSTANTIATING
fill_r_4c.c
/* Copyright 2014-2018 The PySCF Developers. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. * * Author: Qiming Sun <osirpt.sun@gmail.com> */ #include <stdlib.h> #include <stdio.h> #include <complex.h> #include "config.h" #include "cint.h" int GTOmax_cache_size(int (*intor)(), int *shls_slice, int ncenter, int *atm, int natm, int *bas, int nbas, double *env); /* * out[naoi,naoj,naok,comp] in F-order */ void GTOr4c_fill_s1(int (*intor)(), double complex *out, double *buf, int comp, int ish, int jsh, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int ksh0 = shls_slice[4]; const int ksh1 = shls_slice[5]; const int lsh0 = shls_slice[6]; const int lsh1 = shls_slice[7]; const size_t naoi = ao_loc[ish1] - ao_loc[ish0]; const size_t naoj = ao_loc[jsh1] - ao_loc[jsh0]; const size_t naok = ao_loc[ksh1] - ao_loc[ksh0]; const size_t naol = ao_loc[lsh1] - ao_loc[lsh0]; const size_t nij = naoi * naoj; const int dims[] = {naoi, naoj, naok, naol}; ish += ish0; jsh += jsh0; const int ip = ao_loc[ish] - ao_loc[ish0]; const int jp = ao_loc[jsh] - ao_loc[jsh0]; out += jp * naoi + ip; int ksh, lsh, k0, l0; int shls[4]; shls[0] = ish; shls[1] = jsh; for (ksh = ksh0; ksh < ksh1; ksh++) { for (lsh = lsh0; lsh < lsh1; lsh++) { shls[2] = ksh; shls[3] = lsh; k0 = ao_loc[ksh] - ao_loc[ksh0]; l0 = ao_loc[lsh] - ao_loc[lsh0]; 
(*intor)(out+(l0*naok+k0)*nij, dims, shls, atm, natm, bas, nbas, env, cintopt, buf); } } } void GTOr4c_drv(int (*intor)(), void (*fill)(), int (*prescreen)(), double complex *eri, int comp, int *shls_slice, int *ao_loc, CINTOpt *cintopt, int *atm, int natm, int *bas, int nbas, double *env) { const int ish0 = shls_slice[0]; const int ish1 = shls_slice[1]; const int jsh0 = shls_slice[2]; const int jsh1 = shls_slice[3]; const int nish = ish1 - ish0; const int njsh = jsh1 - jsh0; const int cache_size = GTOmax_cache_size(intor, shls_slice, 4, atm, natm, bas, nbas, env); #pragma omp parallel default(none) \ shared(intor, fill, eri, comp, shls_slice, ao_loc, cintopt, \ atm, natm, bas, nbas, env) { int ish, jsh, ij; double *buf = malloc(sizeof(double) * cache_size); #pragma omp for schedule(dynamic) for (ij = 0; ij < nish*njsh; ij++) { ish = ij / njsh; jsh = ij % njsh; (*fill)(intor, eri, buf, comp, ish, jsh, shls_slice, ao_loc, cintopt, atm, natm, bas, nbas, env); } free(buf); } }
PrefixScan.c
#include <stdio.h> #include <stdlib.h> #include <omp.h> #include "timer.h" #define MIN(a,b) ((a) < (b) ? (a) : (b)) void PrefixScan (int *input, int *output, int length); int main(int argc, char *argv[]){ #pragma omp parallel if (omp_get_thread_num() == 0) printf("Running with %d thread(s)\n",omp_get_num_threads()); struct timespec tstart; double time_serial, time_threaded; // large enough to force into main memory #define ARRAY_SIZE 8000000 int *input_serial, *input_threaded, *output_serial, *output_threaded; input_serial = (int *)malloc(ARRAY_SIZE*sizeof(int)); input_threaded = (int *)malloc(ARRAY_SIZE*sizeof(int)); output_serial = (int *)malloc(ARRAY_SIZE*sizeof(int)); output_threaded = (int *)malloc(ARRAY_SIZE*sizeof(int)); for (int i=0; i<ARRAY_SIZE; i++) { input_serial[i] = 1+i%2; } #pragma omp parallel for for (int i=0; i<ARRAY_SIZE; i++) { input_threaded[i] = 1+i%2; } cpu_timer_start(&tstart); PrefixScan(input_serial, output_serial, ARRAY_SIZE); time_serial += cpu_timer_stop(tstart); cpu_timer_start(&tstart); #pragma omp parallel PrefixScan(input_threaded, output_threaded, ARRAY_SIZE); time_threaded = cpu_timer_stop(tstart); printf("Runtime is for serial %lf threaded %lf speedup %lf msecs\n", time_serial, time_threaded, time_serial/time_threaded); free(input_serial); free(input_threaded); free(output_serial); free(output_threaded); } void PrefixScan (int *input, int *output, int length) { // Get the total number of threads and thread_id int nthreads = 1; int thread_id = 0; #ifdef _OPENMP nthreads = omp_get_num_threads(); thread_id = omp_get_thread_num(); #endif // Compute the range for which this thread is responsible. int tbegin = length * ( thread_id ) / nthreads; int tend = length * ( thread_id + 1 ) / nthreads; // Only perform this operation if there is a positive number of entries. 
if ( tbegin < tend ) { // Do an exclusive scan for each thread output[tbegin] = 0; for ( int i = tbegin + 1 ; i < tend ; i++ ) { output[i] = output[i-1] + input[i-1]; } } if (nthreads == 1) return; // Do adjustment to prefix scan for the beginning value for each thread #ifdef _OPENMP // Wait until all threads get here. #pragma omp barrier // On the master thread compute the beginning offset for each thread if (thread_id == 0) { for ( int i = 1 ; i < nthreads ; i ++ ) { int ibegin = length * ( i - 1 ) / nthreads; int iend = length * ( i ) / nthreads; if ( ibegin < iend ) output[iend] = output[ibegin] + input[iend-1]; if ( ibegin < iend - 1 ) output[iend] += output[iend-1]; } } #pragma omp barrier // Start all threads again // Apply the offset to the range for this thread. #pragma omp simd for ( int i = tbegin + 1 ; i < tend ; i++ ) { output[i] += output[tbegin]; } #endif }
mapped_max_pool.h
#ifndef MAPPED_MAX_POOL_H_ #define MAPPED_MAX_POOL_H_ #include <math.h> #include <omp.h> #include <torch/extension.h> #include <limits> #include "core/resample.h" #include "nn/common/mapped_max_pool.h" namespace mapped_conv { namespace nn { namespace cpu { template <typename T> void MappedMaxPool2D(const int num_kernels, torch::Tensor in_data, torch::Tensor sample_map, // OH x OW x K x 2 const int channels, const int in_height, const int in_width, const int out_height, const int out_width, const int kernel_size, const int interpolation, torch::Tensor out_data, torch::Tensor out_idx) // Indices of kernel sample in map { const T *in_data_ptr = in_data.data<T>(); const T *sample_map_ptr = sample_map.data<T>(); T *out_data_ptr = out_data.data<T>(); int64_t *out_idx_ptr = out_idx.data<int64_t>(); int index; #pragma omp parallel for shared(in_data_ptr, sample_map_ptr, out_data_ptr, \ out_idx_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxPool2D(index, in_data_ptr, sample_map_ptr, channels, in_height, in_width, out_height, out_width, kernel_size, interpolation, out_data_ptr, out_idx_ptr); } } template <typename T> void MappedMaxUnpool2D(const int num_kernels, torch::Tensor grad_output, torch::Tensor idx_mask, torch::Tensor sample_map, const int channels, const int orig_height, const int orig_width, const int pooled_height, const int pooled_width, const int kernel_size, const int interpolation, torch::Tensor grad_input) { const T *grad_output_ptr = grad_output.data<T>(); const int64_t *idx_mask_ptr = idx_mask.data<int64_t>(); const T *sample_map_ptr = sample_map.data<T>(); T *grad_input_ptr = grad_input.data<T>(); int index; #pragma omp parallel for shared( \ grad_output_ptr, idx_mask_ptr, sample_map_ptr, \ grad_input_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxUnpool2D(index, grad_output_ptr, idx_mask_ptr, sample_map_ptr, channels, orig_height, orig_width, 
pooled_height, pooled_width, kernel_size, interpolation, grad_input_ptr); } } // ------------------------------------------------- // ------------------------------------------------- template <typename T> void MappedMaxPool2DWeighted( const int num_kernels, torch::Tensor in_data, torch::Tensor sample_map, // OH x OW x K x P x 2 torch::Tensor interp_weights, // OH x OW x K x P const int channels, const int in_height, const int in_width, const int out_height, const int out_width, const int kernel_size, const int interpolation, const int num_interp_pts, torch::Tensor out_data, torch::Tensor out_idx) // Indices of kernel sample in map { const T *in_data_ptr = in_data.data<T>(); const T *sample_map_ptr = sample_map.data<T>(); const T *interp_weights_ptr = interp_weights.data<T>(); T *out_data_ptr = out_data.data<T>(); int64_t *out_idx_ptr = out_idx.data<int64_t>(); int index; #pragma omp parallel for shared(in_data_ptr, sample_map_ptr, \ interp_weights_ptr, out_data_ptr, \ out_idx_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxPool2DWeighted( index, in_data_ptr, sample_map_ptr, interp_weights_ptr, channels, in_height, in_width, out_height, out_width, kernel_size, interpolation, num_interp_pts, out_data_ptr, out_idx_ptr); } } template <typename T> void MappedMaxUnpool2DWeighted( const int num_kernels, torch::Tensor grad_output, torch::Tensor idx_mask, torch::Tensor sample_map, torch::Tensor interp_weights, const int channels, const int orig_height, const int orig_width, const int pooled_height, const int pooled_width, const int kernel_size, const int interpolation, const int num_interp_pts, torch::Tensor grad_input) { const T *grad_output_ptr = grad_output.data<T>(); const int64_t *idx_mask_ptr = idx_mask.data<int64_t>(); const T *sample_map_ptr = sample_map.data<T>(); const T *interp_weights_ptr = interp_weights.data<T>(); T *grad_input_ptr = grad_input.data<T>(); int index; #pragma omp parallel for shared( \ 
grad_output_ptr, idx_mask_ptr, sample_map_ptr, interp_weights_ptr, \ grad_input_ptr) private(index) schedule(static) for (index = 0; index < num_kernels; index++) { common::MappedMaxUnpool2DWeighted( index, grad_output_ptr, idx_mask_ptr, sample_map_ptr, interp_weights_ptr, channels, orig_height, orig_width, pooled_height, pooled_width, kernel_size, interpolation, num_interp_pts, grad_input_ptr); } } } // namespace cpu } // namespace nn } // namespace mapped_conv #endif
clipperz_srp_fmt_plug.c
/* This software was repurposed by Dhiru Kholia (dhiru at openwall.com)
 * in 2012.
 *
 * This software was written by Jim Fougeron jfoug AT cox dot net
 * in 2012. No copyright is claimed, and the software is hereby
 * placed in the public domain. In case this attempt to disclaim
 * copyright and place the software in the public domain is deemed
 * null and void, then the software is Copyright (c) 2012 Jim Fougeron
 * and it is hereby released to the general public under the following
 * terms:
 *
 * This software may be modified, redistributed, and used for any
 * purpose, in source and binary forms, with or without modification.
 *
 * Format was busted, just like wow-srp. It ONLY was handling binary residue
 * if the residue was exactly 64 hex bytes long. Well for exponentation, it
 * does not have to be 64 bytes. It can be shorter. We also handle case where
 * a shorter result number is 0 Lpadded to an even 64 bytes. split() should
 * be added to canonize these hashes, since they are same hash with
 * multiple representations.
 *
 * This implements the SRP protocol, with Clipperz documented
 * implementation specifics.
 *
 * s = random salt value.
 *
 * v is the 'verifier' value (256 bit value).
 *
 * Clipperz's offline database has following relevant fields,
 *
 * <script>_clipperz_dump_data_ = { ...
 *
 * '2f2134e38b23534adfcd43c2f7223caf3a53a8db7ce800f1e918e8e0d06b8b7a': {
 * s: 'e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32',
 * v: 'e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f
 * version: '0.2',
 * ...
 * }
 * P algorithm:
 * h1 = hashlib.sha256(password + username).digest()
 * P = h2 = hashlib.sha256(h1).hexdigest()
 *
 * x algorithm:
 * x1 = hashlib.sha256(s + P).digest()
 * x = hashlib.sha256(x1).hexdigest()
 *
 * v algorithm:
 * v = Clipperz.Crypto.SRP.g().powerModule(new Clipperz.Crypto.BigInt(x,16),Clipperz.Crypto.SRP.n());
 * n = 125617018995153554710546479714086468244499594888726646874671447258204721048803
 * g = 2
 */

#if FMT_EXTERNS_H
extern struct fmt_main fmt_clipperz;
#elif FMT_REGISTERS_H
john_register_one(&fmt_clipperz);
#else

#if AC_BUILT
/* need to know if HAVE_LIBGMP is set, for autoconfig build */
#include "autoconfig.h"
#endif

#include <string.h>
#include "sha2.h"
#include "arch.h"
#include "params.h"
#include "common.h"
#include "formats.h"
#ifdef HAVE_LIBGMP
#if HAVE_GMP_GMP_H
#include <gmp/gmp.h>
#else
#include <gmp.h>
#endif
#define EXP_STR " GMP-exp"
#else
#include <openssl/bn.h>
#define EXP_STR " oSSL-exp"
#endif
#include "johnswap.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 64
#endif
#endif
#include "memdbg.h"

#define FORMAT_LABEL "Clipperz"
#define FORMAT_NAME "SRP"
#define ALGORITHM_NAME "SHA256 32/" ARCH_BITS_STR EXP_STR

#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1

#define CLIPPERZSIG "$clipperz$"
#define CLIPPERZSIGLEN (sizeof(CLIPPERZSIG)-1)
#define PLAINTEXT_LENGTH 16
#define CIPHERTEXT_LENGTH 65

#define BINARY_SIZE 33
#define BINARY_ALIGN 4
#define FULL_BINARY_SIZE 33
#define SALT_SIZE sizeof(struct custom_salt)
#define SALT_ALIGN 1
#define USERNAMELEN 32

#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 4

#define SZ 128

// salt is in hex (salt and salt2)
static struct fmt_tests tests[] = {
	{CLIPPERZSIG"e8be8c8d9c1d5dc79ecc7b15d1787d5b5dc22e815ddb0b37f6145ca667421f1f$e0bc11ee4db80a3ecabd293f5201cb747856361192c68f4133ea707c7d4d2d32*hackme@mailinator.com", "openwall"},
	{"$clipperz$05b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"},
	{"$clipperz$5b18d6976d6cefad7c0c330c0c8a32ed69f19a8d68a94c3916c5ad1ba5ce37e5$RoljkWQajmS8OXFbsnqmZFTeB2How6hkoDd5QKu0DjthET3NmjTmOLumZe84nb7o*1", "password"},
	{NULL}
};

/* Per-candidate big-number workspace; GMP and OpenSSL variants. */
#ifdef HAVE_LIBGMP
typedef struct t_SRP_CTX {
	mpz_t z_mod, z_base, z_exp, z_rop;
} SRP_CTX;
#else
typedef struct t_SRP_CTX {
	BIGNUM *z_mod, *z_base, *z_exp, *z_rop;
	BN_CTX *BN_ctx;
}SRP_CTX;
#endif

static SRP_CTX *pSRP_CTX;
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
// BN_bn2bin sometimes tries to write 33 bytes, hence allow some padding!
// that is because these are mod 0x115B8B692E0E045692CF280B436735C77A5A9E8A9E7ED56C965F87DB5B2A2ECE3
// which is a 65 hex digit number (33 bytes long).
static uint32_t (*crypt_out)[(FULL_BINARY_SIZE/4) + 1];

static struct custom_salt {
	unsigned char saved_salt[SZ];  /* hex salt string s */
	unsigned char user_id[SZ];     /* username appended to the password */
} *cur_salt;

static int max_keys_per_crypt;

/* Allocate per-candidate buffers and pre-initialize the big-number
 * constants (modulus n, generator g = 2) for every slot. */
static void init(struct fmt_main *self)
{
	int i;
#if defined (_OPENMP)
	int omp_t = omp_get_max_threads();
	self->params.min_keys_per_crypt *= omp_t;
	omp_t *= OMP_SCALE;
	self->params.max_keys_per_crypt *= omp_t;
#endif
	saved_key = mem_calloc_align(sizeof(*saved_key),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	crypt_out = mem_calloc_align(sizeof(*crypt_out),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	pSRP_CTX = mem_calloc_align(sizeof(*pSRP_CTX),
			self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
	max_keys_per_crypt = self->params.max_keys_per_crypt;
	for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
		mpz_init_set_str(pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803", 10);
		mpz_init_set_str(pSRP_CTX[i].z_base, "2", 10);
		mpz_init_set_str(pSRP_CTX[i].z_exp, "1", 10);
		mpz_init(pSRP_CTX[i].z_rop);
		// Now, properly initialized mpz_exp, so it is 'large enough' to hold any SHA256 value
		// we need to put into it. Then we simply need to copy in the data, and possibly set
		// the limb count size.
		mpz_mul_2exp(pSRP_CTX[i].z_exp, pSRP_CTX[i].z_exp, 159);
#else
		pSRP_CTX[i].z_mod=BN_new();
		BN_dec2bn(&pSRP_CTX[i].z_mod, "125617018995153554710546479714086468244499594888726646874671447258204721048803");
		pSRP_CTX[i].z_base=BN_new();
		BN_set_word(pSRP_CTX[i].z_base, 2);
		pSRP_CTX[i].z_exp=BN_new();
		pSRP_CTX[i].z_rop=BN_new();
		pSRP_CTX[i].BN_ctx = BN_CTX_new();
#endif
	}
}

/* Release everything init() allocated. */
void done(void)
{
	int i;
	for (i = 0; i < max_keys_per_crypt; ++i) {
#ifdef HAVE_LIBGMP
		mpz_clear(pSRP_CTX[i].z_mod);
		mpz_clear(pSRP_CTX[i].z_base);
		mpz_clear(pSRP_CTX[i].z_exp);
		mpz_clear(pSRP_CTX[i].z_rop);
#else
		BN_clear_free(pSRP_CTX[i].z_mod);
		BN_clear_free(pSRP_CTX[i].z_base);
		BN_clear_free(pSRP_CTX[i].z_exp);
		BN_clear_free(pSRP_CTX[i].z_rop);
		BN_CTX_free(pSRP_CTX[i].BN_ctx);
#endif
	}
	MEM_FREE(pSRP_CTX);
	MEM_FREE(crypt_out);
	MEM_FREE(saved_key);
}

/* Sanity-check one ciphertext line: signature, hex verifier of at most
 * CIPHERTEXT_LENGTH digits (odd length allowed), then salt '*' user-id,
 * and no trailing fields. */
static int valid(char *ciphertext, struct fmt_main *self)
{
	char *ctcopy;
	char *keeptr;
	char *p = NULL;
	if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN))
		return 0;
	ctcopy = strdup(ciphertext);
	keeptr = ctcopy;
	ctcopy += CLIPPERZSIGLEN;
	if ((p = strtokm(ctcopy, "$")) == NULL)	/* hex verifier v */
		goto err;
	if (strlen(p) > CIPHERTEXT_LENGTH)
		goto err;
	if (!ishex_oddOK(p))
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* salt */
		goto err;
	if (strlen(p) > SZ-1)
		goto err;
	if ((p = strtokm(NULL, "*")) == NULL)	/* user id */
		goto err;
	if (strlen(p) > SZ-1)
		goto err;
	if ((p = strtokm(NULL, "*")))	/* no extra fields allowed */
		goto err;
	MEM_FREE(keeptr);
	return 1;
err:
	MEM_FREE(keeptr);
	return 0;
}

/* Canonicalize a hash: lower-case the verifier's hex digits and strip any
 * leading zeros, so zero-padded variants of the same number collapse to a
 * single representation (see header comment). */
static char *split(char *ciphertext, int index, struct fmt_main *pFmt)
{
	static char ct[128+2*SZ+1];
	char *cp;

	if (strncmp(ciphertext, CLIPPERZSIG, CLIPPERZSIGLEN))
		return ciphertext;
	strnzcpy(ct, ciphertext, sizeof(ct));
	cp = strchr(&ct[CLIPPERZSIGLEN], '$');
	if (!cp)
		return ciphertext;
	*cp = 0;
	strlwr(&ct[CLIPPERZSIGLEN]);
	*cp = '$';
	if (ct[CLIPPERZSIGLEN] == '0') {
		char *cpi = &ct[CLIPPERZSIGLEN];
		char *cpo = cpi;
		while (*cpi == '0')
			++cpi;
		/* shift the remainder (including the NUL) left in place */
		do {
			*cpo++ = *cpi;
		} while (*cpi++);
	}
	return ct;
}

/* Decode the hex verifier to binary, handling odd digit counts by
 * treating the first digit as a lone low nibble. */
static void *get_binary(char *ciphertext)
{
	static union {
		unsigned char c[FULL_BINARY_SIZE];
		uint32_t dummy[1];
	} buf;
	unsigned char *out = buf.c;
	char *p, *q;
	int i;

	p = &ciphertext[CLIPPERZSIGLEN];
	q = strchr(p, '$');
	memset(buf.c, 0, sizeof(buf));
	while (*p == '0')
		++p;
	if ((q-p)&1) {
		/* odd number of digits: first byte comes from one nibble */
		out[0] = atoi16[ARCH_INDEX(*p)];
		++p;
	} else {
		out[0] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
	}
	for (i = 1; i < FULL_BINARY_SIZE; i++) {
		out[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
		p += 2;
		if (p >= q)
			break;
	}
	return out;
}

/* Extract salt string and user id from a canonical ciphertext. */
static void *get_salt(char *ciphertext)
{
	char *p;
	char *q;
	static struct custom_salt cs;

	memset(&cs, 0, sizeof(cs));
	p = ciphertext;
	p = strchr(&ciphertext[CLIPPERZSIGLEN], '$') + 1;
	q = strrchr(ciphertext, '*');
	strncpy((char*)cs.saved_salt, p, q - p);
	p = strrchr(ciphertext, '*') + 1;
	strcpy((char*)cs.user_id, p);
	return (void *)&cs;
}

#define COMMON_GET_HASH_VAR crypt_out
#include "common-get-hash.h"

/* Simple rolling hash over the salt bytes, folded into SALT_HASH_SIZE. */
static int salt_hash(void *salt)
{
	unsigned int hash = 0;
	char *p = (char *)salt;

	while (*p) {
		hash <<= 1;
		hash += (unsigned char)*p++;
		if (hash >> SALT_HASH_LOG) {
			hash ^= hash >> SALT_HASH_LOG;
			hash &= (SALT_HASH_SIZE - 1);
		}
	}
	hash ^= hash >> SALT_HASH_LOG;
	hash &= (SALT_HASH_SIZE - 1);
	return hash;
}

static void set_salt(void *salt)
{
	cur_salt = (struct custom_salt *)salt;
}

static void set_key(char *key, int index)
{
	strnzcpy(saved_key[index], key, PLAINTEXT_LENGTH+1);
}

static char *get_key(int index)
{
	return saved_key[index];
}

/* Lower-case hex encode len bytes of str into out (2*len chars, no NUL). */
inline static void hex_encode(unsigned char *str, int len, unsigned char *out)
{
	int i;
	for (i = 0; i < len; ++i) {
		out[0] = itoa16[str[i]>>4];
		out[1] = itoa16[str[i]&0xF];
		out += 2;
	}
}

/* Compute v = g^x mod n for each candidate password (see header for the
 * P and x derivations); result is stored big-endian in crypt_out[j]. */
static int crypt_all(int *pcount, struct db_salt *salt)
{
	const int count = *pcount;
	int j;
#ifdef _OPENMP
#pragma omp parallel for
#endif
	for (j = 0; j < count; ++j) {
		SHA256_CTX ctx;
		unsigned char Tmp[32];
		unsigned char TmpHex[64];

		memset(crypt_out[j], 0, sizeof(crypt_out[j]));
		/* h1 = SHA256(password . username) */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, saved_key[j], strlen(saved_key[j]));
		SHA256_Update(&ctx, cur_salt->user_id, strlen((char*)cur_salt->user_id));
		SHA256_Final(Tmp, &ctx);
		/* P = SHA256(h1), kept binary here */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, Tmp, 32);
		SHA256_Final(Tmp, &ctx);
		/* x1 = SHA256(s . hex(P)) */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, cur_salt->saved_salt, strlen((char*)cur_salt->saved_salt));
		hex_encode(Tmp, 32, TmpHex);
		SHA256_Update(&ctx, TmpHex, 64);
		SHA256_Final(Tmp, &ctx);
		/* x = SHA256(x1) */
		SHA256_Init(&ctx);
		SHA256_Update(&ctx, Tmp, 32);
		SHA256_Final(Tmp, &ctx);
#ifdef HAVE_LIBGMP
	{
		unsigned char HashStr[80], *p;
		int i, todo;
		p = HashStr;
		for (i = 0; i < 32; ++i) {
			*p++ = itoa16[Tmp[i]>>4];
			*p++ = itoa16[Tmp[i]&0xF];
		}
		*p = 0;

		mpz_set_str(pSRP_CTX[j].z_exp, (char*)HashStr, 16);
		mpz_powm (pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod );
		mpz_get_str ((char*)HashStr, 16, pSRP_CTX[j].z_rop);

		/* decode GMP's variable-length hex result; handle an odd
		 * leading digit the same way get_binary() does */
		p = HashStr;
		todo = strlen((char*)p);
		if (todo&1) {
			((unsigned char*)(crypt_out[j]))[0] = atoi16[ARCH_INDEX(*p)];
			++p;
			--todo;
		} else {
			((unsigned char*)(crypt_out[j]))[0] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
			p += 2;
			todo -= 2;
		}
		todo >>= 1;
		for (i = 1; i <= todo; i++) {
			((unsigned char*)(crypt_out[j]))[i] = (atoi16[ARCH_INDEX(*p)] << 4) | atoi16[ARCH_INDEX(p[1])];
			p += 2;
		}
	}
#else
		// using oSSL's BN to do expmod.
		pSRP_CTX[j].z_exp = BN_bin2bn(Tmp,32,pSRP_CTX[j].z_exp);
		BN_mod_exp(pSRP_CTX[j].z_rop, pSRP_CTX[j].z_base, pSRP_CTX[j].z_exp, pSRP_CTX[j].z_mod, pSRP_CTX[j].BN_ctx);
		BN_bn2bin(pSRP_CTX[j].z_rop, (unsigned char*)(crypt_out[j]));
#endif
	}
	return count;
}

/* Quick first-word comparison against all computed results. */
static int cmp_all(void *binary, int count)
{
	int i;
	for (i = 0; i < count; ++i) {
		if (*((uint32_t*)binary) == *((uint32_t*)(crypt_out[i])))
			return 1;
	}
	return 0;
}

static int cmp_one(void *binary, int index)
{
	return *((uint32_t*)binary) == *((uint32_t*)(crypt_out[index]));
}

/* Full-width comparison, re-deriving binary from the source string. */
static int cmp_exact(char *source, int index)
{
	return !memcmp(get_binary(source), crypt_out[index], BINARY_SIZE);
}

struct fmt_main fmt_clipperz = {
	{
		FORMAT_LABEL,
		FORMAT_NAME,
		ALGORITHM_NAME,
		BENCHMARK_COMMENT,
		BENCHMARK_LENGTH,
		0,
		PLAINTEXT_LENGTH,
		BINARY_SIZE,
		BINARY_ALIGN,
		SALT_SIZE,
		SALT_ALIGN,
		MIN_KEYS_PER_CRYPT,
		MAX_KEYS_PER_CRYPT,
		FMT_CASE | FMT_8_BIT | FMT_SPLIT_UNIFIES_CASE | FMT_OMP,
		{ NULL },
		{ CLIPPERZSIG },
		tests
	}, {
		init,
		done,
		fmt_default_reset,
		fmt_default_prepare,
		valid,
		split,
		get_binary,
		get_salt,
		{ NULL },
		fmt_default_source,
		{
			fmt_default_binary_hash_0,
			fmt_default_binary_hash_1,
			fmt_default_binary_hash_2,
			fmt_default_binary_hash_3,
			fmt_default_binary_hash_4,
			fmt_default_binary_hash_5,
			fmt_default_binary_hash_6
		},
		salt_hash,
		NULL,
		set_salt,
		set_key,
		get_key,
		fmt_default_clear_keys,
		crypt_all,
		{
#define COMMON_GET_HASH_LINK
#include "common-get-hash.h"
		},
		cmp_all,
		cmp_one,
		cmp_exact
	}
};

#endif /* plugin stanza */
big_vm.c
/* * big_vm.c * * Program to allocate a huge VM space to test how allocated memory affects * core dump size. * * This program is meant to be run in gdb with breakpoints on various lines * where the user can then force a core dump and see what it contains. * * The idea is looking at core dump size as memory is allocated, touched, * and then freed. * * Copyright (c) 2017 James Klassen * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies of this Software or works derived from this Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * */ #include<stdio.h> #include<stdlib.h> int main() { size_t a_size = 4LL*1024*1024*1024; int *array = (int*)malloc(sizeof(int)*a_size); #pragma omp parallel for for(size_t i = 0; i < a_size; i++) { array[i] = (int)i; } free(array); printf("break here\n"); return 0; }
threshold.c
/* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % TTTTT H H RRRR EEEEE SSSSS H H OOO L DDDD % % T H H R R E SS H H O O L D D % % T HHHHH RRRR EEE SSS HHHHH O O L D D % % T H H R R E SS H H O O L D D % % T H H R R EEEEE SSSSS H H OOO LLLLL DDDD % % % % % % MagickCore Image Threshold Methods % % % % Software Design % % Cristy % % October 1996 % % % % % % Copyright 1999-2018 ImageMagick Studio LLC, a non-profit organization % % dedicated to making software imaging solutions freely available. % % % % You may not use this file except in compliance with the License. You may % % obtain a copy of the License at % % % % https://imagemagick.org/script/license.php % % % % Unless required by applicable law or agreed to in writing, software % % distributed under the License is distributed on an "AS IS" BASIS, % % WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. % % See the License for the specific language governing permissions and % % limitations under the License. % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % */ /* Include declarations. 
*/ #include "MagickCore/studio.h" #include "MagickCore/property.h" #include "MagickCore/blob.h" #include "MagickCore/cache-view.h" #include "MagickCore/color.h" #include "MagickCore/color-private.h" #include "MagickCore/colormap.h" #include "MagickCore/colorspace.h" #include "MagickCore/colorspace-private.h" #include "MagickCore/configure.h" #include "MagickCore/constitute.h" #include "MagickCore/decorate.h" #include "MagickCore/draw.h" #include "MagickCore/enhance.h" #include "MagickCore/exception.h" #include "MagickCore/exception-private.h" #include "MagickCore/effect.h" #include "MagickCore/fx.h" #include "MagickCore/gem.h" #include "MagickCore/geometry.h" #include "MagickCore/image-private.h" #include "MagickCore/list.h" #include "MagickCore/log.h" #include "MagickCore/memory_.h" #include "MagickCore/memory-private.h" #include "MagickCore/monitor.h" #include "MagickCore/monitor-private.h" #include "MagickCore/montage.h" #include "MagickCore/option.h" #include "MagickCore/pixel-accessor.h" #include "MagickCore/pixel-private.h" #include "MagickCore/quantize.h" #include "MagickCore/quantum.h" #include "MagickCore/quantum-private.h" #include "MagickCore/random_.h" #include "MagickCore/random-private.h" #include "MagickCore/resize.h" #include "MagickCore/resource_.h" #include "MagickCore/segment.h" #include "MagickCore/shear.h" #include "MagickCore/signature-private.h" #include "MagickCore/string_.h" #include "MagickCore/string-private.h" #include "MagickCore/thread-private.h" #include "MagickCore/threshold.h" #include "MagickCore/token.h" #include "MagickCore/transform.h" #include "MagickCore/xml-tree.h" #include "MagickCore/xml-tree-private.h" /* Define declarations. */ #define ThresholdsFilename "thresholds.xml" /* Typedef declarations. */ struct _ThresholdMap { char *map_id, *description; size_t width, height; ssize_t divisor, *levels; }; /* Static declarations. 
*/

/* Built-in fallback maps so the common aliases work even without XML files. */
static const char
  *MinimalThresholdMap =
    "<?xml version=\"1.0\"?>"
    "<thresholds>"
    " <threshold map=\"threshold\" alias=\"1x1\">"
    " <description>Threshold 1x1 (non-dither)</description>"
    " <levels width=\"1\" height=\"1\" divisor=\"2\">"
    " 1"
    " </levels>"
    " </threshold>"
    " <threshold map=\"checks\" alias=\"2x1\">"
    " <description>Checkerboard 2x1 (dither)</description>"
    " <levels width=\"2\" height=\"2\" divisor=\"3\">"
    " 1 2"
    " 2 1"
    " </levels>"
    " </threshold>"
    "</thresholds>";

/*
  Forward declarations.
*/
static ThresholdMap
  *GetThresholdMapFile(const char *,const char *,const char *,
    ExceptionInfo *);

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A d a p t i v e T h r e s h o l d I m a g e                             %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AdaptiveThresholdImage() selects an individual threshold for each pixel
%  based on the range of intensity values in its local neighborhood.  This
%  allows for thresholding of an image whose global intensity histogram
%  doesn't contain distinctive peaks.
%
%  The format of the AdaptiveThresholdImage method is:
%
%      Image *AdaptiveThresholdImage(const Image *image,const size_t width,
%        const size_t height,const double bias,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o width: the width of the local neighborhood.
%
%    o height: the height of the local neighborhood.
%
%    o bias: the mean bias.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport Image *AdaptiveThresholdImage(const Image *image,
  const size_t width,const size_t height,const double bias,
  ExceptionInfo *exception)
{
#define AdaptiveThresholdImageTag "AdaptiveThreshold/Image"

  CacheView
    *image_view,
    *threshold_view;

  Image
    *threshold_image;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  MagickSizeType
    number_pixels;

  ssize_t
    y;

  /*
    Initialize threshold image attributes.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  threshold_image=CloneImage(image,0,0,MagickTrue, exception);
  if (threshold_image == (Image *) NULL)
    return((Image *) NULL);
  status=SetImageStorageClass(threshold_image,DirectClass,exception);
  if (status == MagickFalse)
    {
      threshold_image=DestroyImage(threshold_image);
      return((Image *) NULL);
    }
  /*
    Threshold image: each output pixel is black or white depending on whether
    it exceeds the biased mean of its width x height neighborhood.
  */
  status=MagickTrue;
  progress=0;
  number_pixels=(MagickSizeType) width*height;
  image_view=AcquireVirtualCacheView(image,exception);
  threshold_view=AcquireAuthenticCacheView(threshold_image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,threshold_image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    double
      channel_bias[MaxPixelChannels],
      channel_sum[MaxPixelChannels];

    register const Quantum
      *magick_restrict p,
      *magick_restrict pixels;

    register Quantum
      *magick_restrict q;

    register ssize_t
      i,
      x;

    ssize_t
      center,
      u,
      v;

    if (status == MagickFalse)
      continue;
    /*
      Fetch the row plus a half-neighborhood border on each side (virtual
      pixels supply the out-of-bounds values).
    */
    p=GetCacheViewVirtualPixels(image_view,-((ssize_t) width/2L),y-(ssize_t)
      (height/2L),image->columns+width,height,exception);
    q=QueueCacheViewAuthenticPixels(threshold_view,0,y,
      threshold_image->columns,1,exception);
    if ((p == (const Quantum *) NULL) || (q == (Quantum *) NULL))
      {
        status=MagickFalse;
        continue;
      }
    /* offset of the neighborhood's center pixel within the fetched window */
    center=(ssize_t) GetPixelChannels(image)*(image->columns+width)*
      (height/2L)+GetPixelChannels(image)*(width/2);
    /*
      Seed the sliding-window sums for x == 0: channel_sum holds the full
      window total, channel_bias the total of the window's last column (the
      column that leaves the window on the next step).
    */
    for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
    {
      PixelChannel channel = GetPixelChannelChannel(image,i);
      PixelTrait traits = GetPixelChannelTraits(image,channel);
      PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
        channel);
      if ((traits == UndefinedPixelTrait) ||
          (threshold_traits == UndefinedPixelTrait))
        continue;
      if ((threshold_traits & CopyPixelTrait) != 0)
        {
          SetPixelChannel(threshold_image,channel,p[center+i],q);
          continue;
        }
      pixels=p;
      channel_bias[channel]=0.0;
      channel_sum[channel]=0.0;
      for (v=0; v < (ssize_t) height; v++)
      {
        for (u=0; u < (ssize_t) width; u++)
        {
          if (u == (ssize_t) (width-1))
            channel_bias[channel]+=pixels[i];
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image);
        }
        pixels+=GetPixelChannels(image)*image->columns;
      }
    }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          mean;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        PixelTrait threshold_traits=GetPixelChannelTraits(threshold_image,
          channel);
        if ((traits == UndefinedPixelTrait) ||
            (threshold_traits == UndefinedPixelTrait))
          continue;
        if ((threshold_traits & CopyPixelTrait) != 0)
          {
            SetPixelChannel(threshold_image,channel,p[center+i],q);
            continue;
          }
        /*
          Slide the window one pixel right: drop the departing column
          (channel_bias), then add the incoming column while recording the
          new last column for the next iteration.
        */
        channel_sum[channel]-=channel_bias[channel];
        channel_bias[channel]=0.0;
        pixels=p;
        for (v=0; v < (ssize_t) height; v++)
        {
          channel_bias[channel]+=pixels[i];
          pixels+=(width-1)*GetPixelChannels(image);
          channel_sum[channel]+=pixels[i];
          pixels+=GetPixelChannels(image)*(image->columns+1);
        }
        mean=(double) (channel_sum[channel]/number_pixels+bias);
        SetPixelChannel(threshold_image,channel,(Quantum) ((double)
          p[center+i] <= mean ? 0 : QuantumRange),q);
      }
      p+=GetPixelChannels(image);
      q+=GetPixelChannels(threshold_image);
    }
    if (SyncCacheViewAuthenticPixels(threshold_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_AdaptiveThresholdImage)
#endif
        proceed=SetImageProgress(image,AdaptiveThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  threshold_image->type=image->type;
  threshold_view=DestroyCacheView(threshold_view);
  image_view=DestroyCacheView(image_view);
  if (status == MagickFalse)
    threshold_image=DestroyImage(threshold_image);
  return(threshold_image);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     A u t o T h r e s h o l d I m a g e                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  AutoThresholdImage() automatically selects a threshold and replaces each
%  pixel in the image with a black pixel if the image intensity is less than
%  the selected threshold otherwise white.
%
%  The format of the AutoThresholdImage method is:
%
%      MagickBooleanType AutoThresholdImage(Image *image,
%        const AutoThresholdMethod method,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: The image to auto-threshold.
%
%    o method: choose from Kapur, OTSU, or Triangle.
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  KapurThreshold() picks the histogram bin that maximizes the sum of the
  entropies of the "black" (<= bin) and "white" (> bin) populations; returns
  the threshold as a percentage of MaxIntensity, or -1.0 on allocation
  failure.
*/
static double KapurThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
#define MaxIntensity 255

  double
    *black_entropy,
    *cumulative_histogram,
    entropy,
    epsilon,
    maximum_entropy,
    *white_entropy;

  register ssize_t
    i,
    j;

  size_t
    threshold;

  /*
    Compute optimal threshold from the entropy of the histogram.
  */
  cumulative_histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*cumulative_histogram));
  black_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*black_entropy));
  white_entropy=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*white_entropy));
  if ((cumulative_histogram == (double *) NULL) ||
      (black_entropy == (double *) NULL) ||
      (white_entropy == (double *) NULL))
    {
      /* release whichever buffers were acquired before the failure */
      if (white_entropy != (double *) NULL)
        white_entropy=(double *) RelinquishMagickMemory(white_entropy);
      if (black_entropy != (double *) NULL)
        black_entropy=(double *) RelinquishMagickMemory(black_entropy);
      if (cumulative_histogram != (double *) NULL)
        cumulative_histogram=(double *)
          RelinquishMagickMemory(cumulative_histogram);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Entropy for black and white parts of the histogram.
  */
  cumulative_histogram[0]=histogram[0];
  for (i=1; i <= MaxIntensity; i++)
    cumulative_histogram[i]=cumulative_histogram[i-1]+histogram[i];
  epsilon=MagickMinimumValue;  /* guard against log(0) / divide-by-zero */
  for (j=0; j <= MaxIntensity; j++)
  {
    /*
      Black entropy: entropy of bins [0..j], normalized by their total mass.
    */
    black_entropy[j]=0.0;
    if (cumulative_histogram[j] > epsilon)
      {
        entropy=0.0;
        for (i=0; i <= j; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/cumulative_histogram[j]*
              log(histogram[i]/cumulative_histogram[j]);
        black_entropy[j]=entropy;
      }
    /*
      White entropy: entropy of bins (j..MaxIntensity].
    */
    white_entropy[j]=0.0;
    if ((1.0-cumulative_histogram[j]) > epsilon)
      {
        entropy=0.0;
        for (i=j+1; i <= MaxIntensity; i++)
          if (histogram[i] > epsilon)
            entropy-=histogram[i]/(1.0-cumulative_histogram[j])*
              log(histogram[i]/(1.0-cumulative_histogram[j]));
        white_entropy[j]=entropy;
      }
  }
  /*
    Find histogram bin with maximum entropy.
  */
  maximum_entropy=black_entropy[0]+white_entropy[0];
  threshold=0;
  for (j=1; j <= MaxIntensity; j++)
    if ((black_entropy[j]+white_entropy[j]) > maximum_entropy)
      {
        maximum_entropy=black_entropy[j]+white_entropy[j];
        threshold=(size_t) j;
      }
  /*
    Free resources.
  */
  white_entropy=(double *) RelinquishMagickMemory(white_entropy);
  black_entropy=(double *) RelinquishMagickMemory(black_entropy);
  cumulative_histogram=(double *)
    RelinquishMagickMemory(cumulative_histogram);
  /* express the chosen bin as a percentage of the intensity range */
  return(100.0*threshold/MaxIntensity);
}

/*
  OTSUThreshold() picks the histogram bin that maximizes the inter-class
  variance between the populations below and above the bin (Otsu's method);
  returns the threshold as a percentage of MaxIntensity, or -1.0 on
  allocation failure.
*/
static double OTSUThreshold(const Image *image,const double *histogram,
  ExceptionInfo *exception)
{
  double
    max_sigma,
    *myu,
    *omega,
    *probability,
    *sigma,
    threshold;

  register ssize_t
    i;

  /*
    Compute optimal threshold from maximization of inter-class variance.
  */
  myu=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*myu));
  omega=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*omega));
  probability=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*probability));
  sigma=(double *) AcquireQuantumMemory(MaxIntensity+1UL,sizeof(*sigma));
  if ((myu == (double *) NULL) || (omega == (double *) NULL) ||
      (probability == (double *) NULL) || (sigma == (double *) NULL))
    {
      /* release whichever buffers were acquired before the failure */
      if (sigma != (double *) NULL)
        sigma=(double *) RelinquishMagickMemory(sigma);
      if (probability != (double *) NULL)
        probability=(double *) RelinquishMagickMemory(probability);
      if (omega != (double *) NULL)
        omega=(double *) RelinquishMagickMemory(omega);
      if (myu != (double *) NULL)
        myu=(double *) RelinquishMagickMemory(myu);
      (void) ThrowMagickException(exception,GetMagickModule(),
        ResourceLimitError,"MemoryAllocationFailed","`%s'",image->filename);
      return(-1.0);
    }
  /*
    Calculate probability density.
  */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    probability[i]=histogram[i];
  /*
    Generate probability of graylevels and mean value for separation.
  */
  omega[0]=probability[0];
  myu[0]=0.0;
  for (i=1; i <= (ssize_t) MaxIntensity; i++)
  {
    omega[i]=omega[i-1]+probability[i];   /* cumulative class probability */
    myu[i]=myu[i-1]+i*probability[i];     /* cumulative class mean */
  }
  /*
    Sigma maximization: inter-class variance and compute optimal threshold.
  */
  threshold=0;
  max_sigma=0.0;
  for (i=0; i < (ssize_t) MaxIntensity; i++)
  {
    sigma[i]=0.0;
    if ((omega[i] != 0.0) && (omega[i] != 1.0))
      sigma[i]=pow(myu[MaxIntensity]*omega[i]-myu[i],2.0)/(omega[i]*(1.0-
        omega[i]));
    if (sigma[i] > max_sigma)
      {
        max_sigma=sigma[i];
        threshold=(double) i;
      }
  }
  /*
    Free resources.
  */
  myu=(double *) RelinquishMagickMemory(myu);
  omega=(double *) RelinquishMagickMemory(omega);
  probability=(double *) RelinquishMagickMemory(probability);
  sigma=(double *) RelinquishMagickMemory(sigma);
  /* express the chosen bin as a percentage of the intensity range */
  return(100.0*threshold/MaxIntensity);
}

/*
  TriangleThreshold() picks the histogram bin farthest from the line drawn
  between the histogram peak and the far non-empty end of the histogram
  (triangle method); returns the threshold as a percentage of MaxIntensity.
*/
static double TriangleThreshold(const double *histogram,
  ExceptionInfo *exception)
{
  double
    a,
    b,
    c,
    count,
    distance,
    inverse_ratio,
    max_distance,
    segment,
    x1,
    x2,
    y1,
    y2;

  register ssize_t
    i;

  ssize_t
    end,
    max,
    start,
    threshold;

  /*
    Compute optimal threshold with triangle algorithm.
  */
  (void) exception;
  start=0;  /* find start bin, first bin not zero count */
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > 0.0)
      {
        start=i;
        break;
      }
  end=0;  /* find end bin, last bin not zero count */
  for (i=(ssize_t) MaxIntensity; i >= 0; i--)
    if (histogram[i] > 0.0)
      {
        end=i;
        break;
      }
  max=0;  /* find max bin, bin with largest count */
  count=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    if (histogram[i] > count)
      {
        max=i;
        count=histogram[i];
      }
  /*
    Compute threshold at split point.
  */
  x1=(double) max;
  y1=histogram[max];
  /* draw the line toward the longer tail of the histogram */
  x2=(double) end;
  if ((max-start) >= (end-max))
    x2=(double) start;
  y2=0.0;
  /* line through (x1,y1)-(x2,y2) in implicit form a*x+b*y+c=0 */
  a=y1-y2;
  b=x2-x1;
  c=(-1.0)*(a*x1+b*y1);
  inverse_ratio=1.0/sqrt(a*a+b*b+c*c);
  threshold=0;
  max_distance=0.0;
  if (x2 == (double) start)
    /* peak is right of center: search the left tail */
    for (i=start; i < max; i++)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment > 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  else
    /* peak is left of center: search the right tail */
    for (i=end; i > max; i--)
    {
      segment=inverse_ratio*(a*i+b*histogram[i]+c);
      distance=sqrt(segment*segment);
      if ((distance > max_distance) && (segment < 0.0))
        {
          threshold=i;
          max_distance=distance;
        }
    }
  /* express the chosen bin as a percentage of the intensity range */
  return(100.0*threshold/MaxIntensity);
}

MagickExport MagickBooleanType AutoThresholdImage(Image *image,
  const AutoThresholdMethod method,ExceptionInfo *exception)
{
  CacheView
    *image_view;

  char
    property[MagickPathExtent];

  double
    gamma,
    *histogram,
    sum,
    threshold;

  MagickBooleanType
    status;

  register ssize_t
    i;

  ssize_t
    y;

  /*
    Form histogram.
  */
  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  histogram=(double *) AcquireQuantumMemory(MaxIntensity+1UL,
    sizeof(*histogram));
  if (histogram == (double *) NULL)
    ThrowBinaryException(ResourceLimitError,"MemoryAllocationFailed",
      image->filename);
  status=MagickTrue;
  (void) memset(histogram,0,(MaxIntensity+1UL)*sizeof(*histogram));
  image_view=AcquireVirtualCacheView(image,exception);
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register const Quantum
      *magick_restrict p;

    register ssize_t
      x;

    p=GetCacheViewVirtualPixels(image_view,0,y,image->columns,1,exception);
    if (p == (const Quantum *) NULL)
      break;
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      /* bin by 8-bit intensity regardless of quantum depth */
      double intensity = GetPixelIntensity(image,p);
      histogram[ScaleQuantumToChar(ClampToQuantum(intensity))]++;
      p+=GetPixelChannels(image);
    }
  }
  image_view=DestroyCacheView(image_view);
  /*
    Normalize histogram.
  */
  sum=0.0;
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    sum+=histogram[i];
  gamma=PerceptibleReciprocal(sum);
  for (i=0; i <= (ssize_t) MaxIntensity; i++)
    histogram[i]=gamma*histogram[i];
  /*
    Discover threshold from histogram.
  */
  switch (method)
  {
    case KapurThresholdMethod:
    {
      threshold=KapurThreshold(image,histogram,exception);
      break;
    }
    case OTSUThresholdMethod:
    default:
    {
      threshold=OTSUThreshold(image,histogram,exception);
      break;
    }
    case TriangleThresholdMethod:
    {
      threshold=TriangleThreshold(histogram,exception);
      break;
    }
  }
  histogram=(double *) RelinquishMagickMemory(histogram);
  /* a negative threshold signals an allocation failure in the method */
  if (threshold < 0.0)
    status=MagickFalse;
  if (status == MagickFalse)
    return(MagickFalse);
  /*
    Threshold image: record the chosen threshold as an image property, then
    apply it (threshold is a percentage of QuantumRange).
  */
  (void) FormatLocaleString(property,MagickPathExtent,"%g%%",threshold);
  (void) SetImageProperty(image,"auto-threshold:threshold",property,
    exception);
  return(BilevelImage(image,QuantumRange*threshold/100.0,exception));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B i l e v e l I m a g e                                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BilevelImage() changes the value of individual pixels based on the
%  intensity of each pixel channel.  The result is a high-contrast image.
%
%  More precisely each channel value of the image is 'thresholded' so that if
%  it is equal to or less than the given value it is set to zero, while any
%  value greater than that value is set to its maximum or QuantumRange.
%
%  This function is what is used to implement the "-threshold" operator for
%  the command line API.
%
%  If the default channel setting is given the image is thresholded using just
%  the gray 'intensity' of the image, rather than the individual channels.
%
%  The format of the BilevelImage method is:
%
%      MagickBooleanType BilevelImage(Image *image,const double threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold values.
%
%    o exception: return any errors or warnings in this structure.
%
%  Aside: You can get the same results as operator using LevelImages()
%  with the 'threshold' value for both the black_point and the white_point.
%
*/
MagickExport MagickBooleanType BilevelImage(Image *image,
  const double threshold,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Bilevel threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* default channel mask: threshold on gray intensity of the pixel */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* explicit channel mask: threshold each channel independently */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        q[i]=(Quantum) (pixel <= threshold ? 0 : QuantumRange);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BilevelImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     B l a c k T h r e s h o l d I m a g e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  BlackThresholdImage() is like ThresholdImage() but forces all pixels below
%  the threshold into black while leaving all pixels at or above the threshold
%  unchanged.
%
%  The format of the BlackThresholdImage method is:
%
%      MagickBooleanType BlackThresholdImage(Image *image,
%        const char *threshold,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold: define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType BlackThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) SetImageColorspace(image,sRGBColorspace,exception);
  /*
    Parse the threshold geometry: rho[,sigma[,xi[,psi[,chi]]]] map onto the
    red, green, blue, alpha (and black for CMYK) channels; a single value
    applies to all channels.
  */
  GetPixelInfo(image,&threshold);
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      /* CMYK shifts the channel order: psi is black, chi is alpha */
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      /* percentages scale to the quantum range */
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    Black threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /* default channel mask: compare gray intensity of the pixel */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /* explicit channel mask: compare each channel independently */
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel < GetPixelInfoChannel(&threshold,channel))
          q[i]=(Quantum) 0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_BlackThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     C l a m p I m a g e                                                     %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ClampImage() sets each pixel whose value is below zero to zero and any
%  pixel whose value is above the quantum range to the quantum range (e.g.
%  65535) otherwise the pixel value remains unchanged.
%
%  The format of the ClampImage method is:
%
%      MagickBooleanType ClampImage(Image *image,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ClampImage(Image *image,
  ExceptionInfo *exception)
{
#define ClampImageTag "Clamp/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (image->storage_class == PseudoClass)
    {
      /* palette image: clamp the colormap entries, then sync the pixels */
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) ClampPixel(q->red);
        q->green=(double) ClampPixel(q->green);
        q->blue=(double) ClampPixel(q->blue);
        q->alpha=(double) ClampPixel(q->alpha);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Clamp image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        q[i]=ClampPixel((MagickRealType) q[i]);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_ClampImage)
#endif
        proceed=SetImageProgress(image,ClampImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     D e s t r o y T h r e s h o l d M a p                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DestroyThresholdMap() de-allocates the given ThresholdMap
%
%  The format of the DestroyThresholdMap method is:
%
%      ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
%
%  A description of each parameter follows.
%
%    o map: Pointer to the Threshold map to destroy
%
*/
MagickExport ThresholdMap *DestroyThresholdMap(ThresholdMap *map)
{
  assert(map != (ThresholdMap *) NULL);
  /*
    Release each owned member, then the map structure itself; always returns
    NULL so the caller can overwrite its stale pointer.
  */
  if (map->levels != (ssize_t *) NULL)
    map->levels=(ssize_t *) RelinquishMagickMemory(map->levels);
  if (map->description != (char *) NULL)
    map->description=DestroyString(map->description);
  if (map->map_id != (char *) NULL)
    map->map_id=DestroyString(map->map_id);
  return((ThresholdMap *) RelinquishMagickMemory(map));
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%     G e t T h r e s h o l d M a p                                           %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMap() loads and searches one or more threshold map files for
%  the map matching the given name or alias.
%
%  The format of the GetThresholdMap method is:
%
%      ThresholdMap *GetThresholdMap(const char *map_id,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o map_id: ID of the map to look for.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport ThresholdMap *GetThresholdMap(const char *map_id,
  ExceptionInfo *exception)
{
  ThresholdMap
    *map;

  /*
    The built-in maps are searched first so the common aliases resolve even
    without any configuration files on disk.
  */
  map=GetThresholdMapFile(MinimalThresholdMap,"built-in",map_id,exception);
  if (map != (ThresholdMap *) NULL)
    return(map);
#if !defined(MAGICKCORE_ZERO_CONFIGURATION_SUPPORT)
  {
    const StringInfo
      *option;

    LinkedListInfo
      *options;

    /*
      Scan each thresholds.xml configuration file until a match is found.
    */
    options=GetConfigureOptions(ThresholdsFilename,exception);
    for (option=(const StringInfo *) GetNextValueInLinkedList(options);
         option != (const StringInfo *) NULL;
         option=(const StringInfo *) GetNextValueInLinkedList(options))
    {
      map=GetThresholdMapFile((const char *) GetStringInfoDatum(option),
        GetStringInfoPath(option),map_id,exception);
      if (map != (ThresholdMap *) NULL)
        break;
    }
    options=DestroyConfigureOptions(options);
  }
#endif
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   + G e t T h r e s h o l d M a p F i l e                                   %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetThresholdMapFile() looks for a given threshold map name or alias in the
%  given XML file data, and returns the allocated map when found.
%
%  The format of the GetThresholdMapFile method is:
%
%      ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
%        const char *map_id,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o map_id: ID of the map to look for in XML list.
%
%    o exception: return any errors or warnings in this structure.
%
*/
static ThresholdMap *GetThresholdMapFile(const char *xml,const char *filename,
  const char *map_id,ExceptionInfo *exception)
{
  char
    *p;

  const char
    *attribute,
    *content;

  double
    value;

  register ssize_t
    i;

  ThresholdMap
    *map;

  XMLTreeInfo
    *description,
    *levels,
    *threshold,
    *thresholds;

  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  map=(ThresholdMap *) NULL;
  thresholds=NewXMLTree(xml,exception);
  if (thresholds == (XMLTreeInfo *) NULL)
    return(map);
  /*
    Locate the <threshold> element whose "map" or "alias" attribute matches
    map_id.
  */
  for (threshold=GetXMLTreeChild(thresholds,"threshold");
       threshold != (XMLTreeInfo *) NULL;
       threshold=GetNextXMLTreeTag(threshold))
  {
    attribute=GetXMLTreeAttribute(threshold,"map");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
    attribute=GetXMLTreeAttribute(threshold,"alias");
    if ((attribute != (char *) NULL) && (LocaleCompare(map_id,attribute) == 0))
      break;
  }
  if (threshold == (XMLTreeInfo *) NULL)
    {
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Validate that the required child elements are present.
  */
  description=GetXMLTreeChild(threshold,"description");
  if (description == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<description>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  levels=GetXMLTreeChild(threshold,"levels");
  if (levels == (XMLTreeInfo *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingElement", "<levels>, map \"%s\"", map_id);
      thresholds=DestroyXMLTree(thresholds);
      return(map);
    }
  /*
    Allocate the map; members start NULL so DestroyThresholdMap() is safe on
    every error path below.
  */
  map=(ThresholdMap *) AcquireCriticalMemory(sizeof(*map));
  map->map_id=(char *) NULL;
  map->description=(char *) NULL;
  map->levels=(ssize_t *) NULL;
  attribute=GetXMLTreeAttribute(threshold,"map");
  if (attribute != (char *) NULL)
    map->map_id=ConstantString(attribute);
  content=GetXMLTreeContent(description);
  if (content != (char *) NULL)
    map->description=ConstantString(content);
  /*
    Parse and validate the levels geometry: width > 0, height > 0,
    divisor >= 2.
  */
  attribute=GetXMLTreeAttribute(levels,"width");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->width=StringToUnsignedLong(attribute);
  if (map->width == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels width>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"height");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->height=StringToUnsignedLong(attribute);
  if (map->height == 0)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels height>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  attribute=GetXMLTreeAttribute(levels,"divisor");
  if (attribute == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->divisor=(ssize_t) StringToLong(attribute);
  if (map->divisor < 2)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidAttribute", "<levels divisor>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  /*
    Parse the level values: exactly width*height integers, each in
    [0,divisor].
  */
  content=GetXMLTreeContent(levels);
  if (content == (char *) NULL)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlMissingContent", "<levels>, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  map->levels=(ssize_t *) AcquireQuantumMemory((size_t) map->width,
    map->height*sizeof(*map->levels));
  if (map->levels == (ssize_t *) NULL)
    ThrowFatalException(ResourceLimitFatalError,
      "UnableToAcquireThresholdMap");
  for (i=0; i < (ssize_t) (map->width*map->height); i++)
  {
    map->levels[i]=(ssize_t) strtol(content,&p,10);
    if (p == content)
      {
        /* no digits consumed: the content ran out of values */
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> too few values, map \"%s\"",map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    if ((map->levels[i] < 0) || (map->levels[i] > map->divisor))
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlInvalidContent", "<level> %.20g out of range, map \"%s\"",
          (double) map->levels[i],map_id);
        thresholds=DestroyXMLTree(thresholds);
        map=DestroyThresholdMap(map);
        return(map);
      }
    content=p;
  }
  /* any trailing integer means the map has more values than width*height */
  value=(double) strtol(content,&p,10);
  (void) value;
  if (p != content)
    {
      (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
        "XmlInvalidContent", "<level> too many values, map \"%s\"",map_id);
      thresholds=DestroyXMLTree(thresholds);
      map=DestroyThresholdMap(map);
      return(map);
    }
  thresholds=DestroyXMLTree(thresholds);
  return(map);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%   + L i s t T h r e s h o l d M a p F i l e                                 %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMapFile() lists the threshold maps and their descriptions
%  in the given XML file data.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,const char*xml,
%        const char *filename,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file: An pointer to the output FILE.
%
%    o xml: The threshold map list in XML format.
%
%    o filename: The threshold map XML filename.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickBooleanType ListThresholdMapFile(FILE *file,const char *xml,
  const char *filename,ExceptionInfo *exception)
{
  const char
    *alias,
    *content,
    *map;

  XMLTreeInfo
    *description,
    *threshold,
    *thresholds;

  assert( xml != (char *) NULL );
  assert( file != (FILE *) NULL );
  (void) LogMagickEvent(ConfigureEvent,GetMagickModule(),
    "Loading threshold map file \"%s\" ...",filename);
  thresholds=NewXMLTree(xml,exception);
  if ( thresholds == (XMLTreeInfo *) NULL )
    return(MagickFalse);
  (void) FormatLocaleFile(file,"%-16s %-12s %s\n","Map","Alias","Description");
  (void) FormatLocaleFile(file,
    "----------------------------------------------------\n");
  /*
    Emit one table row per <threshold> element; the "alias" attribute is
    optional, but "map" and <description> are required.
  */
  threshold=GetXMLTreeChild(thresholds,"threshold");
  for ( ; threshold != (XMLTreeInfo *) NULL;
          threshold=GetNextXMLTreeTag(threshold))
  {
    map=GetXMLTreeAttribute(threshold,"map");
    if (map == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingAttribute", "<map>");
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    alias=GetXMLTreeAttribute(threshold,"alias");
    description=GetXMLTreeChild(threshold,"description");
    if (description == (XMLTreeInfo *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingElement", "<description>, map \"%s\"",map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    content=GetXMLTreeContent(description);
    if (content == (char *) NULL)
      {
        (void) ThrowMagickException(exception,GetMagickModule(),OptionError,
          "XmlMissingContent", "<description>, map \"%s\"", map);
        thresholds=DestroyXMLTree(thresholds);
        return(MagickFalse);
      }
    (void) FormatLocaleFile(file,"%-16s %-12s %s\n",map,alias ? alias : "",
      content);
  }
  thresholds=DestroyXMLTree(thresholds);
  return(MagickTrue);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  L i s t T h r e s h o l d M a p s                                          %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  ListThresholdMaps() lists the threshold maps and their descriptions
%  as defined by "threshold.xml" to a file.
%
%  The format of the ListThresholdMaps method is:
%
%      MagickBooleanType ListThresholdMaps(FILE *file,ExceptionInfo *exception)
%
%  A description of each parameter follows.
%
%    o file:  A pointer to the output FILE.
%
%    o exception: return any errors or warnings in this structure.
%
*/
MagickExport MagickBooleanType ListThresholdMaps(FILE *file,
  ExceptionInfo *exception)
{
  const StringInfo
    *option;

  LinkedListInfo
    *options;

  MagickStatusType
    status;

  status=MagickTrue;
  if (file == (FILE *) NULL)
    file=stdout;
  /*
    List every threshold.xml found along the configure search path; status
    accumulates the per-file results (MagickFalse wins).
  */
  options=GetConfigureOptions(ThresholdsFilename,exception);
  (void) FormatLocaleFile(file,
    "\n Threshold Maps for Ordered Dither Operations\n");
  option=(const StringInfo *) GetNextValueInLinkedList(options);
  while (option != (const StringInfo *) NULL)
  {
    (void) FormatLocaleFile(file,"\nPath: %s\n\n",GetStringInfoPath(option));
    status&=ListThresholdMapFile(file,(const char *) GetStringInfoDatum(option),
      GetStringInfoPath(option),exception);
    option=(const StringInfo *) GetNextValueInLinkedList(options);
  }
  options=DestroyConfigureOptions(options);
  return(status != 0 ? MagickTrue : MagickFalse);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  O r d e r e d D i t h e r I m a g e                                        %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  OrderedDitherImage() will perform an ordered dither based on a number
%  of pre-defined dithering threshold maps, but over multiple intensity
%  levels, which can be different for different channels, according to the
%  input argument.
%
%  The format of the OrderedDitherImage method is:
%
%      MagickBooleanType OrderedDitherImage(Image *image,
%        const char *threshold_map,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o threshold_map: A string containing the name of the threshold dither
%      map to use, followed by zero or more numbers representing the number
%      of color levels to dither between.
%
%      Any level number less than 2 will be equivalent to 2, and means only
%      binary dithering will be applied to each color channel.
%
%      No numbers also means a 2 level (bitmap) dither will be applied to all
%      channels, while a single number is the number of levels applied to each
%      channel in sequence.  More numbers will be applied in turn to each of
%      the color channels.
%
%      For example: "o3x3,6" will generate a 6 level posterization of the
%      image with an ordered 3x3 diffused pixel dither being applied between
%      each level.  While checker,8,8,4 will produce a 332 colormapped image
%      with only a single checkerboard hash pattern (50% grey) between each
%      color level, to basically double the number of color levels with
%      a bare minimum of dithering.
%
%    o exception: return any errors or warnings in this structure.
% */ MagickExport MagickBooleanType OrderedDitherImage(Image *image, const char *threshold_map,ExceptionInfo *exception) { #define DitherImageTag "Dither/Image" CacheView *image_view; char token[MagickPathExtent]; const char *p; double levels[CompositePixelChannel]; MagickBooleanType status; MagickOffsetType progress; register ssize_t i; ssize_t y; ThresholdMap *map; assert(image != (Image *) NULL); assert(image->signature == MagickCoreSignature); if (image->debug != MagickFalse) (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename); assert(exception != (ExceptionInfo *) NULL); assert(exception->signature == MagickCoreSignature); if (threshold_map == (const char *) NULL) return(MagickTrue); p=(char *) threshold_map; while (((isspace((int) ((unsigned char) *p)) != 0) || (*p == ',')) && (*p != '\0')) p++; threshold_map=p; while (((isspace((int) ((unsigned char) *p)) == 0) && (*p != ',')) && (*p != '\0')) { if ((p-threshold_map) >= (MagickPathExtent-1)) break; token[p-threshold_map]=(*p); p++; } token[p-threshold_map]='\0'; map=GetThresholdMap(token,exception); if (map == (ThresholdMap *) NULL) { (void) ThrowMagickException(exception,GetMagickModule(),OptionError, "InvalidArgument","%s : '%s'","ordered-dither",threshold_map); return(MagickFalse); } for (i=0; i < MaxPixelChannels; i++) levels[i]=2.0; p=strchr((char *) threshold_map,','); if ((p != (char *) NULL) && (isdigit((int) ((unsigned char) *(++p))) != 0)) { GetNextToken(p,&p,MagickPathExtent,token); for (i=0; (i < MaxPixelChannels); i++) levels[i]=StringToDouble(token,(char **) NULL); for (i=0; (*p != '\0') && (i < MaxPixelChannels); i++) { GetNextToken(p,&p,MagickPathExtent,token); if (*token == ',') GetNextToken(p,&p,MagickPathExtent,token); levels[i]=StringToDouble(token,(char **) NULL); } } for (i=0; i < MaxPixelChannels; i++) if (fabs(levels[i]) >= 1) levels[i]-=1.0; if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse) return(MagickFalse); status=MagickTrue; progress=0; 
image_view=AcquireAuthenticCacheView(image,exception); #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp parallel for schedule(static) shared(progress,status) \ magick_number_threads(image,image,image->rows,1) #endif for (y=0; y < (ssize_t) image->rows; y++) { register ssize_t x; register Quantum *magick_restrict q; if (status == MagickFalse) continue; q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception); if (q == (Quantum *) NULL) { status=MagickFalse; continue; } for (x=0; x < (ssize_t) image->columns; x++) { register ssize_t i; ssize_t n; n=0; for (i=0; i < (ssize_t) GetPixelChannels(image); i++) { ssize_t level, threshold; PixelChannel channel = GetPixelChannelChannel(image,i); PixelTrait traits = GetPixelChannelTraits(image,channel); if ((traits & UpdatePixelTrait) == 0) continue; if (fabs(levels[n]) < MagickEpsilon) { n++; continue; } threshold=(ssize_t) (QuantumScale*q[i]*(levels[n]*(map->divisor-1)+1)); level=threshold/(map->divisor-1); threshold-=level*(map->divisor-1); q[i]=ClampToQuantum((double) (level+(threshold >= map->levels[(x % map->width)+map->width*(y % map->height)]))* QuantumRange/levels[n]); n++; } q+=GetPixelChannels(image); } if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse) status=MagickFalse; if (image->progress_monitor != (MagickProgressMonitor) NULL) { MagickBooleanType proceed; #if defined(MAGICKCORE_OPENMP_SUPPORT) #pragma omp critical (MagickCore_OrderedDitherImage) #endif proceed=SetImageProgress(image,DitherImageTag,progress++,image->rows); if (proceed == MagickFalse) status=MagickFalse; } } image_view=DestroyCacheView(image_view); map=DestroyThresholdMap(map); return(MagickTrue); } /* %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % % % % % % P e r c e p t i b l e I m a g e % % % % % % % %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% % % PerceptibleImage() set each pixel whose value is less than |epsilon| to % epsilon or 
-epsilon (whichever is closer) otherwise the pixel value remains
%  unchanged.
%
%  The format of the PerceptibleImage method is:
%
%      MagickBooleanType PerceptibleImage(Image *image,const double epsilon,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o epsilon: the epsilon threshold (e.g. 1.0e-9).
%
%    o exception: return any errors or warnings in this structure.
%
*/

/*
  Clamp a quantum away from zero: values whose magnitude is below epsilon are
  replaced by +/-epsilon (preserving sign); larger magnitudes pass through.
*/
static inline Quantum PerceptibleThreshold(const Quantum quantum,
  const double epsilon)
{
  double
    sign;

  sign=(double) quantum < 0.0 ? -1.0 : 1.0;
  if ((sign*quantum) >= epsilon)
    return(quantum);
  return((Quantum) (sign*epsilon));
}

MagickExport MagickBooleanType PerceptibleImage(Image *image,
  const double epsilon,ExceptionInfo *exception)
{
#define PerceptibleImageTag "Perceptible/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  /*
    PseudoClass images only need their colormap adjusted; SyncImage()
    propagates the colormap change to the pixels.
  */
  if (image->storage_class == PseudoClass)
    {
      register ssize_t
        i;

      register PixelInfo
        *magick_restrict q;

      q=image->colormap;
      for (i=0; i < (ssize_t) image->colors; i++)
      {
        q->red=(double) PerceptibleThreshold(ClampToQuantum(q->red),
          epsilon);
        q->green=(double) PerceptibleThreshold(ClampToQuantum(q->green),
          epsilon);
        q->blue=(double) PerceptibleThreshold(ClampToQuantum(q->blue),
          epsilon);
        q->alpha=(double) PerceptibleThreshold(ClampToQuantum(q->alpha),
          epsilon);
        q++;
      }
      return(SyncImage(image,exception));
    }
  /*
    Perceptible image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      /* Note: every defined channel is processed, not just updatable ones. */
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if (traits == UndefinedPixelTrait)
          continue;
        q[i]=PerceptibleThreshold(q[i],epsilon);
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_PerceptibleImage)
#endif
        proceed=SetImageProgress(image,PerceptibleImageTag,progress++,image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  R a n d o m T h r e s h o l d I m a g e                                    %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RandomThresholdImage() changes the value of individual pixels based on the
%  intensity of each pixel compared to a random threshold.  The result is a
%  low-contrast, two color image.
%
%  The format of the RandomThresholdImage method is:
%
%      MagickBooleanType RandomThresholdImage(Image *image,
%        const double min_threshold,const double max_threshold,
%        ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low,high: Specify the high and low thresholds.  These values range from
%      0 to QuantumRange.
%
%    o exception: return any errors or warnings in this structure.
%
% */
MagickExport MagickBooleanType RandomThresholdImage(Image *image,
  const double min_threshold, const double max_threshold,
  ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;  /* unused after GetPixelInfo(); shadowed by the per-channel
                   'threshold' double inside the pixel loop */

  RandomInfo
    **magick_restrict random_info;

  ssize_t
    y;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
  unsigned long
    key;
#endif

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  assert(exception != (ExceptionInfo *) NULL);
  assert(exception->signature == MagickCoreSignature);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  GetPixelInfo(image,&threshold);
  /*
    Random threshold image.
  */
  status=MagickTrue;
  progress=0;
  random_info=AcquireRandomInfoThreadSet();
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  /* Only parallelize when a random secret key is set (key == ~0UL);
     otherwise run single-threaded for reproducible random sequences. */
  key=GetRandomSecretKey(random_info[0]);
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,key == ~0UL)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    const int
      id = GetOpenMPThreadId();

    register Quantum
      *magick_restrict q;

    register ssize_t
      x;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      register ssize_t
        i;

      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        double
          threshold;

        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        /*
          Pixels below min_threshold or above max_threshold are compared
          against those fixed bounds; pixels in between are compared against
          a uniform random threshold.
        */
        if ((double) q[i] < min_threshold)
          threshold=min_threshold;
        else
          if ((double) q[i] > max_threshold)
            threshold=max_threshold;
          else
            threshold=(double) (QuantumRange*
              GetPseudoRandomValue(random_info[id]));
        q[i]=(double) q[i] <= threshold ? 0 : QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RandomThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  random_info=DestroyRandomInfoThreadSet(random_info);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  R a n g e T h r e s h o l d I m a g e                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  RangeThresholdImage() applies soft and hard thresholding.
%
%  The format of the RangeThresholdImage method is:
%
%      MagickBooleanType RangeThresholdImage(Image *image,
%        const double low_black,const double low_white,const double high_white,
%        const double high_black,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o low_black: Define the minimum threshold value.
%
%    o low_white: Define the maximum threshold value.
%
%    o high_white: Define the minimum threshold value.
%
%    o high_black: Define the maximum threshold value.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType RangeThresholdImage(Image *image,
  const double low_black,const double low_white,const double high_white,
  const double high_black,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): gray images are converted to sRGB before thresholding;
     looks intentional (thresholds operate per RGB channel) -- confirm. */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  /*
    Range threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /*
        By default the decision is made on the pixel intensity; when a
        channel mask is active each channel is thresholded on its own value.
      */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        /*
          Piecewise mapping: 0 below low_black, ramp up to white across
          [low_black, low_white), white across [low_white, high_white],
          ramp back down across (high_white, high_black], 0 above.
          (The last two branches are redundant but both assign 0.)
        */
        if (pixel < low_black)
          q[i]=0;
        else
          if ((pixel >= low_black) && (pixel < low_white))
            q[i]=ClampToQuantum(QuantumRange*
              PerceptibleReciprocal(low_white-low_black)*(pixel-low_black));
          else
            if ((pixel >= low_white) && (pixel <= high_white))
              q[i]=QuantumRange;
            else
              if ((pixel > high_white) && (pixel <= high_black))
                q[i]=ClampToQuantum(QuantumRange*PerceptibleReciprocal(
                  high_black-high_white)*(high_black-pixel));
              else
                if (pixel > high_black)
                  q[i]=0;
                else
                  q[i]=0;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_RangeThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}

/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%                                                                             %
%                                                                             %
%                                                                             %
%  W h i t e T h r e s h o l d I m a g e                                      %
%                                                                             %
%                                                                             %
%                                                                             %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  WhiteThresholdImage() is like ThresholdImage() but forces all pixels above
%  the threshold into white while leaving all pixels at or below the threshold
%  unchanged.
%
%  The format of the WhiteThresholdImage method is:
%
%      MagickBooleanType WhiteThresholdImage(Image *image,
%        const char *thresholds,ExceptionInfo *exception)
%
%  A description of each parameter follows:
%
%    o image: the image.
%
%    o thresholds: Define the threshold value.
%
%    o exception: return any errors or warnings in this structure.
% */
MagickExport MagickBooleanType WhiteThresholdImage(Image *image,
  const char *thresholds,ExceptionInfo *exception)
{
#define ThresholdImageTag "Threshold/Image"

  CacheView
    *image_view;

  GeometryInfo
    geometry_info;

  MagickBooleanType
    status;

  MagickOffsetType
    progress;

  PixelInfo
    threshold;

  MagickStatusType
    flags;

  ssize_t
    y;

  assert(image != (Image *) NULL);
  assert(image->signature == MagickCoreSignature);
  if (image->debug != MagickFalse)
    (void) LogMagickEvent(TraceEvent,GetMagickModule(),"%s",image->filename);
  if (thresholds == (const char *) NULL)
    return(MagickTrue);
  if (SetImageStorageClass(image,DirectClass,exception) == MagickFalse)
    return(MagickFalse);
  /* NOTE(review): gray images are converted to sRGB before thresholding;
     looks intentional (thresholds are per RGB channel) -- confirm. */
  if (IsGrayColorspace(image->colorspace) != MagickFalse)
    (void) TransformImageColorspace(image,sRGBColorspace,exception);
  GetPixelInfo(image,&threshold);
  /*
    Parse the threshold geometry: rho applies to all channels; sigma/xi/psi
    (and chi for CMYK) override green/blue/alpha (black) individually.  A
    trailing '%' scales the values from percent to quantum range.
  */
  flags=ParseGeometry(thresholds,&geometry_info);
  threshold.red=geometry_info.rho;
  threshold.green=geometry_info.rho;
  threshold.blue=geometry_info.rho;
  threshold.black=geometry_info.rho;
  threshold.alpha=100.0;
  if ((flags & SigmaValue) != 0)
    threshold.green=geometry_info.sigma;
  if ((flags & XiValue) != 0)
    threshold.blue=geometry_info.xi;
  if ((flags & PsiValue) != 0)
    threshold.alpha=geometry_info.psi;
  if (threshold.colorspace == CMYKColorspace)
    {
      if ((flags & PsiValue) != 0)
        threshold.black=geometry_info.psi;
      if ((flags & ChiValue) != 0)
        threshold.alpha=geometry_info.chi;
    }
  if ((flags & PercentValue) != 0)
    {
      threshold.red*=(MagickRealType) (QuantumRange/100.0);
      threshold.green*=(MagickRealType) (QuantumRange/100.0);
      threshold.blue*=(MagickRealType) (QuantumRange/100.0);
      threshold.black*=(MagickRealType) (QuantumRange/100.0);
      threshold.alpha*=(MagickRealType) (QuantumRange/100.0);
    }
  /*
    White threshold image.
  */
  status=MagickTrue;
  progress=0;
  image_view=AcquireAuthenticCacheView(image,exception);
#if defined(MAGICKCORE_OPENMP_SUPPORT)
  #pragma omp parallel for schedule(static) shared(progress,status) \
    magick_number_threads(image,image,image->rows,1)
#endif
  for (y=0; y < (ssize_t) image->rows; y++)
  {
    register ssize_t
      x;

    register Quantum
      *magick_restrict q;

    if (status == MagickFalse)
      continue;
    q=GetCacheViewAuthenticPixels(image_view,0,y,image->columns,1,exception);
    if (q == (Quantum *) NULL)
      {
        status=MagickFalse;
        continue;
      }
    for (x=0; x < (ssize_t) image->columns; x++)
    {
      double
        pixel;

      register ssize_t
        i;

      /*
        Compare against pixel intensity by default; against the channel's
        own value when a channel mask is active.
      */
      pixel=GetPixelIntensity(image,q);
      for (i=0; i < (ssize_t) GetPixelChannels(image); i++)
      {
        PixelChannel channel = GetPixelChannelChannel(image,i);
        PixelTrait traits = GetPixelChannelTraits(image,channel);
        if ((traits & UpdatePixelTrait) == 0)
          continue;
        if (image->channel_mask != DefaultChannels)
          pixel=(double) q[i];
        if (pixel > GetPixelInfoChannel(&threshold,channel))
          q[i]=QuantumRange;
      }
      q+=GetPixelChannels(image);
    }
    if (SyncCacheViewAuthenticPixels(image_view,exception) == MagickFalse)
      status=MagickFalse;
    if (image->progress_monitor != (MagickProgressMonitor) NULL)
      {
        MagickBooleanType
          proceed;

#if defined(MAGICKCORE_OPENMP_SUPPORT)
        #pragma omp critical (MagickCore_WhiteThresholdImage)
#endif
        proceed=SetImageProgress(image,ThresholdImageTag,progress++,
          image->rows);
        if (proceed == MagickFalse)
          status=MagickFalse;
      }
  }
  image_view=DestroyCacheView(image_view);
  return(status);
}
ast-dump-openmp-declare-variant-extensions-messages.c
// RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify %s // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -verify %s -x c++ int dummy(void) { return 1; } #pragma omp declare variant(dummy) match(implementation={extension(match_any,match_all)}, device={kind(cpu, gpu)}) // expected-error {{only a single match extension allowed per OpenMP context selector}} expected-note {{the previous context property 'match_any' used here}} // expected-note {{the ignored property spans until here}} int base1(void) { return 2; } #pragma omp declare variant(dummy) match(implementation={extension(match_none,match_none)}, device={kind(gpu, fpga)}) // expected-warning {{the context property 'match_none' was used already in the same 'omp declare variant' directive; property ignored}} expected-note {{the previous context property 'match_none' used here}} expected-note {{the ignored property spans until here}} int base2(void) { return 3; } #pragma omp declare variant(dummy) match(implementation={vendor(pgi), extension(match_none,match_any)}, device={kind(cpu, gpu)}) // expected-error {{only a single match extension allowed per OpenMP context selector}} expected-note {{the previous context property 'match_none' used here}} // expected-note {{the ignored property spans until here}} int base3(void) { return 4; } int test(void) { return base1() + base2() + base3(); }
shared-clause.c
#include <stdio.h>
#ifdef _OPENMP
#include <omp.h>
#endif

/*
 * Demo of the OpenMP 'shared' clause: every thread sees the same array 'a'
 * (same memory), but each loop iteration writes a distinct element, so the
 * threads never touch the same location and need no synchronization.
 *
 * Fixes: implicit-int 'main()' (invalid since C99) changed to 'int main(void)'
 * and an explicit return added.
 */
int main(void)
{
  int i, n = 7;
  int a[n];

  /* Initialize a[i] = i+1 sequentially. */
  for (i=0; i<n; i++)
    a[i] = i+1;
  /* Each thread uses the same array but updates different positions of the
     vector, so the threads do not interfere with each other.  (The loop
     variable 'i' is implicitly private in a 'parallel for'.) */
  #pragma omp parallel for shared(a)
  for (i=0; i<n; i++)
    a[i] += i;
  printf("Después de parallel for:\n");
  for (i=0; i<n; i++)
    printf("a[%d] = %d\n",i,a[i]);
  return 0;
}
NetCDFMesh.h
/** * @file * This file is part of PUMGen * * For conditions of distribution and use, please see the copyright * notice in the file 'COPYING' at the root directory of this package * and the copyright notice at https://github.com/SeisSol/PUMGen * * @copyright 2017 Technical University of Munich * @author Sebastian Rettenberger <sebastian.rettenberger@tum.de> */ #ifndef NETCDF_MESH_H #define NETCDF_MESH_H #include <mpi.h> #include <netcdf.h> #include <netcdf_par.h> #include <apfConvert.h> #include <apfMDS.h> #include <apfMesh2.h> #include <PCU.h> #include <gmi_null.h> #include "utils/logger.h" #include "MeshInput.h" #include "NetCDFPartition.h" #include "ParallelVertexFilter.h" /** * Read PUMGen generated mesh files */ class NetCDFMesh : public MeshInput { public: NetCDFMesh(const char* meshFile, MPI_Comm comm = MPI_COMM_WORLD) { int rank = 0; int nProcs = 1; MPI_Comm_rank(comm, &rank); MPI_Comm_size(comm, &nProcs); gmi_register_null(); gmi_model* model = gmi_load(".null"); m_mesh = apf::makeEmptyMdsMesh(model, 3, false); int ncFile; checkNcError(nc_open_par(meshFile, NC_MPIIO, comm, MPI_INFO_NULL, &ncFile)); // Get number of partitions int ncDimPart; checkNcError(nc_inq_dimid(ncFile, "partitions", &ncDimPart)); size_t nPartitions; checkNcError(nc_inq_dimlen(ncFile, ncDimPart, &nPartitions)); // Local partitions unsigned int nMaxLocalPart = (nPartitions + nProcs - 1) / nProcs; unsigned int nLocalPart = nMaxLocalPart; if (nPartitions < (rank+1) * nMaxLocalPart) nLocalPart = std::max(0, static_cast<int>(nPartitions - rank * nMaxLocalPart)); MPI_Comm commIO; MPI_Comm_split(MPI_COMM_WORLD, (nLocalPart > 0 ? 
0 : MPI_UNDEFINED), 0, &commIO); // Reopen netCDF file with correct communicator checkNcError(nc_close(ncFile)); if (nLocalPart > 0) checkNcError(nc_open_par(meshFile, NC_MPIIO, commIO, MPI_INFO_NULL, &ncFile)); PCU_Switch_Comm(commIO); unsigned int nElements = 0; unsigned int nVertices = 0; int* elements = 0L; double* vertices = 0L; int* boundaries = 0L; int* groups = 0L; if (nLocalPart > 0) { // Create netCDF variables int ncVarElemSize; checkNcError(nc_inq_varid(ncFile, "element_size", &ncVarElemSize)); collectiveAccess(ncFile, ncVarElemSize); int ncVarElemVertices; checkNcError(nc_inq_varid(ncFile, "element_vertices", &ncVarElemVertices)); collectiveAccess(ncFile, ncVarElemVertices); int ncVarElemBoundaries; checkNcError(nc_inq_varid(ncFile, "element_boundaries", &ncVarElemBoundaries)); collectiveAccess(ncFile, ncVarElemBoundaries); int ncVarElemGroup; bool useGroups = true; if (nc_inq_varid(ncFile, "element_group", &ncVarElemGroup) != NC_NOERR) { useGroups = false; logWarning() << "No group found, using group 0 for all elements"; }else{ collectiveAccess(ncFile, ncVarElemGroup); } int ncVarVrtxSize; checkNcError(nc_inq_varid(ncFile, "vertex_size", &ncVarVrtxSize)); collectiveAccess(ncFile, ncVarVrtxSize); int ncVarVrtxCoords; checkNcError(nc_inq_varid(ncFile, "vertex_coordinates", &ncVarVrtxCoords)); collectiveAccess(ncFile, ncVarVrtxCoords); Partition* partitions = new Partition[nLocalPart]; // Read elements logInfo(rank) << "Reading netCDF file"; for (unsigned int i = 0; i < nMaxLocalPart; i++) { unsigned int j = i % nLocalPart; size_t start[3] = {j + rank*nMaxLocalPart, 0, 0}; // Element size unsigned int size; checkNcError(nc_get_var1_uint(ncFile, ncVarElemSize, start, &size)); partitions[j].setElemSize(size); size_t count[3] = {1, size, 4}; // Elements checkNcError(nc_get_vara_int(ncFile, ncVarElemVertices, start, count, partitions[j].elements())); // Boundaries and group checkNcError(nc_get_vara_int(ncFile, ncVarElemBoundaries, start, count, 
partitions[j].boundaries())); if (useGroups) checkNcError(nc_get_vara_int(ncFile, ncVarElemGroup, start, count, partitions[j].groups())); // Vertex size checkNcError(nc_get_var1_uint(ncFile, ncVarVrtxSize, start, &size)); partitions[j].setVrtxSize(size); // Vertices count[1] = size; count[2] = 3; checkNcError(nc_get_vara_double(ncFile, ncVarVrtxCoords, start, count, partitions[j].vertices())); } checkNcError(nc_close(ncFile)); for (unsigned int i = 0; i < nLocalPart; i++) { nElements += partitions[i].nElements(); nVertices += partitions[i].nVertices(); } // Copy to the buffer unsigned int* elementsLocal = new unsigned int[nElements*4]; elements = new int[nElements*4]; vertices = new double[nVertices*3]; boundaries = new int[nElements*4]; groups = new int[nElements]; unsigned int elementOffset = 0; unsigned int vertexOffset = 0; for (unsigned int i = 0; i < nLocalPart; i++) { #ifdef _OPENMP #pragma omp parallel #endif for (unsigned int j = 0; j < partitions[i].nElements()*4; j++) elementsLocal[elementOffset*4 + j] = partitions[i].elements()[j] + vertexOffset; memcpy(&vertices[vertexOffset*3], partitions[i].vertices(), partitions[i].nVertices()*3*sizeof(double)); partitions[i].convertBoundary(); memcpy(&boundaries[elementOffset*4], partitions[i].boundaries(), partitions[i].nElements()*4*sizeof(int)); memcpy(&groups[elementOffset], partitions[i].groups(), partitions[i].nElements()*sizeof(int)); elementOffset += partitions[i].nElements(); vertexOffset += partitions[i].nVertices(); } logInfo(rank) << "Running vertex filter"; ParallelVertexFilter filter(commIO); filter.filter(nVertices, vertices); // Create filtered vertex list delete [] vertices; nVertices = filter.numLocalVertices(); vertices = new double[nVertices*3]; memcpy(vertices, filter.localVertices(), nVertices*3*sizeof(double)); logInfo(rank) << "Converting local to global vertex identifier"; #ifdef _OPENMP #pragma omp parallel #endif for (unsigned int i = 0; i < nElements*4; i++) elements[i] = 
filter.globalIds()[elementsLocal[i]]; delete [] partitions; } logInfo(rank) << "Constructing the mesh"; apf::GlobalToVert vertMap; apf::construct(m_mesh, elements, nElements, apf::Mesh::TET, vertMap); delete [] elements; apf::alignMdsRemotes(m_mesh); apf::deriveMdsModel(m_mesh); logInfo(rank) << "Set coordinates in APF"; apf::setCoords(m_mesh, vertices, nVertices, vertMap); delete [] vertices; // Set boundaries apf::MeshTag* boundaryTag = m_mesh->createIntTag("boundary condition", 1); apf::MeshIterator* it = m_mesh->begin(3); unsigned int i = 0; while (apf::MeshEntity* element = m_mesh->iterate(it)) { apf::Adjacent adjacent; m_mesh->getAdjacent(element, 2, adjacent); for (unsigned int j = 0; j < 4; j++) { if (!boundaries[i*4 + j]) continue; m_mesh->setIntTag(adjacent[j], boundaryTag, &boundaries[i*4 + j]); } i++; } m_mesh->end(it); delete [] boundaries; // Set groups apf::MeshTag* groupTag = m_mesh->createIntTag("group", 1); it = m_mesh->begin(3); i = 0; while (apf::MeshEntity* element = m_mesh->iterate(it)) { m_mesh->setIntTag(element, groupTag, &groups[i]); i++; } m_mesh->end(it); delete [] groups; PCU_Switch_Comm(MPI_COMM_WORLD); } private: /** * Switch to collective access for a netCDf variable */ static void collectiveAccess(int ncFile, int ncVar) { checkNcError(nc_var_par_access(ncFile, ncVar, NC_COLLECTIVE)); } static void checkNcError(int error) { if (error != NC_NOERR) logError() << "Error while reading netCDF file:" << nc_strerror(error); } }; #endif // NETCDF_MESH_H
GB_binop__rminus_uint8.c
//------------------------------------------------------------------------------ // GB_binop: hard-coded functions for each built-in binary operator //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved. // SPDX-License-Identifier: Apache-2.0 //------------------------------------------------------------------------------ // If this file is in the Generated2/ folder, do not edit it // (it is auto-generated from Generator/*). #include "GB.h" #ifndef GBCOMPACT #include "GB_emult.h" #include "GB_control.h" #include "GB_ek_slice.h" #include "GB_dense.h" #include "GB_atomics.h" #include "GB_bitmap_assign_methods.h" #include "GB_binop__include.h" // C=binop(A,B) is defined by the following types and operators: // A+B function (eWiseAdd): GB (_AaddB__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_08__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_02__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_04__rminus_uint8) // A.*B function (eWiseMult): GB (_AemultB_bitmap__rminus_uint8) // A*D function (colscale): GB (_AxD__rminus_uint8) // D*A function (rowscale): GB (_DxB__rminus_uint8) // C+=B function (dense accum): GB (_Cdense_accumB__rminus_uint8) // C+=b function (dense accum): GB (_Cdense_accumb__rminus_uint8) // C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__rminus_uint8) // C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__rminus_uint8) // C=scalar+B GB (_bind1st__rminus_uint8) // C=scalar+B' GB (_bind1st_tran__rminus_uint8) // C=A+scalar GB (_bind2nd__rminus_uint8) // C=A'+scalar GB (_bind2nd_tran__rminus_uint8) // C type: uint8_t // A type: uint8_t // A pattern? 0 // B type: uint8_t // B pattern? 
0 // BinaryOp: cij = (bij - aij) #define GB_ATYPE \ uint8_t #define GB_BTYPE \ uint8_t #define GB_CTYPE \ uint8_t // true if the types of A and B are identical #define GB_ATYPE_IS_BTYPE \ 1 // true if the types of C and A are identical #define GB_CTYPE_IS_ATYPE \ 1 // true if the types of C and B are identical #define GB_CTYPE_IS_BTYPE \ 1 // aij = Ax [pA] #define GB_GETA(aij,Ax,pA,A_iso) \ uint8_t aij = GBX (Ax, pA, A_iso) // true if values of A are not used #define GB_A_IS_PATTERN \ 0 \ // bij = Bx [pB] #define GB_GETB(bij,Bx,pB,B_iso) \ uint8_t bij = GBX (Bx, pB, B_iso) // true if values of B are not used #define GB_B_IS_PATTERN \ 0 \ // declare scalar of the same type as C #define GB_CTYPE_SCALAR(t) \ uint8_t t // cij = Ax [pA] #define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \ cij = GBX (Ax, pA, A_iso) // cij = Bx [pB] #define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \ cij = GBX (Bx, pB, B_iso) #define GB_CX(p) Cx [p] // binary operator #define GB_BINOP(z,x,y,i,j) \ z = (y - x) ; // true if the binop must be flipped #define GB_BINOP_FLIP \ 0 // op is second #define GB_OP_IS_SECOND \ 0 // do the numerical phases of GB_add and GB_emult #define GB_PHASE_2_OF_2 // hard-coded loops can be vectorized #define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD // disable this operator and use the generic case if these conditions hold #define GB_DISABLE \ (GxB_NO_RMINUS || GxB_NO_UINT8 || GxB_NO_RMINUS_UINT8) //------------------------------------------------------------------------------ // C += A+B, all 3 matrices dense //------------------------------------------------------------------------------ // The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV. 
void GB (_Cdense_ewise3_accum__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_accum_template.c" } //------------------------------------------------------------------------------ // C = A+B, all 3 matrices dense //------------------------------------------------------------------------------ void GB (_Cdense_ewise3_noaccum__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix B, const int nthreads ) { #include "GB_dense_ewise3_noaccum_template.c" } //------------------------------------------------------------------------------ // C += B, accumulate a sparse matrix into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumB__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix B, const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { #include "GB_dense_subassign_23_template.c" } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C += b, accumulate a scalar into a dense matrix //------------------------------------------------------------------------------ GrB_Info GB (_Cdense_accumb__rminus_uint8) ( GrB_Matrix C, const GB_void *p_bwork, const int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else { // get the scalar b for C += b, of type uint8_t uint8_t bwork = (*((uint8_t *) p_bwork)) ; #include "GB_dense_subassign_22_template.c" return (GrB_SUCCESS) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = A*D, column scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_AxD__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GrB_Matrix D, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return 
(GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_colscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = D*B, row scale with diagonal D matrix //------------------------------------------------------------------------------ GrB_Info GB (_DxB__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix D, const GrB_Matrix B, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *restrict Cx = (uint8_t *) C->x ; #include "GB_AxB_rowscale_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B //------------------------------------------------------------------------------ GrB_Info GB (_AaddB__rminus_uint8) ( GrB_Matrix C, const int C_sparsity, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool is_eWiseUnion, const GB_void *alpha_scalar_in, const GB_void *beta_scalar_in, const bool Ch_is_Mh, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else GB_WERK_DECLARE (M_ek_slicing, int64_t) ; GB_WERK_DECLARE (A_ek_slicing, int64_t) ; GB_WERK_DECLARE (B_ek_slicing, int64_t) ; uint8_t alpha_scalar ; uint8_t beta_scalar ; if (is_eWiseUnion) { alpha_scalar = (*((uint8_t *) alpha_scalar_in)) ; beta_scalar = (*((uint8_t *) beta_scalar_in )) ; } #include "GB_add_template.c" GB_FREE_WORKSPACE ; return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_08__rminus_uint8) ( 
GrB_Matrix C, const int C_sparsity, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict C_to_M, const int64_t *restrict C_to_A, const int64_t *restrict C_to_B, const GB_task_struct *restrict TaskList, const int C_ntasks, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_08_meta.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_02__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const bool flipxy, const int64_t *restrict Cp_kfirst, const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #if GB_BINOP_FLIP // The operator is not commutative, and does not have a flipped // variant. For example z=atan2(y,x). if (flipxy) { // use fmult(y,x) #undef GB_FLIPPED #define GB_FLIPPED 1 #include "GB_emult_02_template.c" } else { // use fmult(x,y) #undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" } #else // No need to handle the flip: the operator is either commutative, or // has been handled by changing z=div(y,x) to z=rdiv(x,y) for example. 
#undef GB_FLIPPED #define GB_FLIPPED 0 #include "GB_emult_02_template.c" #endif return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_04__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix M, const bool Mask_struct, const GrB_Matrix A, const GrB_Matrix B, const int64_t *restrict Cp_kfirst, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_emult_04_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap //------------------------------------------------------------------------------ GrB_Info GB (_AemultB_bitmap__rminus_uint8) ( GrB_Matrix C, const int ewise_method, const GrB_Matrix M, const bool Mask_struct, const bool Mask_comp, const GrB_Matrix A, const GrB_Matrix B, const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads, const int C_nthreads, GB_Context Context ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else #include "GB_bitmap_emult_template.c" return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st //------------------------------------------------------------------------------ GrB_Info GB (_bind1st__rminus_uint8) ( GB_void *Cx_output, // Cx and Bx may be aliased const GB_void *x_input, const GB_void *Bx_input, const int8_t *restrict Bb, int64_t bnz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t x = (*((uint8_t *) x_input)) ; uint8_t *Bx = (uint8_t *) Bx_input ; int64_t p ; #pragma omp parallel for num_threads(nthreads) 
schedule(static) for (p = 0 ; p < bnz ; p++) { if (!GBB (Bb, p)) continue ; uint8_t bij = GBX (Bx, p, false) ; Cx [p] = (bij - x) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd //------------------------------------------------------------------------------ GrB_Info GB (_bind2nd__rminus_uint8) ( GB_void *Cx_output, // Cx and Ax may be aliased const GB_void *Ax_input, const GB_void *y_input, const int8_t *restrict Ab, int64_t anz, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else int64_t p ; uint8_t *Cx = (uint8_t *) Cx_output ; uint8_t *Ax = (uint8_t *) Ax_input ; uint8_t y = (*((uint8_t *) y_input)) ; #pragma omp parallel for num_threads(nthreads) schedule(static) for (p = 0 ; p < anz ; p++) { if (!GBB (Ab, p)) continue ; uint8_t aij = GBX (Ax, p, false) ; Cx [p] = (y - aij) ; } return (GrB_SUCCESS) ; #endif } //------------------------------------------------------------------------------ // C = op (x, A'): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (x, aij), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (aij - x) ; \ } GrB_Info GB (_bind1st_tran__rminus_uint8) ( GrB_Matrix C, const GB_void *x_input, const GrB_Matrix A, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { // GB_unop_transpose.c uses GB_ATYPE, but A is // the 2nd input to binary operator z=f(x,y). 
#undef GB_ATYPE #define GB_ATYPE \ uint8_t #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t x = (*((const uint8_t *) x_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif #undef GB_ATYPE #define GB_ATYPE \ uint8_t } //------------------------------------------------------------------------------ // C = op (A', y): transpose and apply a binary operator //------------------------------------------------------------------------------ // cij = op (aij, y), no typecasting (in spite of the macro name) #undef GB_CAST_OP #define GB_CAST_OP(pC,pA) \ { \ uint8_t aij = GBX (Ax, pA, false) ; \ Cx [pC] = (y - aij) ; \ } GrB_Info GB (_bind2nd_tran__rminus_uint8) ( GrB_Matrix C, const GrB_Matrix A, const GB_void *y_input, int64_t *restrict *Workspaces, const int64_t *restrict A_slice, int nworkspaces, int nthreads ) { #if GB_DISABLE return (GrB_NO_VALUE) ; #else uint8_t y = (*((const uint8_t *) y_input)) ; #include "GB_unop_transpose.c" return (GrB_SUCCESS) ; #endif } #endif
3d25pt.lbpar.c
#include <omp.h> #include <math.h> #define ceild(n,d) ceil(((double)(n))/((double)(d))) #define floord(n,d) floor(((double)(n))/((double)(d))) #define max(x,y) ((x) > (y)? (x) : (y)) #define min(x,y) ((x) < (y)? (x) : (y)) /* * Order-2, 3D 25 point stencil * Adapted from PLUTO and Pochoir test bench * * Tareq Malas */ #include <stdio.h> #include <stdlib.h> #include <sys/time.h> #ifdef LIKWID_PERFMON #include <likwid.h> #endif #include "print_utils.h" #define TESTS 2 #define MAX(a,b) ((a) > (b) ? a : b) #define MIN(a,b) ((a) < (b) ? a : b) #ifndef min #define min(x,y) ((x) < (y)? (x) : (y)) #endif /* Subtract the `struct timeval' values X and Y, * storing the result in RESULT. * * Return 1 if the difference is negative, otherwise 0. */ int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) { /* Perform the carry for the later subtraction by updating y. */ if (x->tv_usec < y->tv_usec) { int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1; y->tv_usec -= 1000000 * nsec; y->tv_sec += nsec; } if (x->tv_usec - y->tv_usec > 1000000) { int nsec = (x->tv_usec - y->tv_usec) / 1000000; y->tv_usec += 1000000 * nsec; y->tv_sec -= nsec; } /* Compute the time remaining to wait. * tv_usec is certainly positive. */ result->tv_sec = x->tv_sec - y->tv_sec; result->tv_usec = x->tv_usec - y->tv_usec; /* Return 1 if result is negative. 
*/ return x->tv_sec < y->tv_sec; } int main(int argc, char *argv[]) { int t, i, j, k, test; int Nx, Ny, Nz, Nt; if (argc > 3) { Nx = atoi(argv[1])+8; Ny = atoi(argv[2])+8; Nz = atoi(argv[3])+8; } if (argc > 4) Nt = atoi(argv[4]); double ****A = (double ****) malloc(sizeof(double***)*2); double ***roc2 = (double ***) malloc(sizeof(double**)); A[0] = (double ***) malloc(sizeof(double**)*Nz); A[1] = (double ***) malloc(sizeof(double**)*Nz); roc2 = (double ***) malloc(sizeof(double**)*Nz); for(i=0; i<Nz; i++){ A[0][i] = (double**) malloc(sizeof(double*)*Ny); A[1][i] = (double**) malloc(sizeof(double*)*Ny); roc2[i] = (double**) malloc(sizeof(double*)*Ny); for(j=0;j<Ny;j++){ A[0][i][j] = (double*) malloc(sizeof(double)*Nx); A[1][i][j] = (double*) malloc(sizeof(double)*Nx); roc2[i][j] = (double*) malloc(sizeof(double)*Nx); } } // tile size information, including extra element to decide the list length int *tile_size = (int*) malloc(sizeof(int)); tile_size[0] = -1; // The list is modified here before source-to-source transformations tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5); tile_size[0] = 4; tile_size[1] = 4; tile_size[2] = 32; tile_size[3] = 256; tile_size[4] = -1; // for timekeeping int ts_return = -1; struct timeval start, end, result; double tdiff = 0.0, min_tdiff=1.e100; const int BASE = 1024; // initialize variables // srand(42); for (i = 1; i < Nz; i++) { for (j = 1; j < Ny; j++) { for (k = 1; k < Nx; k++) { A[0][i][j][k] = 1.0 * (rand() % BASE); roc2[i][j][k] = 2.0 * (rand() % BASE); } } } #ifdef LIKWID_PERFMON LIKWID_MARKER_INIT; #pragma omp parallel { LIKWID_MARKER_THREADINIT; #pragma omp barrier LIKWID_MARKER_START("calc"); } #endif int num_threads = 1; #if defined(_OPENMP) num_threads = omp_get_max_threads(); #endif const double coef0 = -0.28472; const double coef1 = 0.16000; const double coef2 = -0.02000; const double coef3 = 0.00254; const double coef4 = -0.00018; for(test=0; test<TESTS; test++){ gettimeofday(&start, 0); // serial 
execution - Addition: 6 && Multiplication: 2 /* Copyright (C) 1991-2014 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, see <http://www.gnu.org/licenses/>. */ /* This header is separate from features.h so that the compiler can include it implicitly at the start of every compilation. It must not itself include <features.h> or any other header that includes <features.h> because the implicit include comes before any feature test macros that may be defined in a source file before it first explicitly includes a system header. GCC knows the name of this header in order to preinclude it. */ /* glibc's intent is to support the IEC 559 math functionality, real and complex. If the GCC (4.9 and later) predefined macros specifying compiler intent are available, use them to determine whether the overall intent is to support these features; otherwise, presume an older compiler has intent to support these features and define these macros by default. */ /* wchar_t uses ISO/IEC 10646 (2nd ed., published 2011-03-15) / Unicode 6.0. */ /* We do not support C11 <threads.h>. 
*/ int t1, t2, t3, t4, t5, t6, t7, t8; int lb, ub, lbp, ubp, lb2, ub2; register int lbv, ubv; /* Start of CLooG code */ if ((Nt >= 1) && (Nx >= 9) && (Ny >= 9) && (Nz >= 9)) { for (t1=-1;t1<=2*Nt-2;t1++) { lbp=ceild(t1+2,2); ubp=min(floord(4*Nt+Nz-9,4),floord(2*t1+Nz-4,4)); #pragma omp parallel for private(lbv,ubv,t3,t4,t5,t6,t7,t8) for (t2=lbp;t2<=ubp;t2++) { for (t3=max(ceild(t1-12,16),ceild(4*t2-Nz-19,32));t3<=min(min(floord(4*Nt+Ny-9,32),floord(2*t1+Ny-3,32)),floord(4*t2+Ny-9,32));t3++) { for (t4=max(max(ceild(t1-124,128),ceild(4*t2-Nz-243,256)),ceild(32*t3-Ny-243,256));t4<=min(min(min(floord(4*Nt+Nx-9,256),floord(2*t1+Nx-3,256)),floord(4*t2+Nx-9,256)),floord(32*t3+Nx+19,256));t4++) { for (t5=max(max(max(ceild(t1,2),ceild(4*t2-Nz+5,4)),ceild(32*t3-Ny+5,4)),ceild(256*t4-Nx+5,4));t5<=floord(t1+1,2);t5++) { for (t6=max(4*t2,-4*t1+4*t2+8*t5-3);t6<=min(min(4*t2+3,-4*t1+4*t2+8*t5),4*t5+Nz-5);t6++) { for (t7=max(32*t3,4*t5+4);t7<=min(32*t3+31,4*t5+Ny-5);t7++) { lbv=max(256*t4,4*t5+4); ubv=min(256*t4+255,4*t5+Nx-5); #pragma ivdep #pragma vector always for (t8=lbv;t8<=ubv;t8++) { A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] = (((2.0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) - A[( t5 + 1) % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (roc2[ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)] * (((((coef0 * A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8)]) + (coef1 * (((((A[ t5 % 2][ (-4*t5+t6) - 1][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 1][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 1][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 1]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 1]))) + (coef2 * (((((A[ t5 % 2][ (-4*t5+t6) - 2][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 2][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 2][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 2][ (-4*t5+t8)]) + A[ t5 % 2][ 
(-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 2]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 2]))) + (coef3 * (((((A[ t5 % 2][ (-4*t5+t6) - 3][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 3][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 3][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 3]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 3]))) + (coef4 * (((((A[ t5 % 2][ (-4*t5+t6) - 4][ (-4*t5+t7)][ (-4*t5+t8)] + A[ t5 % 2][ (-4*t5+t6) + 4][ (-4*t5+t7)][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) - 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7) + 4][ (-4*t5+t8)]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) - 4]) + A[ t5 % 2][ (-4*t5+t6)][ (-4*t5+t7)][ (-4*t5+t8) + 4])))));; } } } } } } } } } /* End of CLooG code */ gettimeofday(&end, 0); ts_return = timeval_subtract(&result, &end, &start); tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6); min_tdiff = MIN(min_tdiff, tdiff); printf("Rank 0 TEST# %d time: %f\n", test, tdiff); } PRINT_RESULTS(4, "constant") #ifdef LIKWID_PERFMON #pragma omp parallel { LIKWID_MARKER_STOP("calc"); } LIKWID_MARKER_CLOSE; #endif // Free allocated arrays for(i=0; i<Nz; i++){ for(j=0;j<Ny;j++){ free(A[0][i][j]); free(A[1][i][j]); free(roc2[i][j]); } free(A[0][i]); free(A[1][i]); free(roc2[i]); } free(A[0]); free(A[1]); free(roc2); return 0; }
testing_dsyr2k.c
/** * * @file testing_dsyr2k.c * * PLASMA testing routines * PLASMA is a software package provided by Univ. of Tennessee, * Univ. of California Berkeley and Univ. of Colorado Denver * * @version 2.6.0 * @author Mathieu Faverge * @date 2010-11-15 * @generated d Tue Jan 7 11:45:18 2014 * **/ #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> #include <plasma.h> #include <cblas.h> #include <lapacke.h> #include <core_blas.h> #include "testing_dmain.h" static int check_solution(PLASMA_enum uplo, PLASMA_enum trans, int N, int K, double alpha, double *A, int LDA, double *B, int LDB, double beta, double *Cref, double *Cplasma, int LDC); int testing_dsyr2k(int argc, char **argv) { /* Check for number of arguments*/ if ( argc != 7 ){ USAGE("SYR2K", "alpha beta M N LDA LDB LDC", " - alpha : alpha coefficient\n" " - beta : beta coefficient\n" " - N : number of columns and rows of matrix C and number of row of matrix A and B\n" " - K : number of columns of matrix A and B\n" " - LDA : leading dimension of matrix A\n" " - LDB : leading dimension of matrix B\n" " - LDC : leading dimension of matrix C\n"); return -1; } double alpha = (double) atol(argv[0]); double beta = (double) atol(argv[1]); int N = atoi(argv[2]); int K = atoi(argv[3]); int LDA = atoi(argv[4]); int LDB = atoi(argv[5]); int LDC = atoi(argv[6]); int NKmax = max(N, K); double eps; int info_solution; int u, t; size_t LDAxK = LDA*NKmax; size_t LDBxK = LDB*NKmax; size_t LDCxN = LDC*N; double *A = (double *)malloc(LDAxK*sizeof(double)); #pragma omp register([LDAxK]A) double *B = (double *)malloc(LDBxK*sizeof(double)); #pragma omp register([LDBxK]B) double *C = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register([LDCxN]C) double *Cinit = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register([LDCxN]Cinit) double *Cfinal = (double *)malloc(LDCxN*sizeof(double)); #pragma omp register([LDCxN]Cfinal) /* Check if unable to allocate memory */ if ( (!A) || (!B) || (!Cinit) || (!Cfinal) 
){ printf("Out of Memory \n "); return -2; } eps = LAPACKE_dlamch_work('e'); printf("\n"); printf("------ TESTS FOR PLASMA DSYR2K ROUTINE ------- \n"); printf(" Size of the Matrix C %d by %d\n", N, K); printf("\n"); printf(" The matrix A is randomly generated for each test.\n"); printf("============\n"); printf(" The relative machine precision (eps) is to be %e \n",eps); printf(" Computational tests pass if scaled residuals are less than 10.\n"); /*---------------------------------------------------------- * TESTING DSYR2K */ /* Initialize A,B */ LAPACKE_dlarnv_work(IONE, ISEED, LDAxK, A); LAPACKE_dlarnv_work(IONE, ISEED, LDBxK, B); /* Initialize C */ PLASMA_dplgsy( (double)0., N, C, LDC, 51 ); for (u=0; u<2; u++) { for (t=0; t<2; t++) { memcpy(Cinit, C, LDCxN*sizeof(double)); memcpy(Cfinal, C, LDCxN*sizeof(double)); /* PLASMA DSYR2K */ PLASMA_dsyr2k(uplo[u], trans[t], N, K, alpha, A, LDA, B, LDB, beta, Cfinal, LDC); /* Check the solution */ info_solution = check_solution(uplo[u], trans[t], N, K, alpha, A, LDA, B, LDB, beta, Cinit, Cfinal, LDC); if (info_solution == 0) { printf("***************************************************\n"); printf(" ---- TESTING DSYR2K (%5s, %s) ........... PASSED !\n", uplostr[u], transstr[t]); printf("***************************************************\n"); } else { printf("************************************************\n"); printf(" - TESTING DSYR2K (%5s, %s) ... 
FAILED !\n", uplostr[u], transstr[t]); printf("************************************************\n"); } } } free(A); free(B); free(C); free(Cinit); free(Cfinal); return 0; } /*-------------------------------------------------------------- * Check the solution */ static int check_solution(PLASMA_enum uplo, PLASMA_enum trans, int N, int K, double alpha, double *A, int LDA, double *B, int LDB, double beta, double *Cref, double *Cplasma, int LDC) { int info_solution; double Anorm, Bnorm, Cinitnorm, Cplasmanorm, Clapacknorm, Rnorm, result; double eps; double beta_const; double *work = (double *)malloc(max(N, K)* sizeof(double)); beta_const = -1.0; Anorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), (trans == PlasmaNoTrans) ? N : K, (trans == PlasmaNoTrans) ? K : N, A, LDA, work); Bnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), (trans == PlasmaNoTrans) ? N : K, (trans == PlasmaNoTrans) ? K : N, B, LDB, work); Cinitnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cref, LDC, work); Cplasmanorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cplasma, LDC, work); cblas_dsyr2k(CblasColMajor, (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans, N, K, (alpha), A, LDA, B, LDB, (beta), Cref, LDC); Clapacknorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cref, LDC, work); cblas_daxpy(LDC*N, (beta_const), Cplasma, 1, Cref, 1); Rnorm = LAPACKE_dlange_work(LAPACK_COL_MAJOR, lapack_const(PlasmaInfNorm), N, N, Cref, LDC, work); eps = LAPACKE_dlamch_work('e'); printf("Rnorm %e, Anorm %e, Cinitnorm %e, Cplasmanorm %e, Clapacknorm %e\n", Rnorm, Anorm, Cinitnorm, Cplasmanorm, Clapacknorm); result = Rnorm / ((Anorm + Bnorm + Cinitnorm) * N * eps); printf("============\n"); printf("Checking the norm of the difference against reference DSYR2K \n"); printf("-- ||Cplasma - Clapack||_oo/((||A||_oo+||C||_oo).N.eps) = %e \n", result); if ( isnan(Rnorm) || isinf(Rnorm) || 
isnan(result) || isinf(result) || (result > 10.0) ) { printf("-- The solution is suspicious ! \n"); info_solution = 1; } else { printf("-- The solution is CORRECT ! \n"); info_solution= 0 ; } free(work); return info_solution; }
act3.c
/*
    Owen Jauregui Borbon - A01638122
    Program for calculating the approximate area under the curve
    of a function using the trapezoidal rule.
    Parallelized using OpenMP.
*/

#include <stdio.h>
#include <omp.h>

int main() {
    // Number of threads for OpenMP
    int threads = 8;

    // Integration limits
    double left = 1;
    double right = 20;

    // Amount and size of steps for the approximation
    double stepAmount = 1000000;
    double diff = (right-left)/stepAmount;

    // Starting value for the approximation: average of the endpoint values
    // of the example function f(x) = 5/x (trapezoidal rule)
    double result = (5/left + 5/right)/2;

    // BUG FIX: the original declared result as shared and had every thread
    // execute "result +=" with no synchronization — a data race that produced
    // nondeterministic totals.  A reduction gives each thread a private
    // accumulator and combines them safely when the loop ends.
    #pragma omp parallel for num_threads(threads) reduction(+:result)
    for(int i = 1; i < stepAmount; i++)
    {
        // Add the Y value at each interior sample point to the approximation
        result += 5/(left + i*diff);
    }

    // Multiply the accumulated sum by the step size for the final result
    result *= diff;

    // Display result
    printf("Approximate result: %.4f \n", result);

    return 0;
}
GB_queue_insert.c
//------------------------------------------------------------------------------ // GB_queue_insert: insert a matrix at the head of the matrix queue //------------------------------------------------------------------------------ // SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2018, All Rights Reserved. // http://suitesparse.com See GraphBLAS/Doc/License.txt for license. //------------------------------------------------------------------------------ // check if the matrix has pending computations (either pending tuples or // zombies, or both). If it has any, and if it is not already in the queue, // then insert it into the queue. #include "GB.h" void GB_queue_insert // insert matrix at the head of queue ( GrB_Matrix A // matrix to insert ) { //-------------------------------------------------------------------------- // check inputs //-------------------------------------------------------------------------- ASSERT (A != NULL) ; //-------------------------------------------------------------------------- // insert the matrix at the head of the queue //-------------------------------------------------------------------------- if ((A->npending > 0 || A->nzombies > 0) && !(A->enqueued)) { // A is not in the queue yet, but needs to be there #pragma omp critical (GB_queue) { // check again to be safe, then add A to the head of the queue if ((A->npending > 0 || A->nzombies > 0) && !(A->enqueued)) { // add the matrix to the head of the queue GrB_Matrix Head = (GrB_Matrix) (GB_Global.queue_head) ; A->queue_next = Head ; A->queue_prev = NULL ; A->enqueued = true ; if (Head != NULL) { Head->queue_prev = A ; } GB_Global.queue_head = A ; } } } }
single_misc_messages.c
// Clang '-verify' diagnostic test for the OpenMP '#pragma omp single' directive.
// Every 'expected-error'/'expected-warning'/'expected-note' comment below is a
// test assertion checked by clang's -verify mode; '@+N' anchors the expected
// diagnostic to the line N lines below the comment, so the relative line
// layout of comment/pragma pairs is significant and must not be disturbed.

// RUN: %clang_cc1 -fsyntax-only -fopenmp -verify %s -Wuninitialized
// RUN: %clang_cc1 -fsyntax-only -fopenmp-simd -verify %s -Wuninitialized

// -Wuninitialized still fires for a use inside a 'single' region.
void xxx(int argc) {
  int x; // expected-note {{initialize the variable 'x' to silence this warning}}
#pragma omp single
  argc = x; // expected-warning {{variable 'x' is uninitialized when used here}}
}

void foo(void);

// 'single' is rejected at file scope (not bound to a statement).
// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single

// expected-error@+1 {{unexpected OpenMP directive '#pragma omp single'}}
#pragma omp single foo

// A plain 'single' with no clauses is accepted on a statement.
void test_no_clause(void) {
  int i;
#pragma omp single
  foo();

#pragma omp single
  ++i;
}

// Branching into or out of a 'single' region is rejected: labels inside the
// region are invisible outside it and vice versa, and 'return' may not leave
// the OpenMP region.
void test_branch_protected_scope(void) {
  int i = 0;
L1:
  ++i;
  int x[24];
#pragma omp parallel
#pragma omp single
  {
    if (i == 5)
      goto L1; // expected-error {{use of undeclared label 'L1'}}
    else if (i == 6)
      return; // expected-error {{cannot return from OpenMP region}}
    else if (i == 7)
      goto L2;
    else if (i == 8) {
    L2:
      x[i]++;
    }
  }

  if (x[0] == 0)
    goto L2; // expected-error {{use of undeclared label 'L2'}}
  else if (x[1] == 1)
    goto L1;
}

// Unknown trailing tokens after the directive are ignored with a warning.
void test_invalid_clause(void) {
  int i;
#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single foo bar
  foo();
}

// Stray punctuation and clauses not valid on 'single' produce
// warnings/errors but parsing recovers.
void test_non_identifiers(void) {
  int i, x;

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single;
  foo();

#pragma omp parallel
// expected-error@+2 {{unexpected OpenMP clause 'linear' in directive '#pragma omp single'}}
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single linear(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single private(x);
  foo();

#pragma omp parallel
// expected-warning@+1 {{extra tokens at the end of '#pragma omp single' are ignored}}
#pragma omp single, private(x);
  foo();
}

// Malformed and well-formed 'private' clause argument lists.
void test_private(void) {
  int i;

#pragma omp parallel
// expected-error@+2 {{expected expression}}
// expected-error@+1 {{expected ')'}} expected-note@+1 {{to match this '('}}
#pragma omp single private(
  foo();

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(,
  foo();

#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single private(, )
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private()
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single private(int)
  foo();

#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single private(0)
  foo();

  int x, y, z;

#pragma omp parallel
#pragma omp single private(x)
  foo();

#pragma omp parallel
#pragma omp single private(x, y)
  foo();

#pragma omp parallel
#pragma omp single private(x, y, z)
  foo();
}

// Malformed 'firstprivate' clause argument lists.
void test_firstprivate(void) {
  int i;

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(
  foo();

#pragma omp parallel
// expected-error@+2 {{expected ')'}} expected-note@+2 {{to match this '('}}
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(,
  foo();

#pragma omp parallel
// expected-error@+1 2 {{expected expression}}
#pragma omp single firstprivate(, )
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate()
  foo();

#pragma omp parallel
// expected-error@+1 {{expected expression}}
#pragma omp single firstprivate(int)
  foo();

#pragma omp parallel
// expected-error@+1 {{expected variable name}}
#pragma omp single firstprivate(0)
  foo();
}

// A duplicate 'nowait' clause is diagnosed.
void test_nowait(void) {
#pragma omp single nowait nowait // expected-error {{directive '#pragma omp single' cannot contain more than one 'nowait' clause}}
  for (int i = 0; i < 16; ++i)
    ;
}
deconvolution_pack4to1.h
// Tencent is pleased to support the open source community by making ncnn available. // // Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved. // // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except // in compliance with the License. You may obtain a copy of the License at // // https://opensource.org/licenses/BSD-3-Clause // // Unless required by applicable law or agreed to in writing, software distributed // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR // CONDITIONS OF ANY KIND, either express or implied. See the License for the // specific language governing permissions and limitations under the License. static void deconvolution_pack4to1_msa(const Mat& bottom_blob, Mat& top_blob, const Mat& weight_data_pack4to1, const Mat& bias_data, int kernel_w, int kernel_h, int dilation_w, int dilation_h, int stride_w, int stride_h, int activation_type, const Mat& activation_params, const Option& opt) { int w = bottom_blob.w; int h = bottom_blob.h; int channels = bottom_blob.c; int outw = top_blob.w; int outh = top_blob.h; int outch = top_blob.c; const int kernel_extent_w = dilation_w * (kernel_w - 1) + 1; const int kernel_extent_h = dilation_h * (kernel_h - 1) + 1; const int maxk = kernel_w * kernel_h; const float* bias_data_ptr = bias_data; // num_output #pragma omp parallel for num_threads(opt.num_threads) for (int p = 0; p < outch; p++) { float* outptr = top_blob.channel(p); for (int i = 0; i < outh; i++) { for (int j = 0; j < outw; j++) { float sum = 0.f; if (bias_data_ptr) { sum = bias_data_ptr[p]; } v4f32 _sum = (v4f32)__msa_fill_w(0); const float* kptr = (const float*)weight_data_pack4to1 + maxk * channels * p * 4; // channels for (int q = 0; q < channels; q++) { const Mat m = bottom_blob.channel(q); for (int y = 0; y < kernel_h; y++) { int sys = (i + y * dilation_h - (kernel_extent_h - 1)); if (sys < 0 || sys % stride_h != 0) continue; int sy = sys / stride_h; if (sy >= 
h) continue; for (int x = 0; x < kernel_w; x++) { int sxs = (j + x * dilation_w - (kernel_extent_w - 1)); if (sxs < 0 || sxs % stride_w != 0) continue; int sx = sxs / stride_w; if (sx >= w) continue; const float* sptr = m.row(sy) + sx * 4; int k = y * kernel_w + x; v4f32 _val = (v4f32)__msa_ld_w(sptr, 0); v4f32 _w = (v4f32)__msa_ld_w(kptr + k * 4, 0); _sum = __msa_fmadd_w(_sum, _val, _w); } } kptr += maxk * 4; } sum += __msa_fhadd_w(_sum); sum = activation_ss(sum, activation_type, activation_params); outptr[j] = sum; } outptr += outw; } } }