geo_particle_iter_mass.kernel_runtime.c | #include <omp.h>
#include <stdio.h>
#include <stdlib.h>
#include "local_header.h"
#include "openmp_pscmc_inc.h"
#include "geo_particle_iter_mass.kernel_inc.h"
int openmp_relng_1st_sg2_small_grids_init (openmp_pscmc_env *pe, openmp_relng_1st_sg2_small_grids_struct *kerstr) {
  (void) pe; (void) kerstr; /* no per-kernel setup is needed for the OpenMP backend */
  return 0;
}
void openmp_relng_1st_sg2_small_grids_get_struct_len (size_t *len) {
  *len = sizeof(openmp_relng_1st_sg2_small_grids_struct);
}
int openmp_relng_1st_sg2_small_grids_get_num_compute_units (openmp_relng_1st_sg2_small_grids_struct *kerstr) {
  (void) kerstr;
  return omp_get_max_threads();
}
int openmp_relng_1st_sg2_small_grids_get_xlen (void) {
  return 1;
}
int openmp_relng_1st_sg2_small_grids_exec (openmp_relng_1st_sg2_small_grids_struct *kerstr, long scmc_internal_g_xlen, long scmc_internal_g_ylen) {
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    long xid, yid;
    /* y rows are dealt out round-robin across the threads; each thread then
       sweeps every x column of its rows */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
        openmp_relng_1st_sg2_small_grids_scmc_kernel (kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
            kerstr->xoffset, kerstr->yoffset, kerstr->zoffset,
            kerstr->fieldE, kerstr->fieldB, kerstr->fieldB1, kerstr->LFoutJ,
            kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
            kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
            kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
            kerstr->Tori_X0[0], kerstr->Solve_Err[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter setters: each binds a backend buffer (pm->d_data) to the
   matching field of the kernel-argument struct. */
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_inoutput (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inoutput = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_xyzw (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xyzw = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_xoffset (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xoffset = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_yoffset (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yoffset = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_zoffset (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zoffset = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_fieldE (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldE = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_fieldB (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_fieldB1 (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB1 = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_XLEN (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_YLEN (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_ovlp (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_numvec (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_num_ele (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Mass0 (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Mass0 = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Charge0 (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Charge0 = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Deltat (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Deltat = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Tori_X0 (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Tori_X0 = pm->d_data; return 0; }
int openmp_relng_1st_sg2_small_grids_scmc_set_parameter_Solve_Err (openmp_relng_1st_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Solve_Err = pm->d_data; return 0; }
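/*
 * A minimal host-side sketch of driving one kernel variant through this API.
 * The buffer setup is hypothetical (openmp_pscmc_mem is only assumed to carry
 * a d_data pointer, as the setters above suggest), only one of the 27
 * parameter bindings is shown, and error handling is elided.
 */
static int example_run_relng_1st_sg2 (openmp_pscmc_env *pe, openmp_pscmc_mem *inoutput_mem, long xlen, long ylen) {
  size_t len = 0;
  openmp_relng_1st_sg2_small_grids_get_struct_len(&len);
  openmp_relng_1st_sg2_small_grids_struct *kerstr = malloc(len);
  if (kerstr == NULL) return -1;
  openmp_relng_1st_sg2_small_grids_init(pe, kerstr);
  /* bind every parameter this way before calling exec; one shown here */
  openmp_relng_1st_sg2_small_grids_scmc_set_parameter_inoutput(kerstr, inoutput_mem);
  int rc = openmp_relng_1st_sg2_small_grids_exec(kerstr, xlen, ylen);
  free(kerstr);
  return rc;
}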
int openmp_relng_1st_small_grids_init (openmp_pscmc_env *pe, openmp_relng_1st_small_grids_struct *kerstr) {
  (void) pe; (void) kerstr; /* no per-kernel setup is needed for the OpenMP backend */
  return 0;
}
void openmp_relng_1st_small_grids_get_struct_len (size_t *len) {
  *len = sizeof(openmp_relng_1st_small_grids_struct);
}
int openmp_relng_1st_small_grids_get_num_compute_units (openmp_relng_1st_small_grids_struct *kerstr) {
  (void) kerstr;
  return omp_get_max_threads();
}
int openmp_relng_1st_small_grids_get_xlen (void) {
  return 1;
}
int openmp_relng_1st_small_grids_exec (openmp_relng_1st_small_grids_struct *kerstr, long scmc_internal_g_xlen, long scmc_internal_g_ylen) {
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    long xid, yid;
    /* y rows round-robin across threads, as above */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
        openmp_relng_1st_small_grids_scmc_kernel (kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
            kerstr->xoffset, kerstr->yoffset, kerstr->zoffset,
            kerstr->fieldE, kerstr->fieldB, kerstr->fieldB1, kerstr->LFoutJ,
            kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
            kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
            kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
            kerstr->Tori_X0[0], kerstr->Solve_Err[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter setters for openmp_relng_1st_small_grids (same pattern as above). */
int openmp_relng_1st_small_grids_scmc_set_parameter_inoutput (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inoutput = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_xyzw (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xyzw = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_cu_cache (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_cu_xyzw (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_xoffset (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xoffset = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_yoffset (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yoffset = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_zoffset (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zoffset = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_fieldE (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldE = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_fieldB (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_fieldB1 (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB1 = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_LFoutJ (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_XLEN (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_YLEN (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_ZLEN (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_ovlp (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_numvec (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_num_ele (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_grid_cache_len (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_cu_cache_length (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_DELTA_X (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_DELTA_Y (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_DELTA_Z (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_Mass0 (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Mass0 = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_Charge0 (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Charge0 = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_Deltat (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Deltat = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_Tori_X0 (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Tori_X0 = pm->d_data; return 0; }
int openmp_relng_1st_small_grids_scmc_set_parameter_Solve_Err (openmp_relng_1st_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Solve_Err = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_init (openmp_pscmc_env *pe, openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr) {
  (void) pe; (void) kerstr; /* no per-kernel setup is needed for the OpenMP backend */
  return 0;
}
void openmp_geo_rel_1st_bwd_sg2_small_grids_get_struct_len (size_t *len) {
  *len = sizeof(openmp_geo_rel_1st_bwd_sg2_small_grids_struct);
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_get_num_compute_units (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr) {
  (void) kerstr;
  return omp_get_max_threads();
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_get_xlen (void) {
  return 1;
}
int openmp_geo_rel_1st_bwd_sg2_small_grids_exec (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, long scmc_internal_g_xlen, long scmc_internal_g_ylen) {
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    long xid, yid;
    /* y rows round-robin across threads, as above */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
        openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_kernel (kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
            kerstr->xoffset, kerstr->yoffset, kerstr->zoffset,
            kerstr->fieldE, kerstr->fieldB, kerstr->fieldB1, kerstr->LFoutJ,
            kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
            kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
            kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
            kerstr->Tori_X0[0], kerstr->Solve_Err[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter setters for openmp_geo_rel_1st_bwd_sg2_small_grids (same pattern as above). */
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inoutput = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldE = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB1 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Mass0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Charge0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Deltat = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Tori_X0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_sg2_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_bwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Solve_Err = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_init (openmp_pscmc_env *pe, openmp_geo_rel_1st_bwd_small_grids_struct *kerstr) {
  (void) pe; (void) kerstr; /* no per-kernel setup is needed for the OpenMP backend */
  return 0;
}
void openmp_geo_rel_1st_bwd_small_grids_get_struct_len (size_t *len) {
  *len = sizeof(openmp_geo_rel_1st_bwd_small_grids_struct);
}
int openmp_geo_rel_1st_bwd_small_grids_get_num_compute_units (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr) {
  (void) kerstr;
  return omp_get_max_threads();
}
int openmp_geo_rel_1st_bwd_small_grids_get_xlen (void) {
  return 1;
}
int openmp_geo_rel_1st_bwd_small_grids_exec (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, long scmc_internal_g_xlen, long scmc_internal_g_ylen) {
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    long xid, yid;
    /* y rows round-robin across threads, as above */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
        openmp_geo_rel_1st_bwd_small_grids_scmc_kernel (kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
            kerstr->xoffset, kerstr->yoffset, kerstr->zoffset,
            kerstr->fieldE, kerstr->fieldB, kerstr->fieldB1, kerstr->LFoutJ,
            kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
            kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
            kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
            kerstr->Tori_X0[0], kerstr->Solve_Err[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter setters for openmp_geo_rel_1st_bwd_small_grids (same pattern as above). */
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inoutput = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldE = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB1 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Mass0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Charge0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Deltat = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Tori_X0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_bwd_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_bwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Solve_Err = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_init (openmp_pscmc_env *pe, openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr) {
  (void) pe; (void) kerstr; /* no per-kernel setup is needed for the OpenMP backend */
  return 0;
}
void openmp_geo_rel_1st_fwd_sg2_small_grids_get_struct_len (size_t *len) {
  *len = sizeof(openmp_geo_rel_1st_fwd_sg2_small_grids_struct);
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_get_num_compute_units (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr) {
  (void) kerstr;
  return omp_get_max_threads();
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_get_xlen (void) {
  return 1;
}
int openmp_geo_rel_1st_fwd_sg2_small_grids_exec (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, long scmc_internal_g_xlen, long scmc_internal_g_ylen) {
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    long xid, yid;
    /* y rows round-robin across threads, as above */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
        openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_kernel (kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
            kerstr->xoffset, kerstr->yoffset, kerstr->zoffset,
            kerstr->fieldE, kerstr->fieldB, kerstr->fieldB1, kerstr->LFoutJ,
            kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
            kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
            kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
            kerstr->Tori_X0[0], kerstr->Solve_Err[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter setters for openmp_geo_rel_1st_fwd_sg2_small_grids (same pattern as above). */
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inoutput = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldE = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB1 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Mass0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Charge0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Deltat = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Tori_X0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_sg2_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_fwd_sg2_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Solve_Err = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_init (openmp_pscmc_env *pe, openmp_geo_rel_1st_fwd_small_grids_struct *kerstr) {
  (void) pe; (void) kerstr; /* no per-kernel setup is needed for the OpenMP backend */
  return 0;
}
void openmp_geo_rel_1st_fwd_small_grids_get_struct_len (size_t *len) {
  *len = sizeof(openmp_geo_rel_1st_fwd_small_grids_struct);
}
int openmp_geo_rel_1st_fwd_small_grids_get_num_compute_units (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr) {
  (void) kerstr;
  return omp_get_max_threads();
}
int openmp_geo_rel_1st_fwd_small_grids_get_xlen (void) {
  return 1;
}
int openmp_geo_rel_1st_fwd_small_grids_exec (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, long scmc_internal_g_xlen, long scmc_internal_g_ylen) {
#pragma omp parallel
  {
    int numt = omp_get_num_threads();
    int tid = omp_get_thread_num();
    long xid, yid;
    /* y rows round-robin across threads, as above */
    for (yid = tid; yid < scmc_internal_g_ylen; yid += numt) {
      for (xid = 0; xid < scmc_internal_g_xlen; xid++) {
        openmp_geo_rel_1st_fwd_small_grids_scmc_kernel (kerstr->inoutput, kerstr->xyzw, kerstr->cu_cache, kerstr->cu_xyzw,
            kerstr->xoffset, kerstr->yoffset, kerstr->zoffset,
            kerstr->fieldE, kerstr->fieldB, kerstr->fieldB1, kerstr->LFoutJ,
            kerstr->XLEN[0], kerstr->YLEN[0], kerstr->ZLEN[0], kerstr->ovlp[0],
            kerstr->numvec[0], kerstr->num_ele[0], kerstr->grid_cache_len[0], kerstr->cu_cache_length[0],
            kerstr->DELTA_X[0], kerstr->DELTA_Y[0], kerstr->DELTA_Z[0],
            kerstr->Mass0[0], kerstr->Charge0[0], kerstr->Deltat[0],
            kerstr->Tori_X0[0], kerstr->Solve_Err[0], yid, scmc_internal_g_ylen);
      }
    }
  }
  return 0;
}
/* Parameter setters for openmp_geo_rel_1st_fwd_small_grids (same pattern as above). */
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_inoutput (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->inoutput = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_xyzw (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_cu_cache (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_cu_xyzw (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_xyzw = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_xoffset (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->xoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_yoffset (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->yoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_zoffset (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->zoffset = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_fieldE (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldE = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_fieldB (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_fieldB1 (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->fieldB1 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_LFoutJ (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->LFoutJ = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_XLEN (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->XLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_YLEN (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->YLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_ZLEN (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ZLEN = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_ovlp (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->ovlp = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_numvec (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->numvec = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_num_ele (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->num_ele = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_grid_cache_len (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->grid_cache_len = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_cu_cache_length (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->cu_cache_length = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_DELTA_X (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_X = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_DELTA_Y (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Y = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_DELTA_Z (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->DELTA_Z = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Mass0 (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Mass0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Charge0 (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Charge0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Deltat (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Deltat = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Tori_X0 (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Tori_X0 = pm->d_data; return 0; }
int openmp_geo_rel_1st_fwd_small_grids_scmc_set_parameter_Solve_Err (openmp_geo_rel_1st_fwd_small_grids_struct *kerstr, openmp_pscmc_mem *pm) { kerstr->Solve_Err = pm->d_data; return 0; }
|
functions.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <string.h>
#include "functions.h"
//compute a*b mod p safely (assumes p < 2^31 so the doubling below cannot overflow)
unsigned int modprod(unsigned int a, unsigned int b, unsigned int p) {
  unsigned int za = a % p;
  unsigned int ab = 0;
  while (b > 0) {
    if (b%2 == 1) ab = (ab + za) % p;
    za = (2 * za) % p;
    b /= 2;
  }
  return ab;
}
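// Worked example: modprod(7, 9, 13) iterates over the bits of b = 9 = 1001b.
//   bit 0 set -> ab = 7; za then doubles mod 13 through 1, 2, 4;
//   bit 3 set -> ab = (7 + 4) % 13 = 11, which matches 7*9 = 63 and 63 % 13 = 11.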
//compute a^b mod p safely
unsigned int modExp(unsigned int a, unsigned int b, unsigned int p) {
unsigned int z = a;
unsigned int aExpb = 1;
while (b > 0) {
if (b%2 == 1) aExpb = modprod(aExpb, z, p);
z = modprod(z, z, p);
b /= 2;
}
return aExpb;
}
//returns either 0 or 1 randomly
unsigned int randomBit() {
return rand()%2;
}
//returns a random integer in [2^{n-1}, 2^n - 1], i.e. an n-bit integer with the top bit set
unsigned int randXbitInt(unsigned int n) {
unsigned int r = 1;
for (unsigned int i=0; i<n-1; i++) {
r = r*2 + randomBit();
}
return r;
}
//tests for primality: returns 1 if N is probably prime and 0 if N is composite
unsigned int isProbablyPrime(unsigned int N) {
  if (N%2==0) return 0; //reject even numbers (note this also rejects 2 itself)
unsigned int NsmallPrimes = 168;
unsigned int smallPrimeList[168] = {2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31,
37, 41, 43, 47, 53, 59, 61, 67, 71, 73,
79, 83, 89, 97, 101, 103, 107, 109, 113,
127, 131, 137, 139, 149, 151, 157, 163,
167, 173, 179, 181, 191, 193, 197, 199,
211, 223, 227, 229, 233, 239, 241, 251,
257, 263, 269, 271, 277, 281, 283, 293,
307, 311, 313, 317, 331, 337, 347, 349,
353, 359, 367, 373, 379, 383, 389, 397,
401, 409, 419, 421, 431, 433, 439, 443,
449, 457, 461, 463, 467, 479, 487, 491,
499, 503, 509, 521, 523, 541, 547, 557,
563, 569, 571, 577, 587, 593, 599, 601,
607, 613, 617, 619, 631, 641, 643, 647,
653, 659, 661, 673, 677, 683, 691, 701,
709, 719, 727, 733, 739, 743, 751, 757,
761, 769, 773, 787, 797, 809, 811, 821,
823, 827, 829, 839, 853, 857, 859, 863,
877, 881, 883, 887, 907, 911, 919, 929,
937, 941, 947, 953, 967, 971, 977, 983,
991, 997};
//before using a probabilistic primality check, check directly against the small primes list
for (unsigned int n=1;n<NsmallPrimes;n++) {
if (N==smallPrimeList[n]) return 1; //true
if (N%smallPrimeList[n]==0) return 0; //false
}
//for larger numbers, switch to the Miller-Rabin primality test:
//write N-1 = 2^r * d with d odd
unsigned int r = 0;
unsigned int d = N-1;
while (d%2 == 0) {
d /= 2;
r += 1;
}
for (unsigned int n=0;n<NsmallPrimes;n++) {
unsigned int k = smallPrimeList[n];
unsigned int x = modExp(k,d,N);
if ((x==1) || (x==N-1)) continue;
//square x up to r-1 times, looking for x == N-1
for (unsigned int i=1;i<r;i++) {
x = modprod(x,x,N);
if (x == 1) return 0; //false: a nontrivial square root of 1 was found
if (x == N-1) break;
}
// see whether we left the loop because x==N-1
if (x == N-1) continue;
return 0; //false
}
return 1; //true
}
return 1; //true
}
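// Note: because every one of the first 168 primes is used as a Miller-Rabin
// base, the test above is deterministic for 32-bit inputs; the bases
// {2,3,5,...,37} alone already certify every N below about 3.3e24.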
//Finds a generator of Z_p using the assumption that p=2*q+1
unsigned int findGenerator(unsigned int p) {
unsigned int g;
unsigned int q = (p-1)/2;
do {
//pick a random g in [1, p-1]
g = randXbitInt(32)%p; //could also have passed n to findGenerator
//since p=2q+1, element orders divide 2q; rejecting orders 1, 2 and q leaves only generators
} while (g==0 || (modExp(g,q,p)==1) || (modExp(g,2,p)==1));
return g;
}
void setupElGamal(unsigned int n, unsigned int *p, unsigned int *g,
unsigned int *h, unsigned int *x) {
/* Use isProbablyPrime and randXbitInt to find a new random n-bit prime number
which satisfies p=2*q+1 where q is also prime (a "safe prime") */
unsigned int q;
do {
*p = randXbitInt(n);
q = (*p-1)/2;
} while (!isProbablyPrime(*p) || !isProbablyPrime(q));
/* Use the fact that p=2*q+1 to quickly find a generator */
*g = findGenerator(*p);
//pick a secret key, x
*x = randXbitInt(n)%(*p);
//compute h
*h = modExp(*g,*x,*p);
printf("ElGamal Setup successful.\n");
printf("p = %u. \n", *p);
printf("g = %u is a generator of Z_%u \n", *g, *p);
printf("Secret key: x = %u \n", *x);
printf("h = g^x = %u\n", *h);
printf("\n");
}
void ElGamalEncrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
unsigned int p, unsigned int g, unsigned int h) {
/* Q2.1 Parallelize this function with OpenMP */
// Each iteration is independent, so the loop can run in parallel.
// Caveat: randXbitInt() calls rand(), which is not guaranteed to be
// thread-safe; a production version would use per-thread RNG state.
#pragma omp parallel for
for (unsigned int i=0; i<Nints;i++) {
//pick y in Z_p randomly
unsigned int y;
do {
y = randXbitInt(32)%p;
} while (y==0); //don't allow y=0
//compute a = g^y
a[i] = modExp(g,y,p);
//compute s = h^y
unsigned int s = modExp(h,y,p);
//encrypt m by multiplying with s
m[i] = modprod(m[i],s,p);
}
}
void ElGamalDecrypt(unsigned int *m, unsigned int *a, unsigned int Nints,
unsigned int p, unsigned int x) {
/* Q2.1 Parallelize this function with OpenMP */
// Each iteration touches only its own m[i] and a[i], so this is safely parallel.
#pragma omp parallel for
for (unsigned int i=0; i<Nints;i++) {
//compute s = a^x
unsigned int s = modExp(a[i],x,p);
//compute s^{-1} = s^{p-2} (Fermat's little theorem)
unsigned int invS = modExp(s,p-2,p);
//decrypt message by multiplying by invS
m[i] = modprod(m[i],invS,p);
}
}
//Pad the end of string with spaces so its length is divisible by charsPerInt.
// Assume there is enough allocated storage for the padded string.
void padString(unsigned char* string, unsigned int charsPerInt) {
/* Q1.2 Complete this function */
while (strlen((char*)string) % charsPerInt != 0) {
strcat((char*)string, " ");
}
}
void convertStringToZ(unsigned char *string, unsigned int Nchars,
unsigned int *Z, unsigned int Nints) {
/* Q1.3 Complete this function */
/* Q2.2 Parallelize this function with OpenMP */
unsigned int charperint = Nchars/Nints;
// Parallelize over the output integers rather than the characters, so no two
// threads ever OR into the same Z[i]; per-character threading would race.
#pragma omp parallel for shared(Z)
for (unsigned int i = 0; i < Nints; i++) {
Z[i] = 0;
for (unsigned int j = 0; j < charperint; j++) {
//pack 9 bits per character, most significant character first
Z[i] |= ((unsigned int) string[i*charperint + j]) << ((charperint-1-j)*9);
}
}
}
void convertZToString(unsigned int *Z, unsigned int Nints,
unsigned char *string, unsigned int Nchars) {
/* Q1.4 Complete this function */
/* Q2.2 Parallelize this function with OpenMP */
unsigned int charperint = Nchars/Nints;
#pragma omp parallel for shared(string)
for (unsigned int x = 0; x < Nchars; x++) {
//undo the 9-bit packing done in convertStringToZ
string[x] = (unsigned char) (Z[x/charperint]>>(9*(charperint-1-x%charperint)));
}
}
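/*
 * A small round-trip sketch (assumed to be driven from the course's main
 * program; the name and buffer sizes are illustrative only). It pads a
 * message, packs it into integers, encrypts, decrypts, and unpacks it again.
 */
void elgamalRoundTripDemo(void) {
unsigned int n = 31, p, g, h, x;
setupElGamal(n, &p, &g, &h, &x);
unsigned int charsPerInt = 3; //3 chars * 9 bits = 27 bits, which stays below the 31-bit p
unsigned char message[64] = "hello openmp";
padString(message, charsPerInt);
unsigned int Nchars = strlen((char*)message);
unsigned int Nints = Nchars / charsPerInt;
unsigned int *Z = (unsigned int*) calloc(Nints, sizeof(unsigned int));
unsigned int *a = (unsigned int*) calloc(Nints, sizeof(unsigned int));
convertStringToZ(message, Nchars, Z, Nints);
ElGamalEncrypt(Z, a, Nints, p, g, h);
ElGamalDecrypt(Z, a, Nints, p, x);
unsigned char recovered[64] = {0};
convertZToString(Z, Nints, recovered, Nchars);
printf("recovered: '%s'\n", (char*)recovered);
free(Z); free(a);
}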
|
comm.h | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/**
* Copyright (c) 2015 by Contributors
*/
#ifndef MXNET_KVSTORE_COMM_H_
#define MXNET_KVSTORE_COMM_H_
#include <dmlc/omp.h>
#include <string>
#include <algorithm>
#include <utility>
#include <limits>
#include <vector>
#include <tuple>
#include <thread>
#include "mxnet/ndarray.h"
#include "gradient_compression.h"
#include "../ndarray/ndarray_function.h"
#include "../operator/tensor/sparse_retain-inl.h"
#include "./kvstore_utils.h"
namespace mxnet {
namespace kvstore {
/**
* \brief multiple device communication
*/
class Comm {
public:
Comm() {
pinned_ctx_ = Context::CPUPinned(0);
}
virtual ~Comm() { }
/**
* \brief init key with the data shape and storage shape
*/
virtual void Init(int key, const NDArrayStorageType stype,
const TShape& shape, int dtype = mshadow::kFloat32) = 0;
/**
* \brief returns src[0] + .. + src[src.size()-1]
*/
virtual const NDArray& Reduce(
int key, const std::vector<NDArray>& src, int priority) = 0;
/**
* \brief copy from src to dst[i] for every i
*/
virtual void Broadcast(
int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) = 0;
/**
* \brief broadcast src to dst[i] with target row_ids for every i
* \param key the identifier key for the stored ndarray
* \param src the source row_sparse ndarray to broadcast
* \param dst a list of destination row_sparse NDArray and its target row_ids to broadcast,
where the row_ids are expected to be unique and sorted in row_id.data()
* \param priority the priority of the operation
*/
virtual void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) = 0;
/**
* \brief return a pinned context
*/
Context pinned_ctx() const {
return pinned_ctx_;
}
/**
* \brief Sets gradient compression parameters to be able to
* perform reduce with compressed gradients
*/
void SetGradientCompression(std::shared_ptr<GradientCompression> gc) {
gc_ = gc;
}
protected:
Context pinned_ctx_;
std::shared_ptr<GradientCompression> gc_;
};
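/**
 * A minimal sketch of what a Comm backend must provide. Only the pure-virtual
 * surface declared above is assumed; the trivial single-device behaviour here
 * is illustrative, not a real MXNet backend.
 */
class CommSingleDevice : public Comm {
 public:
  void Init(int key, const NDArrayStorageType stype,
            const TShape& shape, int dtype) override {
    // nothing to pre-allocate for a single device
  }
  const NDArray& Reduce(int key, const std::vector<NDArray>& src,
                        int priority) override {
    return src[0];  // one device: the "sum" is the lone source array
  }
  void Broadcast(int key, const NDArray& src,
                 const std::vector<NDArray*> dst, int priority) override {
    for (auto d : dst) CopyFromTo(src, d, priority);
  }
  void BroadcastRowSparse(int key, const NDArray& src,
                          const std::vector<std::pair<NDArray*, NDArray>>& dst,
                          const int priority) override {
    LOG(FATAL) << "not implemented in this sketch";
  }
};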
/**
* \brief an implementation of Comm that first copies data to CPU memory, and then
* reduces there
*/
class CommCPU : public Comm {
public:
CommCPU() {
nthread_reduction_ = dmlc::GetEnv("MXNET_KVSTORE_REDUCTION_NTHREADS", 4);
bigarray_bound_ = dmlc::GetEnv("MXNET_KVSTORE_BIGARRAY_BOUND", 1000 * 1000);
// TODO(junwu) delete the following data member, now for benchmark only
is_serial_push_ = dmlc::GetEnv("MXNET_KVSTORE_SERIAL_PUSH", 0);
}
virtual ~CommCPU() { }
void Init(int key, const NDArrayStorageType stype, const TShape& shape,
int type = mshadow::kFloat32) override {
// Delayed allocation - the dense merged buffer might not be used at all if push()
// only sees sparse arrays
bool delay_alloc = true;
merge_buf_[key].merged = NDArray(shape, pinned_ctx_, delay_alloc, type);
}
const NDArray& Reduce(int key, const std::vector<NDArray>& src,
int priority) override {
auto& buf = merge_buf_[key];
const auto stype = src[0].storage_type();
// avoid extra copy for single device, but it may bring problems for
// abnormal usage of kvstore
if (src.size() == 1) {
if (stype == kDefaultStorage) {
return src[0];
} else {
// With 'local' kvstore, we could store the weight on CPU while computing
// the gradient on GPU when the weight is extremely large.
// To avoid copying the weight to the same context as the gradient,
// we always copy the gradient to the merged buffer.
NDArray& merged = buf.merged_buf(stype);
CopyFromTo(src[0], &merged, priority);
return merged;
}
}
NDArray& buf_merged = buf.merged_buf(stype);
// normal dense reduce
if (stype == kDefaultStorage) {
std::vector<Engine::VarHandle> const_vars(src.size() - 1);
std::vector<NDArray> reduce(src.size());
CopyFromTo(src[0], &buf_merged, priority);
reduce[0] = buf_merged;
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size()-1);
for (size_t j = 0; j < src.size() - 1; ++j) {
// allocate copy buffer
buf.copy_buf[j] = NDArray(
src[0].shape(), pinned_ctx_, false, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 1; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i-1]), priority);
reduce[i] = buf.copy_buf[i-1];
const_vars[i-1] = reduce[i].var();
}
Engine::Get()->PushAsync(
[reduce, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
ReduceSumCPU(reduce);
on_complete();
}, Context::CPU(), const_vars, {reduce[0].var()},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
} else {
// sparse reduce
std::vector<Engine::VarHandle> const_vars(src.size());
std::vector<NDArray> reduce(src.size());
if (buf.copy_buf.empty()) {
buf.copy_buf.resize(src.size());
for (size_t j = 0; j < src.size(); ++j) {
buf.copy_buf[j] = NDArray(
src[0].storage_type(), src[0].shape(), pinned_ctx_, true, src[0].dtype());
}
}
CHECK(stype == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << stype << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 0; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
const_vars[i] = reduce[i].var();
}
Resource rsc = ResourceManager::Get()->Request(buf_merged.ctx(),
ResourceRequest(ResourceRequest::kTempSpace));
Engine::Get()->PushAsync(
[reduce, buf_merged, rsc, this](RunContext rctx, Engine::CallbackOnComplete on_complete) {
NDArray out = buf_merged;
is_serial_push_?
ReduceSumCPUExSerial(reduce, &out)
: mxnet::ndarray::ElementwiseSum(rctx.get_stream<cpu>(), rsc, reduce, &out);
on_complete();
}, Context::CPU(), const_vars, {buf_merged.var(), rsc.var},
FnProperty::kCPUPrioritized, priority, "KVStoreReduce");
}
return buf_merged;
}
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
int mask = src.ctx().dev_mask();
if (mask == Context::kCPU) {
for (auto d : dst) CopyFromTo(src, d, priority);
} else {
// First copy data to pinned_ctx, then broadcast.
// Note that kv.init initializes the data on pinned_ctx.
// This branch indicates push() with ndarrays on gpus were called,
// and the source is copied to gpu ctx.
// Also indicates that buffers are already initialized during push().
auto& buf = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf, priority);
for (auto d : dst) CopyFromTo(buf, d, priority);
}
}
void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) override {
using namespace mshadow;
CHECK_EQ(src.storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row-sparse src NDArray";
CHECK_EQ(src.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with src on gpu context not supported";
for (size_t i = 0; i < dst.size(); ++i) {
NDArray* out = dst[i].first;
NDArray row_id = dst[i].second;
CHECK_EQ(out->storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row_sparse dst NDArray";
CHECK_EQ(row_id.ctx().dev_mask(), Context::kCPU)
<< "BroadcastRowSparse with row_indices on gpu context not supported";
// retain according to unique indices
const bool is_same_ctx = out->ctx() == src.ctx();
const bool is_diff_var = out->var() != src.var();
NDArray retained_cpu = (is_same_ctx && is_diff_var) ? *out :
NDArray(kRowSparseStorage, src.shape(), src.ctx(), true,
src.dtype(), src.aux_types());
if (!is_diff_var) {
common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
"refers to the same NDArray as the one stored in KVStore."
"Performing row_sparse_pull() with such output is going to change the "
"data stored in KVStore. Incorrect result may be generated "
"next time row_sparse_pull() is called. To avoid such an issue,"
"consider create a new NDArray buffer to store the output.");
}
Engine::Get()->PushAsync(
[=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
const TBlob& indices = row_id.data();
NDArray temp = retained_cpu; // get rid of the const qualifier
op::SparseRetainOpForwardRspImpl<cpu>(rctx.get_stream<cpu>(),
src, indices, kWriteTo,
&temp);
on_complete();
}, Context::CPU(), {src.var(), row_id.var()}, {retained_cpu.var()},
FnProperty::kNormal, priority, "KVStoreSparseRetain");
// if retained_cpu == out, CopyFromTo will ignore the copy operation
CopyFromTo(retained_cpu, out, priority);
}
}
private:
// reduce sum into val[0]
inline void ReduceSumCPU(const std::vector<NDArray> &in_data) {
MSHADOW_TYPE_SWITCH(in_data[0].dtype(), DType, {
std::vector<DType*> dptr(in_data.size());
for (size_t i = 0; i < in_data.size(); ++i) {
TBlob data = in_data[i].data();
CHECK(data.CheckContiguous());
dptr[i] = data.FlatTo2D<cpu, DType>().dptr_;
}
size_t total = in_data[0].shape().Size();
ReduceSumCPUImpl(dptr, total);
});
}
// serial implementation of reduce sum for row sparse NDArray.
inline void ReduceSumCPUExSerial(const std::vector<NDArray> &in, NDArray *out) {
using namespace rowsparse;
using namespace mshadow;
auto stype = out->storage_type();
CHECK_EQ(stype, kRowSparseStorage) << "Unexpected storage type " << stype;
size_t total_num_rows = 0;
size_t num_in = in.size();
// skip the ones with empty indices and values
std::vector<bool> skip(num_in, false);
// the values tensor of the inputs
MSHADOW_TYPE_SWITCH(out->dtype(), DType, {
MSHADOW_IDX_TYPE_SWITCH(out->aux_type(kIdx), IType, {
std::vector<Tensor<cpu, 2, DType>> in_vals(num_in);
std::vector<Tensor<cpu, 1, IType>> in_indices(num_in);
// offset to the values tensor of all inputs
std::vector<size_t> offsets(num_in, 0);
std::vector<size_t> num_rows(num_in, 0);
for (size_t i = 0; i < num_in; i++) {
if (!in[i].storage_initialized()) {
skip[i] = true;
continue;
}
auto size = in[i].aux_shape(kIdx).Size();
num_rows[i] = size;
total_num_rows += size;
in_vals[i] = in[i].data().FlatTo2D<cpu, DType>();
in_indices[i] = in[i].aux_data(kIdx).FlatTo1D<cpu, IType>();
}
std::vector<IType> indices;
indices.reserve(total_num_rows);
// gather indices from all inputs
for (size_t i = 0; i < num_in; i++) {
for (size_t j = 0; j < num_rows[i]; j++) {
indices.emplace_back(in_indices[i][j]);
}
}
CHECK_EQ(indices.size(), total_num_rows);
// dedup indices
std::sort(indices.begin(), indices.end());
indices.resize(std::unique(indices.begin(), indices.end()) - indices.begin());
      // the ones left are the unique non-zero rows
size_t nnr = indices.size();
// allocate memory for output
out->CheckAndAlloc({Shape1(nnr)});
auto idx_data = out->aux_data(kIdx).FlatTo1D<cpu, IType>();
auto val_data = out->data().FlatTo2D<cpu, DType>();
for (size_t i = 0; i < nnr; i++) {
// copy indices back
idx_data[i] = indices[i];
bool zeros = true;
for (size_t j = 0; j < num_in; j++) {
if (skip[j]) continue;
size_t offset = offsets[j];
if (offset < num_rows[j]) {
if (indices[i] == in_indices[j][offset]) {
if (zeros) {
Copy(val_data[i], in_vals[j][offset], nullptr);
zeros = false;
} else {
val_data[i] += in_vals[j][offset];
}
offsets[j] += 1;
}
}
}
}
});
});
}
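  // 4-way unrolled accumulation: mshadow expression templates fuse each
  // right-hand side into a single loop, so summing up to four inputs per
  // statement (in_0 += in_1 + in_2 + in_3 + in_4) needs roughly a quarter of
  // the passes over in_0 compared with adding one input at a time.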
template<typename DType>
inline static void ReduceSumCPU(
const std::vector<DType*> &dptr, size_t offset, index_t size) {
using namespace mshadow; // NOLINT(*)
Tensor<cpu, 1, DType> in_0(dptr[0] + offset, Shape1(size));
for (size_t i = 1; i < dptr.size(); i+=4) {
switch (dptr.size() - i) {
case 1: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
in_0 += in_1;
break;
}
case 2: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
in_0 += in_1 + in_2;
break;
}
case 3: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3;
break;
}
default: {
Tensor<cpu, 1, DType> in_1(dptr[i] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_2(dptr[i+1] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_3(dptr[i+2] + offset, Shape1(size));
Tensor<cpu, 1, DType> in_4(dptr[i+3] + offset, Shape1(size));
in_0 += in_1 + in_2 + in_3 + in_4;
break;
}
}
}
}
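  // Chunked parallel reduction. A minimal sketch of the index math, assuming
  // bigarray_bound_ >= 4096 so that step == 4 << 10:
  //   step  = 4096                          elements per task
  //   ntask = ceil(total / step)            tasks handed to OpenMP
  //   task j sums dptr[1..n-1] into dptr[0] over [j*step, (j+1)*step)
  // Small arrays (total < bigarray_bound_) and single-thread configurations
  // take the serial path instead.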
template<typename DType>
inline void ReduceSumCPUImpl(std::vector<DType*> dptr, size_t total) {
const size_t step = std::min(bigarray_bound_, static_cast<size_t>(4 << 10));
long ntask = (total + step - 1) / step; // NOLINT(*)
if (total < bigarray_bound_ || nthread_reduction_ <= 1) {
ReduceSumCPU(dptr, 0, total);
} else {
#pragma omp parallel for schedule(static) num_threads(nthread_reduction_)
for (long j = 0; j < ntask; ++j) { // NOLINT(*)
size_t k = static_cast<size_t>(j);
size_t begin = std::min(k * step, total);
size_t end = std::min((k + 1) * step, total);
if (j == ntask - 1) CHECK_EQ(end, total);
ReduceSumCPU(dptr, begin, static_cast<index_t>(end - begin));
}
}
}
  /// \brief temporary space for pushing and pulling
struct BufferEntry {
/// \brief the merged value
NDArray merged;
/// \brief the cpu buffer for gpu data
std::vector<NDArray> copy_buf;
/// \brief the merged buffer for the given storage type
inline NDArray& merged_buf(NDArrayStorageType stype) {
if (stype == kDefaultStorage) {
return merged;
}
CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
// check if sparse_merged is initialized
if (sparse_merged.is_none()) {
CHECK(!merged.is_none());
sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
true, merged.dtype());
}
return sparse_merged;
}
private:
/// \brief the sparse merged value
NDArray sparse_merged;
};
std::unordered_map<int, BufferEntry> merge_buf_;
size_t bigarray_bound_;
int nthread_reduction_;
bool is_serial_push_;
};
/**
* \brief an implementation of Comm that performs reduction on device
* directly.
*
 * It is faster if the total device-to-device bandwidth is larger than the
 * device-to-CPU bandwidth, which is often true for 4 or 8 GPUs. But it uses more device
* memory.
*/
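// Note on the data flow: unlike the CPU-based implementation above, Reduce()
// stages the source arrays into copy_buf buffers on the chosen merge context
// and runs ElementwiseSum there, and Broadcast() fans the merged result back
// out, ideally over the peer-to-peer links set up by EnableP2P().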
class CommDevice : public Comm {
public:
CommDevice() {
inited_ = false;
}
virtual ~CommDevice() { }
void Init(int key, const NDArrayStorageType stype, const TShape& shape,
int dtype = mshadow::kFloat32) override {
sorted_key_attrs_.emplace_back(key, shape, dtype);
}
void InitBuffersAndComm(const std::vector<NDArray>& src) {
if (!inited_) {
std::vector<Context> devs;
for (const auto& a : src) {
devs.push_back(a.ctx());
}
InitMergeBuffer(devs);
if (dmlc::GetEnv("MXNET_ENABLE_GPU_P2P", 1)) {
EnableP2P(devs);
}
}
}
const NDArray& Reduce(int key, const std::vector<NDArray>& src,
int priority) override {
    // when this reduce is called from kvstore_dist, gc_ is not set,
    // so we don't do the compression twice in dist_sync_device
if ((gc_ != nullptr) && (gc_->get_type() != CompressionType::kNone)) {
return ReduceCompressed(key, src, priority);
}
// avoid extra copy for single device, but it may bring problems for
// abnormal usage of kvstore
if (src.size() == 1) {
return src[0];
}
InitBuffersAndComm(src);
auto& buf = merge_buf_[key];
std::vector<NDArray> reduce(src.size());
const NDArrayStorageType stype = src[0].storage_type();
NDArray& buf_merged = buf.merged_buf(stype);
// normal dense reduce
if (stype == kDefaultStorage) {
CopyFromTo(src[0], &buf_merged, priority);
reduce[0] = buf_merged;
if (buf.copy_buf.empty()) {
        // TODO(mli) this results in large device memory usage for huge ndarrays,
        // such as the largest fully-connected layer in VGG. Consider doing a
        // segment reduce with NDArray.Slice or GPU direct memory access. For the
        // latter, we need to remove some ctx checks, and it also reduces perf by 20%
buf.copy_buf.resize(src.size()-1);
for (size_t i = 0; i < src.size()-1; ++i) {
buf.copy_buf[i] = NDArray(
buf_merged.shape(), buf_merged.ctx(), false, buf_merged.dtype());
}
}
for (size_t i = 0; i < src.size()-1; ++i) {
CopyFromTo(src[i+1], &(buf.copy_buf[i]), priority);
reduce[i+1] = buf.copy_buf[i];
}
} else {
// sparse reduce
if (buf.copy_buf.empty()) {
// initialize buffer for copying during reduce
buf.copy_buf.resize(src.size());
for (size_t j = 0; j < src.size(); ++j) {
buf.copy_buf[j] = NDArray(stype, src[0].shape(), buf_merged.ctx(), true, src[0].dtype());
}
}
CHECK(src[0].storage_type() == buf.copy_buf[0].storage_type())
<< "Storage type mismatch detected. " << src[0].storage_type() << "(src) vs. "
<< buf.copy_buf[0].storage_type() << "(buf.copy_buf)";
for (size_t i = 0; i < src.size(); ++i) {
CopyFromTo(src[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
}
}
ElementwiseSum(reduce, &buf_merged, priority);
return buf_merged;
}
const NDArray& ReduceCompressed(int key, const std::vector<NDArray>& src,
int priority) {
InitBuffersAndComm(src);
auto& buf = merge_buf_[key];
std::vector<NDArray> reduce(src.size());
if (buf.copy_buf.empty()) {
// one buf for each context
buf.copy_buf.resize(src.size());
buf.compressed_recv_buf.resize(src.size());
buf.compressed_send_buf.resize(src.size());
buf.residual.resize(src.size());
for (size_t i = 0; i < src.size(); ++i) {
buf.copy_buf[i] = NDArray(buf.merged.shape(), buf.merged.ctx(),
false, buf.merged.dtype());
buf.residual[i] = NDArray(buf.merged.shape(), src[i].ctx(),
false, buf.merged.dtype());
buf.residual[i] = 0;
int64_t small_size = gc_->GetCompressedSize(buf.merged.shape().Size());
buf.compressed_recv_buf[i] = NDArray(TShape{small_size}, buf.merged.ctx(),
false, buf.merged.dtype());
buf.compressed_send_buf[i] = NDArray(TShape{small_size}, src[i].ctx(),
false, buf.merged.dtype());
}
}
for (size_t i = 0; i < src.size(); ++i) {
// compress before copy
      // this is done even if the data is on the same context as copy_buf because
      // we don't want the training to be biased towards the data on this GPU
gc_->Quantize(src[i], &(buf.compressed_send_buf[i]), &(buf.residual[i]), priority);
if (buf.compressed_send_buf[i].ctx() != buf.compressed_recv_buf[i].ctx()) {
CopyFromTo(buf.compressed_send_buf[i], &(buf.compressed_recv_buf[i]), priority);
} else {
        // avoid a memory copy when they are on the same context
buf.compressed_recv_buf[i] = buf.compressed_send_buf[i];
}
gc_->Dequantize(buf.compressed_recv_buf[i], &(buf.copy_buf[i]), priority);
reduce[i] = buf.copy_buf[i];
}
ElementwiseSum(reduce, &buf.merged);
return buf.merged;
}
void Broadcast(int key, const NDArray& src,
const std::vector<NDArray*> dst, int priority) override {
if (!inited_) {
      // not initialized yet: copy to one device (picked from the key) first
int dev_id = key % dst.size();
CopyFromTo(src, dst[dev_id], priority);
for (size_t i = 0; i < dst.size(); ++i) {
if (i != static_cast<size_t>(dev_id)) {
CopyFromTo(*dst[dev_id], dst[i], priority);
}
}
} else {
auto& buf_merged = merge_buf_[key].merged_buf(src.storage_type());
CopyFromTo(src, &buf_merged, priority);
for (auto d : dst) {
CopyFromTo(buf_merged, d, priority);
}
}
}
void BroadcastRowSparse(int key, const NDArray& src,
const std::vector<std::pair<NDArray*, NDArray>>& dst,
const int priority) override {
CHECK_EQ(src.storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row-sparse src NDArray";
for (size_t i = 0; i < dst.size(); ++i) {
NDArray* out = dst[i].first;
NDArray row_id = dst[i].second;
CHECK_EQ(out->storage_type(), kRowSparseStorage)
<< "BroadcastRowSparse expects row_sparse dst NDArray";
CHECK_EQ(row_id.ctx(), src.ctx())
<< "row_id and src are expected to be on the same context";
// retain according to indices
const bool is_same_ctx = out->ctx() == src.ctx();
const bool is_diff_var = out->var() != src.var();
NDArray retained_gpu = (is_same_ctx && is_diff_var) ? *out :
NDArray(kRowSparseStorage, out->shape(), src.ctx(), true,
out->dtype(), out->aux_types());
if (!is_diff_var) {
common::LogOnce("The output of row_sparse_pull() on key " + std::to_string(key) +
"refers to the same NDArray as the one stored in KVStore."
"Performing row_sparse_pull() with such output is going to change the "
"data stored in KVStore. Incorrect result may be generated "
"next time row_sparse_pull() is called. To avoid such an issue,"
"consider create a new NDArray buffer to store the output.");
}
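      // Run the retain on the context that owns the retained buffer;
      // SparseRetainOpForwardRspWrapper dispatches to a cpu or gpu
      // implementation, and the explicit stream Wait() keeps the callback
      // from signaling completion before the GPU kernels finish.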
bool is_gpu = retained_gpu.ctx().dev_mask() == gpu::kDevMask;
Engine::Get()->PushAsync([=](RunContext rctx, Engine::CallbackOnComplete on_complete) {
const TBlob& indices = row_id.data();
using namespace mxnet::common;
NDArray temp = retained_gpu;
switch (temp.ctx().dev_mask()) {
case cpu::kDevMask: {
SparseRetainOpForwardRspWrapper<cpu>(rctx.get_stream<cpu>(),
src, indices, kWriteTo, &temp);
break;
}
#if MXNET_USE_CUDA
case gpu::kDevMask: {
SparseRetainOpForwardRspWrapper<gpu>(rctx.get_stream<gpu>(),
src, indices, kWriteTo, &temp);
// wait for GPU operations to complete
rctx.get_stream<gpu>()->Wait();
break;
}
#endif
default: LOG(FATAL) << MXNET_GPU_NOT_ENABLED_ERROR;
}
on_complete();
}, retained_gpu.ctx(), {src.var(), row_id.var()}, {retained_gpu.var()},
is_gpu ? FnProperty::kGPUPrioritized : FnProperty::kCPUPrioritized,
priority, "KVStoreSparseRetain");
CopyFromTo(retained_gpu, out, priority);
}
}
private:
void EnableP2P(const std::vector<Context>& devs) {
#if MXNET_USE_CUDA
std::vector<int> gpus;
for (const auto& d : devs) {
if (d.dev_mask() == gpu::kDevMask) {
gpus.push_back(d.dev_id);
}
}
int n = static_cast<int>(gpus.size());
int enabled = 0;
std::vector<int> p2p(n*n);
for (int i = 0; i < n; ++i) {
cudaSetDevice(gpus[i]);
for (int j = 0; j < n; j++) {
int access;
cudaDeviceCanAccessPeer(&access, gpus[i], gpus[j]);
if (access) {
cudaError_t e = cudaDeviceEnablePeerAccess(gpus[j], 0);
if (e == cudaSuccess || e == cudaErrorPeerAccessAlreadyEnabled) {
++enabled;
p2p[i*n+j] = 1;
}
}
}
}
if (enabled != n*(n-1)) {
      // print a warning if direct access is not fully enabled
      LOG(WARNING) << "direct access is enabled for only " << enabled << " out of "
                   << n*(n-1) << " GPU pairs. "
                   << "This may affect performance. "
                   << "You can set MXNET_ENABLE_GPU_P2P=0 to turn it off";
std::string access(n, '.');
for (int i = 0; i < n; ++i) {
for (int j = 0; j < n; ++j) {
access[j] = p2p[i*n+j] ? 'v' : '.';
}
LOG(WARNING) << access;
}
}
#endif
}
using KeyAttrs = std::tuple<int, TShape, int>;
  // Try to spread the merge buffers over the devices evenly: keys are sorted
  // by shape size (largest first) and each buffer is placed on the device
  // that currently holds the fewest buffered elements.
void InitMergeBuffer(const std::vector<Context>& devs) {
std::sort(sorted_key_attrs_.begin(), sorted_key_attrs_.end(), [](
const KeyAttrs& a, const KeyAttrs& b) {
return std::get<1>(a).Size() > std::get<1>(b).Size();
});
std::unordered_map<int, std::pair<Context, size_t>> ctx_info;
for (auto d : devs) {
ctx_info[d.dev_id] = std::make_pair(d, 0);
}
for (size_t i = 0; i < sorted_key_attrs_.size(); ++i) {
const int key = std::get<0>(sorted_key_attrs_[i]);
const TShape& shape = std::get<1>(sorted_key_attrs_[i]);
const int type = std::get<2>(sorted_key_attrs_[i]);
auto& buf = merge_buf_[key];
Context ctx;
size_t min_size = std::numeric_limits<size_t>::max();
for (auto it = ctx_info.begin(); it != ctx_info.end(); ++it) {
size_t size = it->second.second;
if (size <= min_size) {
ctx = it->second.first;
min_size = size;
}
}
// Delayed allocation - as the dense merged buffer might not be used at all if push()
// only sees sparse arrays
bool delay_alloc = true;
buf.merged = NDArray(shape, ctx, delay_alloc, type);
ctx_info[ctx.dev_id].second += shape.Size();
}
inited_ = true;
}
std::vector<KeyAttrs> sorted_key_attrs_;
  /// \brief temporary space for pushing and pulling
struct BufferEntry {
/// \brief the dense merged value for reduce and broadcast operations
NDArray merged;
/// \brief the gpu buffer for copy during reduce operation
std::vector<NDArray> copy_buf;
/// \brief the residual buffer for gradient compression
std::vector<NDArray> residual;
/// \brief the small buffer for compressed data in sender
std::vector<NDArray> compressed_send_buf;
/// \brief the small buffer for compressed data in receiver
std::vector<NDArray> compressed_recv_buf;
/// \brief the merged buffer for the given storage type (could be either dense or row_sparse)
inline NDArray& merged_buf(NDArrayStorageType stype) {
if (stype == kDefaultStorage) {
        CHECK(!merged.is_none()) << "uninitialized merge buffer detected";
return merged;
}
CHECK(stype == kRowSparseStorage) << "unexpected storage type " << stype;
// check if sparse_merged is initialized
if (sparse_merged.is_none()) {
CHECK(!merged.is_none());
sparse_merged = NDArray(kRowSparseStorage, merged.shape(), merged.ctx(),
true, merged.dtype());
}
return sparse_merged;
}
private:
/// \brief the sparse merged value for reduce and rowsparse broadcast operations
NDArray sparse_merged;
};
std::unordered_map<int, BufferEntry> merge_buf_;
bool inited_;
};
} // namespace kvstore
} // namespace mxnet
#endif // MXNET_KVSTORE_COMM_H_
|
pbeampp.c | /**************************************************************************
PBEAMPP.C of ZIB optimizer MCF, SPEC version
This software was developed at ZIB Berlin. Maintenance and revisions
solely on responsibility of Andreas Loebel
Dr. Andreas Loebel
Ortlerweg 29b, 12207 Berlin
Konrad-Zuse-Zentrum fuer Informationstechnik Berlin (ZIB)
Scientific Computing - Optimization
Takustr. 7, 14195 Berlin-Dahlem
Copyright (c) 1998-2000 ZIB.
Copyright (c) 2000-2002 ZIB & Loebel.
Copyright (c) 2003-2005 Andreas Loebel.
**************************************************************************/
/* LAST EDIT: Sun Nov 21 16:22:04 2004 by Andreas Loebel (boss.local.de) */
/* $Id: pbeampp.c,v 1.10 2005/02/17 19:42:32 bzfloebe Exp $ */
#define K 300
#define B 50
#include "pbeampp.h"
#ifdef _PROTO_
int bea_is_dual_infeasible(arc_t *arc, cost_t red_cost)
#else
int bea_is_dual_infeasible(arc, red_cost)
arc_t *arc;
cost_t red_cost;
#endif
{
return ((red_cost < 0 && arc->ident == AT_LOWER) || (red_cost > 0 && arc->ident == AT_UPPER));
}
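/* The basket holds up to B+K candidate arcs together with their reduced
   costs; perm[] is a permutation over the basket which sort_basket() orders
   by decreasing |reduced cost|, so perm[1] ends up as the best candidate. */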
typedef struct basket
{
arc_t *a;
cost_t cost;
cost_t abs_cost;
} BASKET;
static long basket_size;
static BASKET basket[B + K + 1];
static BASKET *perm[B + K + 1];
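/* Partial quicksort on perm[min..max]: the recursion into the right part is
   skipped once l > B, since only the B best candidates are ever consumed. */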
#ifdef _PROTO_
void sort_basket(long min, long max)
#else
void sort_basket(min, max) long min, max;
#endif
{
long l, r;
cost_t cut;
BASKET *xchange;
l = min;
r = max;
cut = perm[(long)((l + r) / 2)]->abs_cost;
do
{
while (perm[l]->abs_cost > cut)
l++;
while (cut > perm[r]->abs_cost)
r--;
if (l < r)
{
xchange = perm[l];
perm[l] = perm[r];
perm[r] = xchange;
}
if (l <= r)
{
l++;
r--;
}
} while (l <= r);
if (min < r)
sort_basket(min, r);
if (l < max && l <= B)
sort_basket(l, max);
}
static long nr_group;
static long group_pos;
static long initialize = 1;
#ifdef _PROTO_
arc_t *primal_bea_mpp(long m, arc_t *arcs, arc_t *stop_arcs,
cost_t *red_cost_of_bea)
#else
arc_t *primal_bea_mpp(m, arcs, stop_arcs, red_cost_of_bea) long m;
arc_t *arcs;
arc_t *stop_arcs;
cost_t *red_cost_of_bea;
#endif
{
long old_group_pos;
if (initialize)
{
for (long i = 1; i < K + B + 1; i++)
perm[i] = &(basket[i]);
nr_group = ((m - 1) / K) + 1;
group_pos = 0;
basket_size = 0;
initialize = 0;
}
else
{
/*****************************************************************/
/******************* BEGINNING FIRST FOR LOOP ********************/
/*****************************************************************/
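        /* Parallel compaction of the surviving basket entries in two passes
           over 4 chunks: pass 1 re-prices each candidate and counts the hits
           per chunk, a serial prefix sum turns those counts into chunk base
           offsets, and pass 2 scatters the survivors into perm[] at
           base[chunk] + local rank, giving a stable packing. */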
int next_array[B + K + 1];
int next_increased_array[B + K + 1];
int next_increase_count[5];
arc_t *arc_array[B + K + 1];
cost_t red_cost_array[B + K + 1];
int min = B < basket_size ? B : basket_size;
int chunk_size = (min - 2 + 1) / 4;
#pragma omp parallel for
for (long j = 0; j < 4; j++)
{
long chunk_start = 2 + j * chunk_size;
long chunk_end = j == 3 ? min : 2 + j * chunk_size + chunk_size - 1;
next_increase_count[j + 1] = 0;
for (long i = chunk_start; i <= chunk_end; i++)
{
arc_t *arc = perm[i]->a;
cost_t red_cost = arc->cost - arc->tail->potential + arc->head->potential;
int ident = arc->ident;
arc_array[i] = arc;
red_cost_array[i] = red_cost;
int predicate = (red_cost < 0 && ident == AT_LOWER) || (red_cost > 0 && ident == AT_UPPER);
next_increased_array[i] = predicate ? 1 : 0;
next_increase_count[j + 1] += predicate;
}
}
next_increase_count[0] = 0;
for (long j = 1; j <= 4; j++)
{
next_increase_count[j] += next_increase_count[j - 1];
}
#pragma omp parallel for
for (long j = 0; j < 4; j++)
{
long next = 0;
long global_next;
long chunk_start = 2 + j * chunk_size;
long chunk_end = j == 3 ? min : 2 + j * chunk_size + chunk_size - 1;
for (long i = chunk_start; i <= chunk_end; i++)
{
if (next_increased_array[i])
{
next++;
global_next = next_increase_count[j] + next;
BASKET *current_prem = perm[global_next];
cost_t red_cost = red_cost_array[i];
current_prem->a = arc_array[i];
current_prem->cost = red_cost;
current_prem->abs_cost = ABS(red_cost);
}
}
}
basket_size = next_increase_count[4];
}
/*****************************************************************/
/********************* END FIRST FOR LOOP ************************/
/*****************************************************************/
old_group_pos = group_pos;
/*****************************************************************/
/********************** BEGINNING GOTO LOOP **********************/
/*****************************************************************/
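    /* The commented block below is a disabled attempt to parallelize the
       pricing loop with the same count / prefix-sum / scatter pattern; the
       serial goto loop further down is the version actually in use. */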
// int new_group_pos = group_pos;
// int new_group_pos_set = 0;
// int chunk_size = nr_group / 4;
// int basket_size_increase_count[5];
// int basket_size_increased_array_size = (((stop_arcs - arcs) / nr_group) + 1) * nr_group;
// // int bla = stop_arcs - arcs;
// // printf("bla: %d\n", basket_size_increased_array_size / 8);
// int *basket_size_increased_array = malloc(basket_size_increased_array_size * sizeof(int));
// for (long j = 0; j < 4; j++)
// {
// // printf("HELLO!!!!!\n");
// long chunk_start = j * chunk_size;
// long chunk_end = j == 3 ? nr_group : j * chunk_size + chunk_size;
// long real_chunk_size = chunk_end - chunk_start + 1;
// basket_size_increase_count[j + 1] = 0;
// for (long group_pos_index = 0; group_pos_index < real_chunk_size; group_pos_index++)
// {
// long current_group_pos = (group_pos + (j * chunk_size) + group_pos_index) % nr_group;
// arc_t *arc = arcs + current_group_pos;
// int index = 0;
// for (; arc < stop_arcs; arc += nr_group)
// {
// long basket_size_increased_index = nr_group * current_group_pos + index;
// // if (basket_size_increased_index < B)
// // {
// // printf("HELsdfsdfsdfLO2!!!!!\n");
// if (arc->ident > BASIC)
// {
// cost_t red_cost = arc->cost - arc->tail->potential + arc->head->potential;
// int predicate = bea_is_dual_infeasible(arc, red_cost);
// basket_size_increased_array[basket_size_increased_index] = predicate;
// basket_size_increase_count[j + 1] += predicate;
// }
// // }
// // else if (new_group_pos_set == 0)
// // {
// // new_group_pos_set = 1;
// // new_group_pos = current_group_pos;
// // }
// index++;
// }
// }
// }
// basket_size_increase_count[0] = 0;
// for (long j = 1; j <= 4; j++)
// {
// basket_size_increase_count[j] += basket_size_increase_count[j - 1];
// }
// for (long j = 0; j < 4; j++)
// {
// long global_basket_size = basket_size_increase_count[j];
// long chunk_start = j * chunk_size;
// long chunk_end = j == 3 ? nr_group : j * chunk_size + chunk_size;
// basket_size_increase_count[j + 1] = 0;
// for (long group_pos_index = 0; group_pos_index < chunk_size; group_pos_index++)
// {
// long current_group_pos = (group_pos + (j * chunk_size) + group_pos_index) % nr_group;
// arc_t *arc = arcs + current_group_pos;
// int index = 0;
// long local_basket_size = 0;
// for (; arc < stop_arcs; arc += nr_group)
// {
// long basket_size_increased_index = nr_group * current_group_pos + index;
// if (global_basket_size < B)
// {
// if (arc->ident > BASIC)
// {
// if (basket_size_increased_array[basket_size_increased_index])
// {
// local_basket_size++;
// global_basket_size = basket_size_increase_count[j] + local_basket_size;
// BASKET *current_prem = perm[global_basket_size];
// cost_t red_cost = arc->cost - arc->tail->potential + arc->head->potential;
// current_prem->a = arc;
// current_prem->cost = red_cost;
// current_prem->abs_cost = ABS(red_cost);
// }
// }
// }
// else if (new_group_pos_set == 0)
// {
// new_group_pos_set = 1;
// new_group_pos = current_group_pos;
// }
// index++;
// }
// }
// }
// group_pos = new_group_pos;
// basket_size = basket_size_increase_count[4];
/*****************************************************************/
/************************* END GOTO LOOP *************************/
/*****************************************************************/
arc_t *arc;
cost_t red_cost;
NEXT:
/* price next group */
arc = arcs + group_pos;
for (; arc < stop_arcs; arc += nr_group)
{
if (arc->ident > BASIC)
{
/* red_cost = bea_compute_red_cost( arc ); */
red_cost = arc->cost - arc->tail->potential + arc->head->potential;
if (bea_is_dual_infeasible(arc, red_cost))
{
basket_size++;
perm[basket_size]->a = arc;
perm[basket_size]->cost = red_cost;
perm[basket_size]->abs_cost = ABS(red_cost);
}
}
}
if (++group_pos == nr_group)
group_pos = 0;
if (basket_size < B && group_pos != old_group_pos)
goto NEXT;
if (basket_size == 0)
{
initialize = 1;
*red_cost_of_bea = 0;
return NULL;
}
sort_basket(1, basket_size);
*red_cost_of_bea = perm[1]->cost;
return (perm[1]->a);
}
|
oyranos_cmm_lcms.c | /** @file oyranos_cmm_lcms.c
*
* Oyranos is an open source Color Management System
*
* @par Copyright:
* 2007-2016 (C) Kai-Uwe Behrmann
*
* @brief littleCMS CMM module for Oyranos
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* new BSD <http://www.opensource.org/licenses/BSD-3-Clause>
* @since 2007/11/12
*/
#include <lcms.h>
#include <dlfcn.h> /* dlopen() */
#include "oyCMM_s.h"
#include "oyCMMapi4_s.h"
#include "oyCMMapi4_s_.h"
#include "oyCMMapi6_s_.h"
#include "oyCMMapi7_s.h"
#include "oyCMMapi7_s_.h"
#include "oyCMMapi10_s_.h"
#include "oyCMMui_s_.h"
#include "oyConnectorImaging_s_.h"
#include "oyProfiles_s.h"
#include "oyStructList_s.h"
#include "oyranos_cmm.h" /* the API's this CMM implements */
#include "oyranos_generic.h" /* oy_connector_imaging_static_object */
#include "oyranos_helper.h" /* oySprintf_ and other local helpers */
#include "oyranos_i18n.h"
#include "oyranos_io.h"
#include "oyranos_image.h"
#include "oyranos_object_internal.h"
#include "oyranos_string.h"
#ifdef _OPENMP
#define USE_OPENMP 1
#include <omp.h>
#endif
/*
oyCMMinfo_s lcms_cmm_module;
oyCMMapi4_s lcms_api4_cmm;
oyCMMui_s lcms_api4_ui;
oyCMMapi7_s lcms_api7_cmm;
oyConnectorImaging_s* lcms_cmmIccSocket_connectors[2];
oyConnectorImaging_s lcms_cmmIccSocket_connector;
oyConnectorImaging_s* lcms_cmmIccPlug_connectors[2];
oyConnectorImaging_s lcms_cmmIccPlug_connector;
oyCMMapi6_s lcms_api6_cmm;
oyCMMapi10_s lcms_api10_cmm;
oyCMMapi10_s lcms_api10_cmm2;
*/
extern oyCMMapi4_s_ lcms_api4_cmm;
void* oyAllocateFunc_ (size_t size);
void* oyAllocateWrapFunc_ (size_t size,
oyAlloc_f allocate_func);
void oyDeAllocateFunc_ (void * data);
#include <math.h>
/* --- internal definitions --- */
#define CMM_NICK "lcms"
#define CMM_FUNC lcms
#define CMMProfileOpen_M lcmsOpenProfileFromMem
#define CMMProfileRelease_M lcmsCloseProfile
#define CMMToString_M(text) #text
#define CMMMaxChannels_M 16
#define lcmsPROFILE "lcPR"
#define lcmsTRANSFORM "lcCC"
/** The proofing LUT's grid size may improve the sharpness of out-of-gamut
 * marking, but at the price of lost speed and increased memory consumption.
 * 53 is the grid size used internally in lcms' gamut marking code. */
#define lcmsPROOF_LUT_GRID_RASTER 53
#define CMM_VERSION {0,1,1}
oyMessage_f lcms_msg = oyMessageFunc;
int lcmsErrorHandlerFunction(int ErrorCode, const char *ErrorText);
int lcmsCMMMessageFuncSet ( oyMessage_f message_func );
int lcmsCMMInit ( );
/** @struct lcmsProfileWrap_s
* @brief lcms wrapper for profile data struct
*
* @version Oyranos: 0.1.8
* @date 2007/12/10
* @since 2007/12/10 (Oyranos: 0.1.8)
*/
typedef struct {
uint32_t type; /**< shall be lcPR */
size_t size;
  oyPointer block;                     /**< Oyranos raw profile pointer. Don't free! */
oyPointer lcms; /**< cmsHPROFILE struct */
icColorSpaceSignature sig; /**< ICC profile signature */
} lcmsProfileWrap_s;
/** @struct lcmsTransformWrap_s
* @brief lcms wrapper for transform data struct
*
* @version Oyranos: 0.1.8
* @date 2007/12/20
* @since 2007/12/20 (Oyranos: 0.1.8)
*/
typedef struct {
int type; /**< shall be lcCC */
oyPointer lcms; /**< cmsHPROFILE struct */
icColorSpaceSignature sig_in; /**< ICC profile signature */
icColorSpaceSignature sig_out; /**< ICC profile signature */
oyPixel_t oy_pixel_layout_in;
oyPixel_t oy_pixel_layout_out;
} lcmsTransformWrap_s;
lcmsTransformWrap_s * lcmsTransformWrap_Set_ (
cmsHTRANSFORM xform,
icColorSpaceSignature color_in,
icColorSpaceSignature color_out,
oyPixel_t oy_pixel_layout_in,
oyPixel_t oy_pixel_layout_out,
oyPointer_s * oy );
int lcmsCMMTransform_GetWrap_ ( oyPointer_s * cmm_ptr,
lcmsTransformWrap_s ** s );
int lcmsCMMDeleteTransformWrap ( oyPointer * wrap );
lcmsProfileWrap_s * lcmsCMMProfile_GetWrap_(
oyPointer_s * cmm_ptr );
int lcmsCMMProfileReleaseWrap ( oyPointer * p );
int lcmsCMMCheckPointer(oyPointer_s * cmm_ptr,
const char * resource );
int oyPixelToCMMPixelLayout_ ( oyPixel_t pixel_layout,
icColorSpaceSignature color_space );
char * lcmsImage_GetText ( oyImage_s * image,
int verbose,
oyAlloc_f allocateFunc );
char * lcmsFilterNode_GetText ( oyFilterNode_s * node,
oyNAME_e type,
oyAlloc_f allocateFunc );
extern char lcms_extra_options[];
cmsHPROFILE lcmsGamutCheckAbstract ( oyProfile_s * proof,
DWORD flags,
int intent,
int intent_proof );
oyPointer lcmsCMMColorConversion_ToMem_ (
cmsHTRANSFORM * xform,
size_t * size,
oyAlloc_f allocateFunc );
oyOptions_s* lcmsFilter_CmmIccValidateOptions
( oyFilterCore_s * filter,
oyOptions_s * validate,
int statical,
uint32_t * result );
cmsHPROFILE lcmsAddProfile ( oyProfile_s * p );
cmsHPROFILE lcmsAddProofProfile ( oyProfile_s * proof,
DWORD flags,
int intent,
int intent_proof );
oyPointer lcmsFilterNode_CmmIccContextToMem (
oyFilterNode_s * node,
size_t * size,
oyAlloc_f allocateFunc );
int lcmsModuleData_Convert ( oyPointer_s * data_in,
oyPointer_s * data_out,
oyFilterNode_s * node );
int lcmsFilterPlug_CmmIccRun ( oyFilterPlug_s * requestor_plug,
oyPixelAccess_s * ticket );
const char * lcmsInfoGetText ( const char * select,
oyNAME_e type,
oyStruct_s * context );
/* --- implementations --- */
/* explicitly load liblcms functions, to avoid symbol conflicts */
static int lcms_initialised = 0;
static void * lcms_handle = NULL;
static int (*lcmsErrorAction)(int nAction) = NULL;
static void (*lcmsSetErrorHandler)(cmsErrorHandlerFunction Fn) = NULL;
static icColorSpaceSignature (*lcmsGetColorSpace)(cmsHPROFILE hProfile) = NULL;
static icColorSpaceSignature (*lcmsGetPCS)(cmsHPROFILE hProfile) = NULL;
static icProfileClassSignature (*lcmsGetDeviceClass)(cmsHPROFILE hProfile) = NULL;
static int (*l_cmsChannelsOf)(icColorSpaceSignature ColorSpace) = NULL;
static int (*lcmsSetCMYKPreservationStrategy)(int n) = NULL;
static cmsHTRANSFORM (*lcmsCreateTransform)(cmsHPROFILE Input,
DWORD InputFormat,
cmsHPROFILE Output,
DWORD OutputFormat,
int Intent,
DWORD dwFlags) = NULL;
static cmsHTRANSFORM (*lcmsCreateProofingTransform)(cmsHPROFILE Input,
DWORD InputFormat,
cmsHPROFILE Output,
DWORD OutputFormat,
cmsHPROFILE Proofing,
int Intent,
int ProofingIntent,
DWORD dwFlags) = NULL;
static cmsHTRANSFORM (*lcmsCreateMultiprofileTransform)(cmsHPROFILE hProfiles[],
int nProfiles,
DWORD InputFormat,
DWORD OutputFormat,
int Intent,
DWORD dwFlags) = NULL;
static void (*lcmsDeleteTransform)(cmsHTRANSFORM hTransform) = NULL;
static void (*lcmsDoTransform)(cmsHTRANSFORM Transform,
LPVOID InputBuffer,
LPVOID OutputBuffer,
unsigned int Size) = NULL;
static cmsHPROFILE (*lcmsTransform2DeviceLink)(cmsHTRANSFORM hTransform, DWORD dwFlags) = NULL;
static LCMSBOOL (*lcmsAddTag)(cmsHPROFILE hProfile, icTagSignature sig, const void* data) = NULL;
static LCMSBOOL (*l_cmsSaveProfileToMem)(cmsHPROFILE hProfile, void *MemPtr,
size_t* BytesNeeded) = NULL;
static cmsHPROFILE (*lcmsOpenProfileFromMem)(LPVOID MemPtr, DWORD dwSize) = NULL;
static LCMSBOOL (*lcmsCloseProfile)(cmsHPROFILE hProfile) = NULL;
static LPLUT (*lcmsAllocLUT)(void) = NULL;
static LPLUT (*lcmsAlloc3DGrid)(LPLUT Lut, int clutPoints, int inputChan, int outputChan) = NULL;
static int (*lcmsSample3DGrid)(LPLUT Lut, _cmsSAMPLER Sampler, LPVOID Cargo, DWORD dwFlags) = NULL;
static void (*lcmsFreeLUT)(LPLUT Lut) = NULL;
static cmsHPROFILE (*l_cmsCreateProfilePlaceholder)(void) = NULL;
static void (*lcmsSetDeviceClass)(cmsHPROFILE hProfile, icProfileClassSignature sig) = NULL;
static void (*lcmsSetColorSpace)(cmsHPROFILE hProfile, icColorSpaceSignature sig) = NULL;
static void (*lcmsSetPCS)(cmsHPROFILE hProfile, icColorSpaceSignature pcs) = NULL;
static LPGAMMATABLE (*lcmsBuildGamma)(int nEntries, double Gamma) = NULL;
static void (*lcmsFreeGamma)(LPGAMMATABLE Gamma) = NULL;
static cmsHPROFILE (*lcmsCreateRGBProfile)(LPcmsCIExyY WhitePoint,
LPcmsCIExyYTRIPLE Primaries,
LPGAMMATABLE TransferFunction[3]) = NULL;
static cmsHPROFILE (*lcmsCreateLabProfile)(LPcmsCIExyY WhitePoint) = NULL;
static LPcmsCIEXYZ (*lcmsD50_XYZ)(void) = NULL;
static LPcmsCIExyY (*lcmsD50_xyY)(void) = NULL;
static void (*lcmsLabEncoded2Float)(LPcmsCIELab Lab, const WORD wLab[3]) = NULL;
static void (*lcmsFloat2LabEncoded)(WORD wLab[3], const cmsCIELab* Lab) = NULL;
static double (*lcmsDeltaE)(LPcmsCIELab Lab1, LPcmsCIELab Lab2) = NULL;
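/* LOAD_FUNC(cmsFoo) resolves the plain lcms symbol "cmsFoo" via dlsym() and
   stores it in the local pointer named by prepending "l" (token pasting:
   l##func), so this module never links against lcms directly and thus
   avoids symbol clashes with other color libraries. */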
#define LOAD_FUNC( func ) l##func = dlsym(lcms_handle, #func ); \
if(!l##func) lcms_msg( oyMSG_ERROR,0, OY_DBG_FORMAT_" " \
"init failed: %s", \
OY_DBG_ARGS_, dlerror() );
/** Function lcmsCMMInit
* @brief API requirement
*
* @version Oyranos: 0.9.5
* @date 2014/01/18
* @since 2007/12/11 (Oyranos: 0.1.8)
*/
int lcmsCMMInit ( oyStruct_s * filter OY_UNUSED )
{
int error = 0;
if(!lcms_initialised)
{
lcms_initialised = 1;
char * fn = oyLibNameCreate_( "lcms", 1 );
lcms_handle = dlopen(fn, RTLD_LAZY);
oyFree_m_( fn );
if(!lcms_handle)
{
lcms_msg( oyMSG_ERROR,0, OY_DBG_FORMAT_" "
"init failed: %s",
OY_DBG_ARGS_, dlerror() );
error = 1;
} else
{
LOAD_FUNC( cmsErrorAction );
LOAD_FUNC( cmsSetErrorHandler );
LOAD_FUNC( cmsGetColorSpace );
LOAD_FUNC( cmsGetPCS );
LOAD_FUNC( cmsGetDeviceClass );
LOAD_FUNC( _cmsChannelsOf );
LOAD_FUNC( cmsSetCMYKPreservationStrategy );
LOAD_FUNC( cmsCreateTransform );
LOAD_FUNC( cmsCreateProofingTransform );
LOAD_FUNC( cmsCreateMultiprofileTransform );
LOAD_FUNC( cmsDeleteTransform );
LOAD_FUNC( cmsDoTransform );
LOAD_FUNC( cmsTransform2DeviceLink );
LOAD_FUNC( cmsAddTag );
LOAD_FUNC( _cmsSaveProfileToMem );
LOAD_FUNC( cmsOpenProfileFromMem );
LOAD_FUNC( cmsCloseProfile );
LOAD_FUNC( cmsAllocLUT );
LOAD_FUNC( cmsAlloc3DGrid );
LOAD_FUNC( cmsSample3DGrid );
LOAD_FUNC( cmsFreeLUT );
LOAD_FUNC( _cmsCreateProfilePlaceholder );
LOAD_FUNC( cmsSetDeviceClass );
LOAD_FUNC( cmsSetColorSpace );
LOAD_FUNC( cmsSetPCS );
LOAD_FUNC( cmsBuildGamma );
LOAD_FUNC( cmsFreeGamma );
LOAD_FUNC( cmsCreateRGBProfile );
LOAD_FUNC( cmsCreateLabProfile );
LOAD_FUNC( cmsD50_XYZ );
LOAD_FUNC( cmsD50_xyY );
LOAD_FUNC( cmsLabEncoded2Float );
LOAD_FUNC( cmsFloat2LabEncoded );
LOAD_FUNC( cmsDeltaE );
lcmsErrorAction( LCMS_ERROR_SHOW );
lcmsSetErrorHandler( lcmsErrorHandlerFunction );
}
}
return error;
}
/** Function lcmsCMMProfile_GetWrap_
* @brief convert to lcms profile wrapper struct
*
* @version Oyranos: 0.1.8
* @date 2007/12/10
* @since 2007/12/10 (Oyranos: 0.1.8)
*/
lcmsProfileWrap_s * lcmsCMMProfile_GetWrap_( oyPointer_s * cmm_ptr )
{
lcmsProfileWrap_s * s = 0;
char * type_ = lcmsPROFILE;
uint32_t type = *((uint32_t*)type_);
if(cmm_ptr && !lcmsCMMCheckPointer( cmm_ptr, lcmsPROFILE ) &&
oyPointer_GetPointer(cmm_ptr))
s = (lcmsProfileWrap_s*) oyPointer_GetPointer(cmm_ptr);
if(s && s->type != type)
s = 0;
return s;
}
/** Function lcmsCMMTransform_GetWrap_
* @brief convert to lcms transform wrapper struct
*
* @version Oyranos: 0.1.8
* @since 2007/12/20 (Oyranos: 0.1.8)
* @date 2009/05/28
*/
int lcmsCMMTransform_GetWrap_ ( oyPointer_s * cmm_ptr,
lcmsTransformWrap_s ** s )
{
char * type_ = lcmsTRANSFORM;
int type = *((int32_t*)type_);
if(cmm_ptr && !lcmsCMMCheckPointer( cmm_ptr, lcmsTRANSFORM ) &&
oyPointer_GetPointer(cmm_ptr))
*s = (lcmsTransformWrap_s*) oyPointer_GetPointer(cmm_ptr);
if(*s && ((*s)->type != type || !(*s)->lcms))
{
*s = 0;
return 1;
}
return 0;
}
/** Function lcmsCMMProfileReleaseWrap
* @brief release a lcms profile wrapper struct
*
* @version Oyranos: 0.1.8
* @date 2007/12/20
* @since 2007/12/20 (Oyranos: 0.1.8)
*/
int lcmsCMMProfileReleaseWrap(oyPointer *p)
{
int error = !p;
lcmsProfileWrap_s * s = 0;
char * type_ = lcmsPROFILE;
uint32_t type = *((uint32_t*)type_);
char s_type[4];
if(!error && *p)
s = (lcmsProfileWrap_s*) *p;
if(!error)
error = !s;
if(!error)
memcpy(s_type, &s->type, 4);
if(!error && s->type != type)
error = 1;
if(!error)
{
CMMProfileRelease_M (s->lcms);
s->lcms = 0;
s->type = 0;
s->size = 0;
s->block = 0;
free(s);
}
if(!error)
*p = 0;
return error;
}
/** Function lcmsCMMDataOpen
* @brief oyCMMProfileOpen_t implementation
*
* @version Oyranos: 0.1.10
* @since 2007/11/12 (Oyranos: 0.1.8)
* @date 2007/12/27
*/
int lcmsCMMData_Open ( oyStruct_s * data,
oyPointer_s * oy )
{
oyPointer_s * s = 0;
int error = 0;
if(!error)
{
char * type_ = lcmsPROFILE;
int type = *((int32_t*)type_);
size_t size = 0;
oyPointer block = 0;
lcmsProfileWrap_s * s = calloc(sizeof(lcmsProfileWrap_s), 1);
if(data->type_ == oyOBJECT_PROFILE_S)
{
oyProfile_s * p = (oyProfile_s*)data;
block = oyProfile_GetMem( p, &size, 0, oyAllocateFunc_ );
}
s->type = type;
s->size = size;
s->block = block;
s->lcms = CMMProfileOpen_M( block, size );
error = oyPointer_Set( oy, 0,
lcmsPROFILE, s, CMMToString_M(CMMProfileOpen_M),
lcmsCMMProfileReleaseWrap );
}
if(!error)
s = oy;
if(!error)
error = !s;
return error;
}
/** Function lcmsCMMCheckPointer
 *  @brief check that an oyPointer_s carries the expected lcms resource
*
* @version Oyranos: 0.1.8
* @date 2007/11/12
* @since 2007/11/12 (Oyranos: 0.1.8)
*/
int lcmsCMMCheckPointer(oyPointer_s * cmm_ptr,
const char * resource )
{
int error = !cmm_ptr;
if(cmm_ptr &&
oyPointer_GetPointer(cmm_ptr) && oyPointer_GetResourceName(cmm_ptr))
{
int * res_id = (int*)oyPointer_GetResourceName(cmm_ptr);
if(!oyCMMlibMatchesCMM(oyPointer_GetLibName(cmm_ptr), CMM_NICK) ||
*res_id != *((int*)(resource)) )
error = 1;
} else {
error = 1;
}
return error;
}
/** Function oyPixelToCMMPixelLayout_
 *  @brief map an Oyranos pixel layout onto a littleCMS format descriptor
*
* @version Oyranos: 0.1.8
* @date 2007/11/00
* @since 2007/11/00 (Oyranos: 0.1.8)
*/
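/* A minimal worked example, assuming an interleaved 8-bit RGB layout with no
 * alpha: chan_n == cchans == 3, extra == 0 and data_type == oyUINT8, so
 * cmm_pixel = COLORSPACE_SH(PT_ANY) | CHANNELS_SH(3) | BYTES_SH(1),
 * which equals lcms' TYPE_RGB_8 except for the generic color space field. */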
int oyPixelToCMMPixelLayout_ ( oyPixel_t pixel_layout,
icColorSpaceSignature color_space )
{
int cmm_pixel = 0;
int chan_n = oyToChannels_m (pixel_layout);
int c_off = oyToColorOffset_m (pixel_layout);
oyDATATYPE_e data_type = oyToDataType_m (pixel_layout);
int planar = oyToPlanar_m (pixel_layout);
int flavour = oyToFlavor_m (pixel_layout);
int cchans = l_cmsChannelsOf( color_space );
int extra = chan_n - cchans;
if(chan_n > CMMMaxChannels_M)
lcms_msg( oyMSG_WARN,0, OY_DBG_FORMAT_" "
"can not handle more than %d channels; found: %d",
OY_DBG_ARGS_, CMMMaxChannels_M, chan_n);
cmm_pixel = COLORSPACE_SH(PT_ANY);
cmm_pixel |= CHANNELS_SH(cchans);
if(extra)
cmm_pixel |= EXTRA_SH(extra);
if(c_off == 1)
cmm_pixel |= SWAPFIRST_SH(1);
if(data_type == oyUINT8)
cmm_pixel |= BYTES_SH(1);
else if(data_type == oyUINT16)
cmm_pixel |= BYTES_SH(2);
if(oyToSwapColorChannels_m (pixel_layout))
cmm_pixel |= DOSWAP_SH(1);
if(oyToByteswap_m(pixel_layout))
cmm_pixel |= ENDIAN16_SH(1);
if(planar)
cmm_pixel |= PLANAR_SH(1);
if(flavour)
cmm_pixel |= FLAVOR_SH(1);
return cmm_pixel;
}
/** Function lcmsCMMDeleteTransformWrap
 *  @brief release a lcmsTransformWrap_s and the lcms transform it wraps
*
* @version Oyranos: 0.1.8
* @since 2007/12/00 (Oyranos: 0.1.8)
* @date 2007/12/00
*/
int lcmsCMMDeleteTransformWrap(oyPointer * wrap)
{
if(wrap && *wrap)
{
lcmsTransformWrap_s * s = (lcmsTransformWrap_s*) *wrap;
lcmsDeleteTransform (s->lcms);
s->lcms = 0;
free(s);
*wrap = 0;
return 0;
}
return 1;
}
/** Function lcmsTransformWrap_Set_
* @brief fill a lcmsTransformWrap_s struct
*
* @version Oyranos: 0.1.8
* @since 2007/12/21 (Oyranos: 0.1.8)
* @date 2007/12/21
*/
lcmsTransformWrap_s * lcmsTransformWrap_Set_ (
cmsHTRANSFORM xform,
icColorSpaceSignature color_in,
icColorSpaceSignature color_out,
oyPixel_t oy_pixel_layout_in,
oyPixel_t oy_pixel_layout_out,
oyPointer_s * oy )
{
int error = !xform;
lcmsTransformWrap_s * s = 0;
if(!error)
{
char * type_ = lcmsTRANSFORM;
int type = *((int32_t*)type_);
lcmsTransformWrap_s * ltw = calloc(sizeof(lcmsTransformWrap_s), 1);
ltw->type = type;
ltw->lcms = xform; xform = 0;
ltw->sig_in = color_in;
ltw->sig_out = color_out;
ltw->oy_pixel_layout_in = oy_pixel_layout_in;
ltw->oy_pixel_layout_out = oy_pixel_layout_out;
s = ltw;
}
if(!error)
oyPointer_Set( oy, 0, 0, s,
"lcmsCMMDeleteTransformWrap", lcmsCMMDeleteTransformWrap );
return s;
}
int lcmsIntentFromOptions ( oyOptions_s * opts,
int proof )
{
int intent = 0,
intent_proof = 0;
const char * o_txt = 0;
#ifndef oyStrlen_
#define oyStrlen_ strlen
#endif
o_txt = oyOptions_FindString ( opts, "rendering_intent", 0);
if(o_txt && oyStrlen_(o_txt))
intent = atoi( o_txt );
o_txt = oyOptions_FindString ( opts, "rendering_intent_proof", 0);
if(o_txt && oyStrlen_(o_txt))
intent_proof = atoi( o_txt );
intent_proof = intent_proof == 0 ? INTENT_RELATIVE_COLORIMETRIC :
INTENT_ABSOLUTE_COLORIMETRIC;
if(proof)
return intent_proof;
else
return intent;
}
uint32_t lcmsFlagsFromOptions ( oyOptions_s * opts )
{
int bpc = 0,
cmyk_cmyk_black_preservation = 0,
gamut_warning = 0,
precalculation = 0,
flags = 0;
const char * o_txt = 0;
o_txt = oyOptions_FindString ( opts, "rendering_bpc", 0 );
if(o_txt && oyStrlen_(o_txt))
bpc = atoi( o_txt );
o_txt = oyOptions_FindString ( opts, "rendering_gamut_warning", 0 );
if(o_txt && oyStrlen_(o_txt))
gamut_warning = atoi( o_txt );
o_txt = oyOptions_FindString ( opts, "precalculation", 0 );
if(o_txt && oyStrlen_(o_txt))
precalculation = atoi( o_txt );
o_txt = oyOptions_FindString ( opts, "cmyk_cmyk_black_preservation", 0 );
if(o_txt && oyStrlen_(o_txt))
cmyk_cmyk_black_preservation = atoi( o_txt );
/* this should be moved to the CMM and not be handled here in Oyranos */
flags = bpc ? flags | cmsFLAGS_WHITEBLACKCOMPENSATION :
flags & (~cmsFLAGS_WHITEBLACKCOMPENSATION);
flags = gamut_warning ? flags | cmsFLAGS_GAMUTCHECK :
flags & (~cmsFLAGS_GAMUTCHECK);
switch(precalculation)
{
case 0: flags |= cmsFLAGS_NOTPRECALC; break;
case 1: flags |= 0; break;
case 2: flags |= cmsFLAGS_HIGHRESPRECALC; break;
case 3: flags |= cmsFLAGS_LOWRESPRECALC; break;
}
flags = cmyk_cmyk_black_preservation ? flags | cmsFLAGS_PRESERVEBLACK :
flags & (~cmsFLAGS_PRESERVEBLACK);
if(oy_debug)
lcms_msg( oyMSG_WARN,0, OY_DBG_FORMAT_"\n"
" bpc: %d gamut_warning: %d precalculation: %d\n",
OY_DBG_ARGS_,
bpc, gamut_warning, precalculation );
return flags;
}
/** Function lcmsCMMConversionContextCreate_
* @brief create a CMM transform
*
* @version Oyranos: 0.1.10
* @since 2008/12/28 (Oyranos: 0.1.10)
* @date 2009/11/04
*/
cmsHTRANSFORM lcmsCMMConversionContextCreate_ (
cmsHPROFILE * lps,
int profiles_n,
oyProfiles_s * simulation,
int proof_n,
int proof,
oyPixel_t oy_pixel_layout_in,
oyPixel_t oy_pixel_layout_out,
oyOptions_s * opts,
lcmsTransformWrap_s ** ltw,
oyPointer_s * oy )
{
oyPixel_t lcms_pixel_layout_in = 0;
oyPixel_t lcms_pixel_layout_out = 0;
int error = !lps;
cmsHTRANSFORM xform = 0;
cmsHPROFILE * merge = 0;
icColorSpaceSignature color_in = 0;
icColorSpaceSignature color_out = 0;
icProfileClassSignature profile_class_in = 0;
int intent = lcmsIntentFromOptions( opts,0 ),
intent_proof = lcmsIntentFromOptions( opts,1 ),
cmyk_cmyk_black_preservation = 0,
flags = lcmsFlagsFromOptions( opts ),
gamut_warning = flags & cmsFLAGS_GAMUTCHECK;
const char * o_txt = 0;
if(!lps || !profiles_n || !oy_pixel_layout_in || !oy_pixel_layout_out)
return 0;
flags = proof ? flags | cmsFLAGS_SOFTPROOFING :
flags & (~cmsFLAGS_SOFTPROOFING);
if(!error)
{
color_in = lcmsGetColorSpace( lps[0] );
if(profiles_n > 1)
color_out = lcmsGetColorSpace( lps[profiles_n-1] );
else
color_out = lcmsGetPCS( lps[profiles_n-1] );
profile_class_in = lcmsGetDeviceClass( lps[0] );
}
lcms_pixel_layout_in = oyPixelToCMMPixelLayout_(oy_pixel_layout_in,
color_in);
lcms_pixel_layout_out = oyPixelToCMMPixelLayout_(oy_pixel_layout_out,
color_out);
o_txt = oyOptions_FindString ( opts, "cmyk_cmyk_black_preservation", 0 );
if(o_txt && oyStrlen_(o_txt))
cmyk_cmyk_black_preservation = atoi( o_txt );
if(cmyk_cmyk_black_preservation == 2)
lcmsSetCMYKPreservationStrategy( LCMS_PRESERVE_K_PLANE );
if(!error)
{
/* we have to erase the color space */
if(profiles_n == 1 || profile_class_in == icSigLinkClass)
{
xform = lcmsCreateTransform( lps[0], lcms_pixel_layout_in,
0, lcms_pixel_layout_out,
intent, flags );
}
else if(profiles_n == 2 && (!proof_n || (!proof && !gamut_warning)))
xform = lcmsCreateTransform( lps[0], lcms_pixel_layout_in,
lps[1], lcms_pixel_layout_out,
intent, flags );
else
{
int multi_profiles_n = profiles_n;
int i;
if(proof_n && (proof || gamut_warning))
{
int len = sizeof(cmsHPROFILE) * (profiles_n + proof_n);
oyAllocHelper_m_( merge, cmsHPROFILE, profiles_n + proof_n,0, goto end);
memset( merge, 0, len );
memcpy( merge, lps, sizeof(cmsHPROFILE) * (profiles_n - 1) );
for(i = 0; i < proof_n; ++i)
merge[profiles_n-1 + i] = lcmsAddProofProfile(
oyProfiles_Get(simulation,i),flags,
intent, intent_proof);
merge[profiles_n + proof_n -1] = lps[profiles_n - 1];
/* merge effect and simulation profiles */
multi_profiles_n += proof_n;
lps = merge;
}
if(flags & cmsFLAGS_GAMUTCHECK)
flags |= cmsFLAGS_GRIDPOINTS(lcmsPROOF_LUT_GRID_RASTER);
xform = lcmsCreateMultiprofileTransform(
lps,
multi_profiles_n,
lcms_pixel_layout_in,
lcms_pixel_layout_out,
intent, flags );
if(merge) { oyDeAllocateFunc_( merge ); merge = 0; }
}
}
if(oy_debug)
lcms_msg( oyMSG_WARN,0, OY_DBG_FORMAT_"\n"
" format: %d|%d intent: %d|%d flags: %d csp: %d|%d\n",
OY_DBG_ARGS_,
lcms_pixel_layout_in,lcms_pixel_layout_out, intent,intent_proof, flags,
(int)T_COLORSPACE(lcms_pixel_layout_in), (int)T_COLORSPACE(lcms_pixel_layout_out)
);
/* reset */
lcmsSetCMYKPreservationStrategy( LCMS_PRESERVE_PURE_K );
if(!error && ltw && oy)
*ltw= lcmsTransformWrap_Set_( xform, color_in, color_out,
oy_pixel_layout_in, oy_pixel_layout_out, oy );
end:
return xform;
}
/** Function lcmsCMMColorConversion_ToMem_
*
* convert a lcms color conversion context to a device link
*
* @version Oyranos: 0.1.10
* @since 2008/12/28 (Oyranos: 0.1.10)
* @date 2008/12/28
*/
oyPointer lcmsCMMColorConversion_ToMem_ (
cmsHTRANSFORM * xform,
size_t * size,
oyAlloc_f allocateFunc )
{
int error = !xform;
oyPointer data = 0;
if(!error)
{
cmsHPROFILE dl = lcmsTransform2DeviceLink( xform, 0 );
*size = 0;
{
int nargs = 1, i;
size_t size = sizeof(int) + nargs * sizeof(cmsPSEQDESC);
LPcmsSEQ pseq = (LPcmsSEQ) oyAllocateFunc_(size);
ZeroMemory(pseq, size);
pseq ->n = nargs;
for (i=0; i < nargs; i++) {
strcpy(pseq ->seq[i].Manufacturer, CMM_NICK);
strcpy(pseq ->seq[i].Model, "CMM ");
}
lcmsAddTag(dl, icSigProfileSequenceDescTag, pseq);
free(pseq);
}
l_cmsSaveProfileToMem( dl, 0, size );
data = allocateFunc( *size );
l_cmsSaveProfileToMem( dl, data, size );
}
return data;
}
oyOptions_s* lcmsFilter_CmmIccValidateOptions
( oyFilterCore_s * filter,
oyOptions_s * validate OY_UNUSED,
int statical OY_UNUSED,
uint32_t * result )
{
uint32_t error = !filter;
if(!error)
error = oyFilterRegistrationMatch(oyFilterCore_GetRegistration(filter),
"//"OY_TYPE_STD"/icc_color",
oyOBJECT_CMM_API4_S);
*result = error;
return 0;
}
oyWIDGET_EVENT_e lcmsWidgetEvent ( oyOptions_s * options OY_UNUSED,
oyWIDGET_EVENT_e type OY_UNUSED,
oyStruct_s * event OY_UNUSED )
{return 0;}
oyDATATYPE_e lcms_cmmIcc_data_types[7] = {oyUINT8, oyUINT16, oyDOUBLE, 0};
oyConnectorImaging_s_ lcms_cmmIccSocket_connector = {
oyOBJECT_CONNECTOR_IMAGING_S,0,0,
(oyObject_s)&oy_connector_imaging_static_object,
oyCMMgetImageConnectorSocketText, /* getText */
oy_image_connector_texts, /* texts */
"//" OY_TYPE_STD "/manipulator.data", /* connector_type */
oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
0, /* is_plug == oyFilterPlug_s */
lcms_cmmIcc_data_types, /* data_types */
3, /* data_types_n; elements in data_types array */
1, /* max_color_offset */
1, /* min_channels_count; */
16, /* max_channels_count; */
1, /* min_color_count; */
16, /* max_color_count; */
1, /* can_planar; can read separated channels */
1, /* can_interwoven; can read continuous channels */
1, /* can_swap; can swap color channels (BGR)*/
1, /* can_swap_bytes; non host byte order */
1, /* can_revert; revert 1 -> 0 and 0 -> 1 */
1, /* can_premultiplied_alpha; */
1, /* can_nonpremultiplied_alpha; */
0, /* can_subpixel; understand subpixel order */
0, /* oyCHANNELTYPE_e * channel_types; */
0, /* channel_types_n */
1, /* id; relative to oyFilterCore_s, e.g. 1 */
0 /* is_mandatory; mandatory flag */
};
oyConnectorImaging_s_* lcms_cmmIccSocket_connectors[2]={&lcms_cmmIccSocket_connector,0};
oyConnectorImaging_s_ lcms_cmmIccPlug_connector = {
oyOBJECT_CONNECTOR_IMAGING_S,0,0,
(oyObject_s)&oy_connector_imaging_static_object,
oyCMMgetImageConnectorPlugText, /* getText */
oy_image_connector_texts, /* texts */
"//" OY_TYPE_STD "/manipulator.data", /* connector_type */
oyFilterSocket_MatchImagingPlug, /* filterSocket_MatchPlug */
1, /* is_plug == oyFilterPlug_s */
lcms_cmmIcc_data_types, /* data_types */
3, /* data_types_n; elements in data_types array */
1, /* max_color_offset */
1, /* min_channels_count; */
16, /* max_channels_count; */
1, /* min_color_count; */
16, /* max_color_count; */
1, /* can_planar; can read separated channels */
1, /* can_interwoven; can read continuous channels */
1, /* can_swap; can swap color channels (BGR)*/
1, /* can_swap_bytes; non host byte order */
1, /* can_revert; revert 1 -> 0 and 0 -> 1 */
1, /* can_premultiplied_alpha; */
1, /* can_nonpremultiplied_alpha; */
0, /* can_subpixel; understand subpixel order */
0, /* oyCHANNELTYPE_e * channel_types; */
0, /* channel_types_n */
1, /* id; relative to oyFilterCore_s, e.g. 1 */
0 /* is_mandatory; mandatory flag */
};
oyConnectorImaging_s_* lcms_cmmIccPlug_connectors[2]={&lcms_cmmIccPlug_connector,0};
/** Function lcmsAddProofProfile
 *  @brief add an abstract proofing profile to the lcms profile stack
 *
 *  Look in the Oyranos cache for a CMM internal representation or generate a
 *  new abstract profile containing the proofing profile's changes. This can be
 *  a proofing color space simulation or out-of-gamut marking.
*
* @version Oyranos: 0.1.10
* @since 2009/11/05 (Oyranos: 0.1.10)
* @date 2009/11/05
*/
cmsHPROFILE lcmsAddProofProfile ( oyProfile_s * proof,
DWORD flags,
int intent,
int intent_proof )
{
int error = 0;
cmsHPROFILE * hp = 0;
oyPointer_s * cmm_ptr = 0;
lcmsProfileWrap_s * s = 0;
char * hash_text = 0,
num[12];
if(!proof || proof->type_ != oyOBJECT_PROFILE_S)
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)proof, OY_DBG_FORMAT_" "
"no profile provided", OY_DBG_ARGS_ );
return 0;
}
/* build hash text */
STRING_ADD( hash_text, "abstract proofing profile " );
STRING_ADD( hash_text, oyObject_GetName( proof->oy_, oyNAME_NICK ) );
STRING_ADD( hash_text, " intent:" );
sprintf( num, "%d", intent );
STRING_ADD( hash_text, num );
STRING_ADD( hash_text, " intent_proof:" );
sprintf( num, "%d", intent_proof );
STRING_ADD( hash_text, num );
STRING_ADD( hash_text, " flags|gmtCheck|softPrf:" );
sprintf( num, "%d|%d|%d", (int)flags, (flags & cmsFLAGS_GAMUTCHECK)?1:0,
(flags & cmsFLAGS_SOFTPROOFING)?1:0 );
STRING_ADD( hash_text, num );
/* cache look up */
cmm_ptr = oyPointer_LookUpFromText( hash_text, lcmsPROFILE );
oyPointer_Set( cmm_ptr, CMM_NICK, 0,0,0,0 );
  /* for an empty cache entry create a new abstract profile */
if(!oyPointer_GetPointer(cmm_ptr))
{
oyPointer_s * oy = cmm_ptr;
char * type_ = lcmsPROFILE;
uint32_t type = *((uint32_t*)type_);
size_t size = 0;
oyPointer block = 0;
lcmsProfileWrap_s * s = calloc(sizeof(lcmsProfileWrap_s), 1);
if(oy_debug == 1)
fprintf( stderr, OY_DBG_FORMAT_" created: \"%s\"",
OY_DBG_ARGS_, hash_text );
else
lcms_msg( oyMSG_DBG, (oyStruct_s*)proof,
OY_DBG_FORMAT_" created abstract proofing profile: \"%s\"",
OY_DBG_ARGS_, hash_text );
/* create */
hp = lcmsGamutCheckAbstract( proof, flags, intent, intent_proof );
if(hp)
{
/* save to memory */
l_cmsSaveProfileToMem( hp, 0, &size );
block = oyAllocateFunc_( size );
l_cmsSaveProfileToMem( hp, block, &size );
lcmsCloseProfile( hp ); hp = 0;
}
s->type = type;
s->size = size;
s->block = block;
/* reopen */
s->lcms = CMMProfileOpen_M( block, size );
error = oyPointer_Set( oy, 0,lcmsPROFILE, s, CMMToString_M(CMMProfileOpen_M),
lcmsCMMProfileReleaseWrap );
}
if(!error)
{
s = lcmsCMMProfile_GetWrap_( cmm_ptr );
error = !s;
}
if(!error)
hp = s->lcms;
oyPointer_Release( &cmm_ptr );
if(hash_text)
oyFree_m_(hash_text);
if(!error)
return hp;
else
return 0;
}
/** Function lcmsAddProfile
* @brief add a profile from Oyranos to the lcms profile stack
*
* Look in the Oyranos cache for a CMM internal representation
*
* @version Oyranos: 0.1.10
* @since 2008/12/28 (Oyranos: 0.1.10)
* @date 2008/12/28
*/
cmsHPROFILE lcmsAddProfile ( oyProfile_s * p )
{
int error = 0;
cmsHPROFILE * hp = 0;
oyPointer_s * cmm_ptr = 0;
lcmsProfileWrap_s * s = 0;
if(!p || p->type_ != oyOBJECT_PROFILE_S)
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)p, OY_DBG_FORMAT_" "
"no profile provided", OY_DBG_ARGS_ );
return 0;
}
cmm_ptr = oyPointer_LookUpFromObject( (oyStruct_s*)p, lcmsPROFILE );
if(!cmm_ptr)
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)p,
OY_DBG_FORMAT_" oyPointer_LookUpFromObject() failed", OY_DBG_ARGS_ );
return 0;
}
oyPointer_Set( cmm_ptr, CMM_NICK, 0,0,0,0 );
if(!oyPointer_GetPointer( cmm_ptr ))
error = lcmsCMMData_Open( (oyStruct_s*)p, cmm_ptr );
if(!error)
{
s = lcmsCMMProfile_GetWrap_( cmm_ptr );
error = !s;
}
if(!error)
hp = s->lcms;
oyPointer_Release( &cmm_ptr );
if(!error)
return hp;
else
return 0;
}
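/* Sampler passed to lcmsSample3DGrid(): each Lab grid node is sent through
   the proofing transform (Cargo[0]); when gamut marking is requested
   (Cargo[1] != 0), nodes whose deltaE against the proofed value exceeds 10
   are replaced by neutral gray (L=50, a=b=0). */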
int
gamutCheckSampler(register WORD In[],
register WORD Out[],
register LPVOID Cargo)
{
cmsCIELab Lab1, Lab2;
double d;
oyPointer * ptr = (oyPointer*)Cargo;
lcmsLabEncoded2Float(&Lab1, In);
lcmsDoTransform( ptr[0], &Lab1, &Lab2, 1 );
d = lcmsDeltaE( &Lab1, &Lab2 );
if(fabs(d) > 10.0 && ptr[1])
{
Lab2.L = 50.0;
Lab2.a = Lab2.b = 0.0;
}
lcmsFloat2LabEncoded(Out, &Lab2);
return TRUE;
}
/** Function lcmsGamutCheckAbstract
 *  @brief convert a proofing profile into an abstract one
 *
 *  Abstract profiles can easily be merged into a multi-profile transform.
*
* @param proof the proofing profile; owned by the
* function
* @param flags the gamut check and softproof flags
* @param intent rendering intent
* @param intent_proof proof rendering intent
*
* @version Oyranos: 0.1.10
* @since 2009/11/04 (Oyranos: 0.1.10)
* @date 2009/11/05
*/
cmsHPROFILE lcmsGamutCheckAbstract ( oyProfile_s * proof,
DWORD flags,
int intent,
int intent_proof )
{
size_t size = 0;
char * data = 0;
cmsHPROFILE gmt = 0,
hLab = 0,
hproof = 0;
cmsHTRANSFORM tr1 = 0;
LPLUT gmt_lut = 0;
oyPointer ptr[2] = {0,0};
if(!(flags & cmsFLAGS_GAMUTCHECK || flags & cmsFLAGS_SOFTPROOFING))
return gmt;
hLab = lcmsCreateLabProfile(lcmsD50_xyY());
hproof = lcmsAddProfile( proof );
tr1 = lcmsCreateProofingTransform (hLab, TYPE_Lab_DBL,
hLab, TYPE_Lab_DBL,
hproof,
intent,
                                      /* TODO The INTENT_ABSOLUTE_COLORIMETRIC should lead to
                                         paper simulation, but it does take the white point into
                                         account. Do we want this?
                                       */
intent_proof,
flags | cmsFLAGS_HIGHRESPRECALC);
ptr[0] = tr1;
ptr[1] = flags & cmsFLAGS_GAMUTCHECK ? (oyPointer)1 : 0;
gmt_lut = lcmsAllocLUT();
lcmsAlloc3DGrid( gmt_lut, lcmsPROOF_LUT_GRID_RASTER, 3, 3);
lcmsSample3DGrid( gmt_lut, gamutCheckSampler, &ptr, 0 );
gmt = l_cmsCreateProfilePlaceholder();
lcmsSetDeviceClass( gmt, icSigAbstractClass );
lcmsSetColorSpace( gmt, icSigLabData );
lcmsSetPCS( gmt, icSigLabData );
lcmsAddTag( gmt, icSigProfileDescriptionTag, (char*)"proofing");
lcmsAddTag( gmt, icSigCopyrightTag, (char*)"no copyright; use freely" );
lcmsAddTag( gmt, icSigMediaWhitePointTag, lcmsD50_XYZ() );
lcmsAddTag( gmt, icSigAToB0Tag, gmt_lut );
if(oy_debug && getenv("OY_DEBUG_WRITE"))
{
l_cmsSaveProfileToMem( gmt, 0, &size );
data = oyAllocateFunc_( size );
l_cmsSaveProfileToMem( gmt, data, &size );
oyWriteMemToFile_( "dbg_dl_proof.icc", data, size );
if(data) { oyDeAllocateFunc_( data ); data = 0; }
}
if(hLab) { lcmsCloseProfile( hLab ); hLab = 0; }
if(tr1) { lcmsDeleteTransform( tr1 ); tr1 = 0; }
if(gmt_lut) { lcmsFreeLUT( gmt_lut ); gmt_lut = 0; }
return gmt;
}
/**
* This function implements oyMOptions_Handle_f.
*
* @version Oyranos: 0.3.0
* @since 2011/02/21 (Oyranos: 0.3.0)
* @date 2011/02/21
*/
int lcmsMOptions_Handle2 ( oyOptions_s * options,
const char * command,
oyOptions_s ** result )
{
int error = 0;
oyProfile_s * prof = 0,
* p = 0;
if(oyFilterRegistrationMatch(command,"can_handle", 0))
{
if(oyFilterRegistrationMatch(command,"create_profile", 0))
{
p = (oyProfile_s*) oyOptions_GetType( options,-1, "proofing_profile",
oyOBJECT_PROFILE_S );
if(!p)
{
error = -1;
}
oyProfile_Release( &p );
return error;
}
else
return -1;
}
else if(oyFilterRegistrationMatch(command,"create_profile", 0))
{
p = (oyProfile_s*) oyOptions_GetType( options,-1, "proofing_profile",
oyOBJECT_PROFILE_S );
if(p)
{
int intent = lcmsIntentFromOptions( options,0 ),
intent_proof = lcmsIntentFromOptions( options,1 ),
flags = lcmsFlagsFromOptions( options );
oyOption_s * o;
size_t size = 0;
char * block = 0;
cmsHPROFILE hp = lcmsAddProofProfile( p, flags | cmsFLAGS_SOFTPROOFING,
intent, intent_proof );
oyProfile_Release( &p );
if(hp)
{
l_cmsSaveProfileToMem( hp, 0, &size );
block = oyAllocateFunc_( size );
l_cmsSaveProfileToMem( hp, block, &size );
lcmsCloseProfile( hp ); hp = 0;
}
prof = oyProfile_FromMem( size, block, 0, 0 );
if(block && size)
{ free(block); block = 0; size = 0; }
o = oyOption_FromRegistration( OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "icc_profile.create_profile.proofing_effect._" CMM_NICK,
0 );
error = oyOption_MoveInStruct( o, (oyStruct_s**) &prof );
if(!*result)
*result = oyOptions_New(0);
oyOptions_MoveIn( *result, &o, -1 );
} else
lcms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_ " "
"no option \"proofing_effect\" of type oyProfile_s found",
OY_DBG_ARGS_ );
}
return 0;
}
oyProfiles_s * lcmsProfilesFromOptions( oyFilterNode_s * node, oyFilterPlug_s * plug,
oyOptions_s * node_options,
const char * key, int profiles_switch, int verbose )
{
oyProfiles_s * profiles = NULL;
oyOption_s * o = NULL;
if(profiles_switch || oy_debug || verbose)
o = oyOptions_Find( node_options, key, oyNAME_PATTERN );
if(o)
{
profiles = (oyProfiles_s*) oyOption_GetStruct( o, oyOBJECT_PROFILES_S );
if((oy_debug || verbose))
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_
" found \"%s\" %d switch %d",
OY_DBG_ARGS_, key, oyProfiles_Count( profiles ), profiles_switch );
} else
if( !profiles )
{
oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_OPTION );
lcms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_
" incompatible \"%s\"", OY_DBG_ARGS_, key );
}
oyOption_Release( &o );
}
if(!profiles_switch)
oyProfiles_Release( &profiles );
return profiles;
}
/** Function lcmsFilterNode_CmmIccContextToMem
* @brief implement oyCMMFilterNode_CreateContext_f()
*
* @version Oyranos: 0.1.8
* @since 2008/11/01 (Oyranos: 0.1.8)
* @date 2008/11/01
*/
oyPointer lcmsFilterNode_CmmIccContextToMem (
oyFilterNode_s * node,
size_t * size,
oyAlloc_f allocateFunc )
{
/*int error = !node || !size;*/
oyPointer block = 0;
int error = 0;
int n,i,len;
oyDATATYPE_e data_type = 0;
size_t size_ = 0;
oyFilterPlug_s * plug = oyFilterNode_GetPlug( node, 0 );
oyFilterSocket_s * socket = oyFilterNode_GetSocket( node, 0 ),
* remote_socket = oyFilterPlug_GetSocket( plug );
oyImage_s * image_input = 0,
* image_output = 0;
cmsHPROFILE * lps = 0;
cmsHTRANSFORM xform = 0;
oyOptions_s * node_tags = oyFilterNode_GetTags( node ),
* node_options = oyFilterNode_GetOptions( node, 0 );
oyProfile_s * p = 0,
* prof = 0,
* image_input_profile,
* image_output_profile;
oyProfiles_s * profiles = 0,
* profs = 0;
oyProfileTag_s * psid = 0,
* info = 0,
* cprt = 0;
int profiles_n = 0,
profiles_simulation_n = 0,
proof = 0,
effect_switch = 0;
int verbose = oyOptions_FindString( node_tags, "verbose", "true" ) ? 1 : 0;
image_input = (oyImage_s*)oyFilterSocket_GetData( remote_socket );
image_output = (oyImage_s*)oyFilterSocket_GetData( socket );
image_input_profile = oyImage_GetProfile( image_input );
image_output_profile = oyImage_GetProfile( image_output );
if(!image_input)
return 0;
if(image_input->type_ != oyOBJECT_IMAGE_S)
{
oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
lcms_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_" missed input image %d", OY_DBG_ARGS_,
image_input->type_ );
}
if(!image_output || image_output->type_ != oyOBJECT_IMAGE_S)
{
oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
lcms_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_" missed output image %d", OY_DBG_ARGS_,
image_output?image_output->type_:0 );
}
data_type = oyToDataType_m( oyImage_GetPixelLayout( image_input, oyLAYOUT ) );
if(data_type == oyFLOAT)
{
oyFilterSocket_Callback( plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
lcms_msg( oyMSG_WARN, (oyStruct_s*)node,
OY_DBG_FORMAT_" can not handle oyFLOAT", OY_DBG_ARGS_ );
}
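/* room for the input and output profiles plus up to 15 effect/simulation
profiles and a terminating NULL entry (an assumption read off the
15 + 2 + 1 arithmetic below) */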
len = sizeof(cmsHPROFILE) * (15 + 2 + 1);
lps = oyAllocateFunc_( len );
memset( lps, 0, len );
/* input profile */
if(!image_input_profile)
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_" "
"missed image_input->profile_", OY_DBG_ARGS_ );
return 0;
}
lps[ profiles_n++ ] = lcmsAddProfile( image_input_profile );
p = oyProfile_Copy( image_input_profile, 0 );
profs = oyProfiles_New( 0 );
oyProfiles_MoveIn( profs, &p, -1 );
/* effect profiles */
effect_switch = oyOptions_FindString ( node_options, "effect_switch", "1" ) ? 1 : 0;
profiles = lcmsProfilesFromOptions( node, plug, node_options,
"profiles_effect", effect_switch, verbose );
n = oyProfiles_Count( profiles );
if(n)
for(i = 0; i < n; ++i)
{
p = oyProfiles_Get( profiles, i );
/* Look in the Oyranos cache for a CMM internal representation */
lps[ profiles_n++ ] = lcmsAddProfile( p );
error = oyProfiles_MoveIn( profs, &p, -1 );
}
oyProfiles_Release( &profiles );
/* simulation profile */
proof = oyOptions_FindString ( node_options, "proof_soft", "1" ) ? 1 : 0;
proof += oyOptions_FindString ( node_options, "proof_hard", "1" ) ? 1 : 0;
if(oy_debug > 2 && proof)
lcms_msg( oyMSG_DBG, (oyStruct_s*)node, OY_DBG_FORMAT_
" proof requested",OY_DBG_ARGS_);
profiles = lcmsProfilesFromOptions( node, plug, node_options,
"profiles_simulation", proof, verbose );
n = oyProfiles_Count( profiles );
if(n)
for(i = 0; i < n; ++i)
{
p = oyProfiles_Get( profiles, i );
if(oy_debug)
lcms_msg( oyMSG_DBG,(oyStruct_s*)node, OY_DBG_FORMAT_
" found profile: %s",
OY_DBG_ARGS_, p?oyProfile_GetFileName( p,-1 ):"????");
error = oyProfiles_MoveIn( profs, &p, -1 );
++profiles_simulation_n;
oyProfile_Release( &p );
}
else
if(verbose || oy_debug > 2)
lcms_msg( oyMSG_DBG,(oyStruct_s*)node, OY_DBG_FORMAT_
" no simulation profile found", OY_DBG_ARGS_);
/* output profile */
if(!image_output_profile)
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)node, OY_DBG_FORMAT_" "
"missed image_output->profile_", OY_DBG_ARGS_ );
return 0;
}
lps[ profiles_n++ ] = lcmsAddProfile( image_output_profile );
p = oyProfile_Copy( image_output_profile, 0 );
oyProfiles_MoveIn( profs, &p, -1 );
*size = 0;
/* create the context */
xform = lcmsCMMConversionContextCreate_( lps, profiles_n,
profiles, profiles_simulation_n, proof,
oyImage_GetPixelLayout( image_input, oyLAYOUT ),
oyImage_GetPixelLayout( image_output, oyLAYOUT ),
node_options, 0, 0);
error = !xform;
if(!error)
{
if(oy_debug)
block = lcmsCMMColorConversion_ToMem_( xform, size, oyAllocateFunc_ );
else
block = lcmsCMMColorConversion_ToMem_( xform, size, allocateFunc );
error = !block && !*size;
lcmsDeleteTransform( xform ); xform = 0;
}
/* additional tags for debugging */
if(!error && (oy_debug || verbose))
{
if(!error && size)
{
size_ = *size;
prof = oyProfile_FromMem( size_, block, 0, 0 );
psid = oyProfile_GetTagById( prof, icSigProfileSequenceIdentifierTag );
/* icSigProfileSequenceIdentifierType */
if(!psid)
{
oyStructList_s * list = oyStructList_New(0);
int i, n = oyProfiles_Count( profs );
for( i = 0; i < n ; ++i )
{
oyProfile_s * p = oyProfiles_Get( profs, i );
oyStructList_MoveIn( list, (oyStruct_s**) &p, -1, 0 );
}
psid = oyProfileTag_Create( list, icSigProfileSequenceIdentifierTag,
icSigProfileSequenceIdentifierType, 0, 0 );
if(psid)
error = oyProfile_TagMoveIn ( prof, &psid, -1 );
oyStructList_Release( &list );
}
/* Info tag */
if(!error)
{
oyStructList_s * list = oyStructList_Create( oyOBJECT_NONE, "lcmsFilterNode_CmmIccContextToMem()", 0);
char h[5] = {"Info"};
uint32_t * hi = (uint32_t*)&h;
char * cc_name = lcmsFilterNode_GetText( node, oyNAME_NICK,
oyAllocateFunc_ );
const char * lib_name = oyFilterNode_GetModuleName( node );
oyStructList_MoveInName( list, &cc_name, 0, oyNAME_NAME );
oyStructList_AddName( list, lib_name, 0, oyNAME_NICK );
if(!error)
{
info = oyProfileTag_Create( list, (icTagSignature)oyValueUInt32(*hi),
icSigTextType, 0, 0);
error = !info;
}
oyStructList_Release( &list );
if(info)
error = oyProfile_TagMoveIn ( prof, &info, -1 );
}
if(!error)
cprt = oyProfile_GetTagById( prof, icSigCopyrightTag );
/* icSigCopyrightTag */
if(!error && !cprt)
{
oyStructList_s * list = oyStructList_New(0);
error = oyStructList_AddName( list, "no copyright; use freely", -1, oyNAME_NAME );
if(!error)
{
cprt = oyProfileTag_Create( list, icSigCopyrightTag,
icSigTextType, 0, 0);
error = !cprt;
}
oyStructList_Release( &list );
if(!error)
error = oyProfile_TagMoveIn ( prof, &cprt, -1 );
}
if(block)
{ oyDeAllocateFunc_( block ); block = 0; size_ = 0; }
block = oyProfile_GetMem( prof, &size_, 0, allocateFunc );
*size = size_;
oyProfile_Release( &prof );
}
}
oyFilterPlug_Release( &plug );
oyFilterSocket_Release( &socket );
oyFilterSocket_Release( & remote_socket );
oyOptions_Release( &node_tags );
oyImage_Release( &image_input );
oyImage_Release( &image_output );
oyProfile_Release( &image_input_profile );
oyProfile_Release( &image_output_profile );
oyOptions_Release( &node_options );
return block;
}
void lcmsTextFromFlags ( char ** text,
int flags );
void lcmsTextFromCmmLayout ( char ** text,
int cmm_pixel_layout )
{
int f = cmm_pixel_layout;
oyStringAddPrintf( text, 0,0,
" <cmm_pixel_layout cmm=\"%s\" value=\"%d\" dither=\"%d\" colorspace=\"%d\" swapfirst=\"%d\" flavor=\"%d\" planar=\"%d\" endian16=\"%d\" doswap=\"%d\" extra=\"%d\" channels=\"%d\" bytes=\"%d\" />\n",
CMM_NICK, f,
T_DITHER(f),
T_COLORSPACE(f),
T_SWAPFIRST(f),
T_FLAVOR(f),
T_PLANAR(f),
T_ENDIAN16(f),
T_DOSWAP(f),
T_EXTRA(f),
T_CHANNELS(f),
T_BYTES(f)
);
}
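/* For illustration: lcms' TYPE_RGBA_8 layout (3 color channels, 1 extra
alpha channel, 1 byte per sample) would be rendered by the function above
as a line of roughly this shape (the value attribute depends on the lcms
bit layout and is not reproduced here):
<cmm_pixel_layout cmm="lcms" value="..." ... extra="1" channels="3" bytes="1" />
*/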
char * lcmsImage_GetText ( oyImage_s * image,
int verbose,
oyAlloc_f allocateFunc )
{
oyPixel_t pixel_layout = oyImage_GetPixelLayout(image,oyLAYOUT);
int n = oyToChannels_m( pixel_layout );
oyProfile_s * profile = oyImage_GetProfile( image );
int cchan_n = oyProfile_GetChannelsCount( profile );
int coff_x = oyToColorOffset_m( pixel_layout );
oyDATATYPE_e t = oyToDataType_m( pixel_layout );
int swap = oyToSwapColorChannels_m( pixel_layout );
/*int revert= oyT_FLAVOR_M( pixel_layout );*/
int so = oyDataTypeGetSize( t );
char * text = oyAllocateFunc_(512);
char * hash_text = 0;
oyImage_s * s = image;
/* describe the image */
oySprintf_( text, " <oyImage_s\n");
hashTextAdd_m( text );
if(oy_debug || verbose)
oySprintf_( text, " profile=\"%s\"\n", oyProfile_GetText(profile,
oyNAME_NAME));
else
oySprintf_( text, " %s\n", oyProfile_GetText(profile, oyNAME_NICK));
hashTextAdd_m( text );
oySprintf_( text, " <channels all=\"%d\" color=\"%d\" />\n", n,cchan_n);
hashTextAdd_m( text );
oySprintf_( text,
" <offsets first_color_sample=\"%d\" next_pixel=\"%d\" />\n"
/*" next line = %d\n"*/,
coff_x, oyImage_GetPixelLayout( s,oyPOFF_X )/*, mask[oyPOFF_Y]*/ );
hashTextAdd_m( text );
if(swap || oyToByteswap_m( pixel_layout ))
{
hashTextAdd_m( " <swap" );
if(swap)
hashTextAdd_m( " colorswap=\"yes\"" );
if( oyToByteswap_m( pixel_layout ) )
hashTextAdd_m( " byteswap=\"yes\"" );
hashTextAdd_m( " />\n" );
}
if( oyToFlavor_m( pixel_layout ) )
{
oySprintf_( text, " <flavor value=\"yes\" />\n" );
hashTextAdd_m( text );
}
oySprintf_( text, " <sample_type value=\"%s[%dByte]\" />\n",
oyDataTypeToText(t), so );
hashTextAdd_m( text );
{
icColorSpaceSignature color_space = oyProfile_GetSignature(
profile, oySIGNATURE_COLOR_SPACE );
int lcms_pixel_layout = oyPixelToCMMPixelLayout_( pixel_layout,
color_space );
lcmsTextFromCmmLayout( &hash_text, lcms_pixel_layout );
}
oySprintf_( text, " </oyImage_s>");
hashTextAdd_m( text );
if(allocateFunc == oyAllocateFunc_)
oyDeAllocateFunc_(text);
else
{
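/* duplicate the hash text with the caller's allocator and release the
internally allocated copy */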
oyDeAllocateFunc_(text);
text = hash_text;
hash_text = oyStringCopy_( text, allocateFunc );
oyDeAllocateFunc_( text );
}
text = 0;
return hash_text;
}
/** Function lcmsFilterNode_GetText
* @brief implement oyCMMFilterNode_GetText_f()
*
* @version Oyranos: 0.1.10
* @since 2008/12/27 (Oyranos: 0.1.10)
* @date 2009/06/02
*/
char * lcmsFilterNode_GetText ( oyFilterNode_s * node,
oyNAME_e type,
oyAlloc_f allocateFunc )
{
#ifdef NO_OPT
return oyStringCopy_( oyFilterNode_GetText( node, type ), allocateFunc );
#else
const char * tmp = 0,
* model = 0;
char * hash_text = 0,
* temp = 0;
oyFilterNode_s * s = node;
oyImage_s * in_image = 0,
* out_image = 0;
int verbose;
oyOptions_s * opts = oyFilterNode_GetOptions( node, 0 );
oyOptions_s * node_tags = oyFilterNode_GetTags( node ),
* opts_tmp, * opts_tmp2, * options;
oyFilterCore_s * node_core = oyFilterNode_GetCore( node );
oyFilterPlug_s * plug = oyFilterNode_GetPlug( node, 0 );
oyFilterSocket_s * socket = oyFilterNode_GetSocket( node, 0 ),
* remote_socket = oyFilterPlug_GetSocket( plug );
/* pick the plug (input) data from the remote socket */
in_image = (oyImage_s*)oyFilterSocket_GetData( remote_socket );
/* pick the socket (output) data */
out_image = (oyImage_s*)oyFilterSocket_GetData( socket );
if(!node)
return 0;
verbose = oyOptions_FindString( node_tags, "verbose", "true" ) ? 1 : 0;
/* 1. create hash text */
hashTextAdd_m( "<oyFilterNode_s>\n " );
/* the filter text */
hashTextAdd_m( oyFilterCore_GetText( node_core, oyNAME_NAME ) );
/* make a description */
{
/* input data */
hashTextAdd_m( " <data_in>\n" );
if(in_image)
{
temp = lcmsImage_GetText( in_image, verbose, oyAllocateFunc_ );
hashTextAdd_m( temp );
oyDeAllocateFunc_(temp); temp = 0;
}
hashTextAdd_m( "\n </data_in>\n" );
/* pick inbuild defaults */
opts_tmp2 = oyOptions_FromText( lcms_extra_options, 0, NULL );
opts_tmp = oyOptions_ForFilter( "//" OY_TYPE_STD "/icc_color",
oyOPTIONSOURCE_FILTER | OY_SELECT_COMMON , 0 );
options = oyOptions_FromBoolean( opts_tmp, opts_tmp2, oyBOOLEAN_UNION,NULL);
oyOptions_Release( &opts_tmp );
oyOptions_Release( &opts_tmp2 );
opts_tmp = options;
/* add existing custom options */
options = oyOptions_FromBoolean( opts_tmp, opts, oyBOOLEAN_UNION,NULL);
oyOptions_Release( &opts_tmp );
/* options -> xforms */
if(type != oyNAME_NICK)
{
hashTextAdd_m( " <oyOptions_s>\n" );
model = oyOptions_GetText( opts, oyNAME_NAME );
hashTextAdd_m( model );
hashTextAdd_m( "\n </oyOptions_s>\n" );
}
/* cmm options */
{
int intent = lcmsIntentFromOptions( options,0 ),
intent_proof = lcmsIntentFromOptions( options,1 ),
flags = lcmsFlagsFromOptions( options );
oyStringAddPrintf( &hash_text, 0,0,
" <cmm_options intent=\"%d\" intent_proof=\"%d\" flags=\"%d\" />\n",
intent,intent_proof, flags
);
}
/* output data */
hashTextAdd_m( " <data_out>\n" );
if(out_image)
{
temp = lcmsImage_GetText( out_image, verbose, oyAllocateFunc_ );
hashTextAdd_m( temp );
oyDeAllocateFunc_( temp ); temp = 0;
}
hashTextAdd_m( "\n </data_out>\n" );
}
hashTextAdd_m( tmp );
hashTextAdd_m( "</oyFilterNode_s>\n" );
oyOptions_Release( &opts );
oyOptions_Release( &node_tags );
oyFilterCore_Release( &node_core );
oyFilterPlug_Release( &plug );
oyFilterSocket_Release( &socket );
oyFilterSocket_Release( &remote_socket );
return oyStringCopy_( hash_text, allocateFunc );
#endif
}
/** Function lcmsModuleData_Convert
* @brief convert between data formats
* @ingroup cmm_handling
*
* The function might be used to provide a module specific context.
* Implements oyModuleData_Convert_f
*
* @version Oyranos: 0.3.0
* @since 2008/12/28 (Oyranos: 0.1.10)
* @date 2008/12/28
*/
int lcmsModuleData_Convert ( oyPointer_s * data_in,
oyPointer_s * data_out,
oyFilterNode_s * node )
{
int error = !data_in || !data_out;
oyPointer_s * cmm_ptr_in = data_in,
* cmm_ptr_out = data_out;
lcmsTransformWrap_s * ltw = 0;
cmsHTRANSFORM xform = 0;
cmsHPROFILE lps[2] = {0,0};
oyFilterPlug_s * plug = oyFilterNode_GetPlug( node, 0 );
oyFilterSocket_s * socket = oyFilterNode_GetSocket( node, 0 ),
* remote_socket = oyFilterPlug_GetSocket( plug );
oyOptions_s * node_options = oyFilterNode_GetOptions( node, 0 );
oyImage_s * image_input = (oyImage_s*)oyFilterSocket_GetData( remote_socket ),
* image_output = (oyImage_s*)oyFilterSocket_GetData( socket );
if(!error)
{
cmm_ptr_in = (oyPointer_s*) data_in;
cmm_ptr_out = (oyPointer_s*) data_out;
}
if(!error &&
( (strcmp( oyPointer_GetResourceName(cmm_ptr_in), oyCOLOR_ICC_DEVICE_LINK ) != 0) ||
(strcmp( oyPointer_GetResourceName(cmm_ptr_out), lcmsTRANSFORM ) != 0) ) )
error = 1;
if(!error)
{
lps[0] = CMMProfileOpen_M( oyPointer_GetPointer(cmm_ptr_in),
oyPointer_GetSize( cmm_ptr_in ) );
xform = lcmsCMMConversionContextCreate_( lps, 1, 0,0,0,
oyImage_GetPixelLayout( image_input, oyLAYOUT ),
oyImage_GetPixelLayout( image_output, oyLAYOUT ),
node_options,
&ltw, cmm_ptr_out );
if(!xform)
{
uint32_t f = oyImage_GetPixelLayout( image_input, oyLAYOUT );
lcms_msg( oyMSG_WARN,(oyStruct_s*) node, OY_DBG_FORMAT_
"colorspace:%d extra:%d channels:%d lcms_bytes%d",
OY_DBG_ARGS_,
T_COLORSPACE(f), T_EXTRA(f), T_CHANNELS(f), T_BYTES(f) );
error = 1;
}
CMMProfileRelease_M (lps[0] );
}
oyFilterPlug_Release( &plug );
oyFilterSocket_Release( &socket );
oyFilterSocket_Release( & remote_socket );
oyImage_Release( &image_input );
oyImage_Release( &image_output );
oyOptions_Release( &node_options );
return error;
}
/** Function lcmsFilterPlug_CmmIccRun
* @brief implement oyCMMFilterPlug_GetNext_f()
*
* @version Oyranos: 0.1.10
* @since 2008/07/18 (Oyranos: 0.1.8)
* @date 2009/05/01
*/
int lcmsFilterPlug_CmmIccRun ( oyFilterPlug_s * requestor_plug,
oyPixelAccess_s * ticket )
{
int j, k, n;
int error = 0;
int channels = 0;
oyDATATYPE_e data_type_in = 0,
data_type_out = 0;
int bps_in;
#if defined(DEBUG)
oyPixel_t pixel_layout_in;
#endif
oyFilterSocket_s * socket = oyFilterPlug_GetSocket( requestor_plug );
oyFilterPlug_s * plug = 0;
oyFilterNode_s * input_node,
* node = oyFilterSocket_GetNode( socket );
oyImage_s * image_input = 0, * image_output = 0;
oyArray2d_s * array_in = 0, * array_out = 0;
lcmsTransformWrap_s * ltw = 0;
oyPixelAccess_s * new_ticket = ticket;
plug = oyFilterNode_GetPlug( node, 0 );
input_node = oyFilterNode_GetPlugNode( node, 0 );
image_input = oyFilterPlug_ResolveImage( plug, socket, ticket );
#if defined(DEBUG)
pixel_layout_in = oyImage_GetPixelLayout( image_input, oyLAYOUT );
#endif
image_output = oyPixelAccess_GetOutputImage( ticket );
if(oyImage_GetPixelLayout( image_input, oyLAYOUT ) !=
oyImage_GetPixelLayout( image_output, oyLAYOUT ))
{
/* create a new ticket to avoid pixel layout conflicts */
/* keep old ticket array dimensions */
oyArray2d_s * a,
* old_a = oyPixelAccess_GetArray( new_ticket );
new_ticket = oyPixelAccess_Copy( ticket, ticket->oy_ );
/* remove the old array as its layout does not fit */
oyPixelAccess_SetArray( new_ticket, 0, 0 );
/* should be empty */
a = oyPixelAccess_GetArray( new_ticket );
if(!a)
{
int channels_out = oyImage_GetPixelLayout( image_output, oyCHANS );
int channels_in = oyImage_GetPixelLayout( image_input, oyCHANS );
/* Use the original pixel size to be safe and do not fiddle with ROIs */
int w = oyArray2d_GetDataGeo1( old_a, 2 ) / channels_out;
int h = oyArray2d_GetDataGeo1( old_a, 3 );
a = oyArray2d_Create( NULL, w * channels_in,h, oyToDataType_m( oyImage_GetPixelLayout( image_input, oyLAYOUT ) ), ticket->oy_ );
}
oyArray2d_Release( &old_a );
oyPixelAccess_SetArray( new_ticket, a, 0 );
oyArray2d_Release( &a );
}
/* We let the input filter do its processing first. */
error = oyFilterNode_Run( input_node, plug, new_ticket );
if(error != 0) return error;
array_in = oyPixelAccess_GetArray( new_ticket );
array_out = oyPixelAccess_GetArray( ticket );
data_type_in = oyToDataType_m( oyImage_GetPixelLayout( image_input, oyLAYOUT ) );
bps_in = oyDataTypeGetSize( data_type_in );
if(data_type_in == oyFLOAT)
{
oyFilterSocket_Callback( requestor_plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
lcms_msg(oyMSG_WARN,0, OY_DBG_FORMAT_" can not handle oyFLOAT",OY_DBG_ARGS_);
error = 1;
}
if(!image_output)
{
lcms_msg( oyMSG_WARN,0, OY_DBG_FORMAT_ " no ticket->output_image",
OY_DBG_ARGS_);
error = 1;
}
if(!error)
{
oyPointer_s * backend_data = oyFilterNode_GetContext( node );
data_type_out = oyToDataType_m( oyImage_GetPixelLayout( image_output, oyLAYOUT ) );
channels = oyToChannels_m( oyImage_GetPixelLayout( image_output, oyLAYOUT ) );
error = lcmsCMMTransform_GetWrap_( backend_data, &ltw );
oyPointer_Release( &backend_data );
}
DBG_NUM2_S( "channels in/out: %d->%d",
oyToChannels_m( pixel_layout_in ), channels );
if(ltw && !array_out)
{
lcms_msg( oyMSG_ERROR,0, OY_DBG_FORMAT_ " no ticket->array",
OY_DBG_ARGS_);
error = 1;
}
/* now do some position blind manipulations */
if(ltw && error <= 0)
{
uint8_t * array_in_tmp = 0,
* array_out_tmp = 0;
float * array_in_tmp_flt = 0,
* array_out_tmp_flt = 0;
double * array_in_tmp_dbl = 0,
* array_out_tmp_dbl = 0;
uint8_t ** array_in_data = oyArray2d_GetData( array_in ),
** array_out_data = oyArray2d_GetData( array_out );
int threads_n =
#if defined(_OPENMP) && defined(USE_OPENMP)
omp_get_max_threads();
#else
1;
#endif
int w_in = (int)(oyArray2d_GetWidth(array_in)+0.5),
w_out = (int)(oyArray2d_GetWidth(array_out)+0.5);
int stride_in = w_in * bps_in;
double xyz_factor_in = 1.0,
xyz_factor_out = 1.0;
n = w_out / channels;
lcms_msg( oyMSG_DBG,(oyStruct_s*)requestor_plug, OY_DBG_FORMAT_
" threads_n: %d",
OY_DBG_ARGS_, threads_n );
if(!(data_type_in == oyUINT8 ||
data_type_in == oyUINT16 ||
data_type_in == oyDOUBLE))
{
oyFilterSocket_Callback( requestor_plug, oyCONNECTOR_EVENT_INCOMPATIBLE_DATA );
error = 1;
}
/* format for PT_ANY from lcms */
if((data_type_in == oyFLOAT ||
data_type_in == oyDOUBLE))
{
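/* lcms' fixed point XYZ encoding tops out at 1.0 + 32767/32768; remember
the factor so the normalized input values can be rescaled below to the
0..100 range lcms expects for CIE XYZ doubles */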
if(ltw->sig_in == icSigXYZData)
xyz_factor_in = 1.0 + 32767.0/32768.0;
array_in_tmp = oyAllocateFunc_( stride_in * threads_n );
if(data_type_in == oyFLOAT)
array_in_tmp_flt = (float*) array_in_tmp;
else if(data_type_in == oyDOUBLE)
array_in_tmp_dbl = (double*) array_in_tmp;
}
if((data_type_out == oyFLOAT ||
data_type_out == oyDOUBLE))
{
if(ltw->sig_out == icSigXYZData)
xyz_factor_out = 1.0 + 32767.0/32768.0;
array_out_tmp = array_out_data[0];
}
/* - - - - - conversion - - - - - */
/*lcms_msg(oyMSG_WARN,(oyStruct_s*)ticket, "%s: %d Start lines: %d",
__FILE__,__LINE__, array_out->height);*/
if(!error)
{
const int use_xyz_scale = 1;
int index = 0;
int array_out_height = oyArray2d_GetHeight(array_out);
if(array_out_height > threads_n * 10)
{
#if defined(USE_OPENMP)
#pragma omp parallel for private(index,j,array_in_tmp_flt,array_in_tmp_dbl,array_out_tmp_flt,array_out_tmp_dbl)
#endif
for( k = 0; k < array_out_height; ++k)
{
if(array_in_tmp && use_xyz_scale)
{
#if defined(_OPENMP) && defined(USE_OPENMP)
index = omp_get_thread_num();
#endif
memcpy( &array_in_tmp[stride_in*index], array_in_data[k],
w_in * bps_in );
if(data_type_in == oyFLOAT)
{
array_in_tmp_flt = (float*) &array_in_tmp[stride_in*index];
for(j = 0; j < w_in; ++j)
{
array_in_tmp_flt[j] *= 100.0 / xyz_factor_in;
}
} else
if(data_type_in == oyDOUBLE)
{
array_in_tmp_dbl = (double*) &array_in_tmp[stride_in*index];
for(j = 0; j < w_in; ++j)
{
array_in_tmp_dbl[j] *= 100.0 / xyz_factor_in;
}
}
lcmsDoTransform( ltw->lcms, &array_in_tmp[stride_in*index],
array_out_data[k], n );
} else
lcmsDoTransform( ltw->lcms, array_in_data[k],
array_out_data[k], n );
if(array_out_tmp && use_xyz_scale)
{
if(data_type_out == oyFLOAT)
{
array_out_tmp_flt = (float*) array_out_data[k];
for(j = 0; j < w_out; ++j)
array_out_tmp_flt[j] *= xyz_factor_out / 100.0;
} else
if(data_type_out == oyDOUBLE)
{
array_out_tmp_dbl = (double*) array_out_data[k];
for(j = 0; j < w_out; ++j)
array_out_tmp_dbl[j] *= xyz_factor_out / 100.0;
}
}
}
} else
for( k = 0; k < array_out_height; ++k)
{
if(array_in_tmp && use_xyz_scale)
{
memcpy( array_in_tmp, array_in_data[k], w_in * bps_in );
if(data_type_in == oyFLOAT)
for(j = 0; j < w_in; ++j)
{
array_in_tmp_flt[j] *= 100.0 / xyz_factor_in;
}
if(data_type_in == oyDOUBLE)
for(j = 0; j < w_in; ++j)
{
array_in_tmp_dbl[j] *= 100.0 / xyz_factor_in;
}
lcmsDoTransform( ltw->lcms, array_in_tmp,
array_out_data[k], n );
} else
lcmsDoTransform( ltw->lcms, array_in_data[k],
array_out_data[k], n );
if(array_out_tmp && use_xyz_scale)
{
if(data_type_out == oyFLOAT)
{
array_out_tmp_flt = (float*) array_out_data[k];
for(j = 0; j < w_out; ++j)
array_out_tmp_flt[j] *= xyz_factor_out / 100.0;
} else
if(data_type_out == oyDOUBLE)
{
array_out_tmp_dbl = (double*) array_out_data[k];
for(j = 0; j < w_out; ++j)
array_out_tmp_dbl[j] *= xyz_factor_out / 100.0;
}
}
}
/*message(oyMSG_WARN,(oyStruct_s*)ticket, "%s: %d End width: %d",
__FILE__,__LINE__, n);*/
}
if(array_in_tmp)
oyDeAllocateFunc_( array_in_tmp );
} else
{
oyFilterGraph_s * ticket_graph = oyPixelAccess_GetGraph( ticket );
oyOptions_s * ticket_graph_opts =
oyFilterGraph_GetOptions( ticket_graph );
if(error)
oyFilterSocket_Callback( requestor_plug,
oyCONNECTOR_EVENT_INCOMPATIBLE_CONTEXT );
else
oyFilterSocket_Callback( requestor_plug,
oyCONNECTOR_EVENT_OK );
error = oyOptions_SetFromString( &ticket_graph_opts,
"//" OY_TYPE_STD "/profile/dirty", "true", OY_CREATE_NEW );
oyFilterGraph_Release( &ticket_graph );
oyOptions_Release( &ticket_graph_opts );
error = 1;
}
if(oyImage_GetPixelLayout( image_input, oyLAYOUT ) !=
oyImage_GetPixelLayout( image_output, oyLAYOUT ))
oyPixelAccess_Release( &new_ticket );
oyFilterPlug_Release( &plug );
oyFilterSocket_Release( &socket );
oyFilterNode_Release( &input_node );
oyFilterNode_Release( &node );
oyImage_Release( &image_input );
oyImage_Release( &image_output );
oyArray2d_Release( &array_in );
oyArray2d_Release( &array_out );
return error;
}
/*
oyPointer oyCMMallocateFunc ( size_t size )
{
oyPointer p = 0;
if(size)
p = malloc(size);
return p;
}
void oyCMMdeallocateFunc ( oyPointer mem )
{
if(mem)
free(mem);
}*/
/** Function lcmsErrorHandlerFunction
 * @brief forward lcms errors to the Oyranos message system
*
* @version Oyranos: 0.1.8
* @date 2007/11/00
* @since 2007/11/00 (Oyranos: 0.1.8)
*/
int lcmsErrorHandlerFunction(int ErrorCode, const char *ErrorText)
{
int code = 0;
switch(ErrorCode) {
case LCMS_ERRC_WARNING: code = oyMSG_WARN; break;
case LCMS_ERRC_RECOVERABLE: code = oyMSG_WARN; break;
case LCMS_ERRC_ABORTED: code = oyMSG_ERROR; break;
default: code = ErrorCode;
}
lcms_msg( code, 0, ErrorText, 0 );
return 0;
}
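/* With the plain lcms 1.x API this handler would be registered like the
following sketch; this module presumably resolves the symbol through its
own dlopen wrappers instead:

     cmsSetErrorHandler( lcmsErrorHandlerFunction );
*/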
/** Function lcmsCMMMessageFuncSet
 * @brief set the message callback for this module
*
* @version Oyranos: 0.1.8
* @date 2007/11/00
* @since 2007/11/00 (Oyranos: 0.1.8)
*/
int lcmsCMMMessageFuncSet ( oyMessage_f message_func )
{
lcms_msg = message_func;
return 0;
}
char lcms_extra_options[] = {
"\n\
<" OY_TOP_SHARED ">\n\
<" OY_DOMAIN_INTERNAL ">\n\
<" OY_TYPE_STD ">\n\
<" "icc_color" ">\n\
<cmyk_cmyk_black_preservation.advanced>0</cmyk_cmyk_black_preservation.advanced>\n\
<precalculation.advanced>0</precalculation.advanced>\n\
</" "icc_color" ">\n\
</" OY_TYPE_STD ">\n\
</" OY_DOMAIN_INTERNAL ">\n\
</" OY_TOP_SHARED ">\n"
};
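/* The string above expands to XML of the following shape; the element
names depend on the OY_TOP_SHARED, OY_DOMAIN_INTERNAL and OY_TYPE_STD
macros (shown here as placeholders):

   <top><domain><type><icc_color>
     <cmyk_cmyk_black_preservation.advanced>0</cmyk_cmyk_black_preservation.advanced>
     <precalculation.advanced>0</precalculation.advanced>
   </icc_color></type></domain></top>
*/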
#define A(long_text) STRING_ADD( tmp, long_text)
/** Function lcmsGetOptionsUI
* @brief return XFORMS for matching options
*
* @version Oyranos: 0.9.5
* @date 2014/01/08
* @since 2009/07/29 (Oyranos: 0.1.10)
*/
int lcmsGetOptionsUI ( oyCMMapiFilter_s * module OY_UNUSED,
oyOptions_s * options,
int flags,
char ** ui_text,
oyAlloc_f allocateFunc )
{
char * tmp = 0;
tmp = (char *)oyOptions_FindString( options,
"cmyk_cmyk_black_preservation", 0 );
if(tmp == 0)
return 0;
tmp = oyStringCopy_( "\
<xf:group type=\"frame\">\
<xf:label>little CMS ", oyAllocateFunc_ );
A( _("Extended Options"));
A( ":</xf:label>\n");
A("\
<xf:select1 ref=\"/" OY_TOP_SHARED "/" OY_DOMAIN_INTERNAL "/" OY_TYPE_STD "/" "icc_color/cmyk_cmyk_black_preservation\">\n\
<xf:label>" );
A( _("Black Preservation"));
A( "</xf:label>\n\
<xf:hint>" );
A( _("Decide how to preserve the black channel for Cmyk to Cmyk transforms"));
A( "</xf:hint>\n\
<xf:help>" );
A( _("Cmyk to Cmyk transforms can provide various strategies to preserve the black only channel. None means, black might change to Cmy and thus text prints not very well. LittleCMS has added two different modes to deal with that: Black-ink-only preservation and black-plane preservation. The first is simple and effective: do all the colorimetric transforms but keep only K (preserving L*) where the source image is only black. The second mode is fair more complex and tries to preserve the WHOLE K plane."));
A( "</xf:help>\n\
<xf:choices>\n\
<xf:item>\n\
<xf:value>0</xf:value>\n\
<xf:label>none</xf:label>\n\
</xf:item>\n\
<xf:item>\n\
<xf:value>1</xf:value>\n\
<xf:label>LCMS_PRESERVE_PURE_K</xf:label>\n\
</xf:item>\n\
<xf:item>\n\
<xf:value>2</xf:value>\n\
<xf:label>LCMS_PRESERVE_K_PLANE</xf:label>\n\
</xf:item>\n\
</xf:choices>\n\
</xf:select1>\n");
A("\
<xf:select1 ref=\"/" OY_TOP_SHARED "/" OY_DOMAIN_INTERNAL "/" OY_TYPE_STD "/" "icc_color/precalculation\">\n\
<xf:label>" );
A( _("Optimization"));
A( "</xf:label>\n\
<xf:hint>" );
A( _("Color Transforms can be differently stored internally"));
A( "</xf:hint>\n\
<xf:help>" );
A( _("Little CMS tries to optimize profile chains whatever possible. There are some built-in optimization schemes, and you can add new schemas by using a plug-in. This generally improves the performance of the transform, but may introduce a small delay of 1-2 seconds when creating the transform. If you are going to transform just few colors, you don't need this precalculations. Then, the flag cmsFLAGS_NOOPTIMIZE in cmsCreateTransform() can be used to inhibit the optimization process. See the API reference for a more detailed discussion of the flags."));
A( "</xf:help>\n\
<xf:choices>\n\
<xf:item>\n\
<xf:value>0</xf:value>\n\
<xf:label>LCMS_NOOPTIMIZE</xf:label>\n\
</xf:item>\n\
<xf:item>\n\
<xf:value>1</xf:value>\n\
<xf:label>normal</xf:label>\n\
</xf:item>\n\
<xf:item>\n\
<xf:value>2</xf:value>\n\
<xf:label>LCMS_HIGHRESPRECALC</xf:label>\n\
</xf:item>\n\
<xf:item>\n\
<xf:value>3</xf:value>\n\
<xf:label>LCMS_LOWRESPRECALC</xf:label>\n\
</xf:item>\n\
</xf:choices>\n\
</xf:select1>\n\
</xf:group>\n");
if(allocateFunc && tmp)
{
char * t = oyStringCopy_( tmp, allocateFunc );
oyFree_m_( tmp );
tmp = t; t = 0;
} else
return 1;
*ui_text = tmp;
return 0;
}
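/* The "Optimization" help text above refers to plain lcms usage; outside
of this module the equivalent call would look like the following sketch,
assuming the lcms2 API where cmsFLAGS_NOOPTIMIZE is available (h_in and
h_out are illustrative profile handles):

     cmsHTRANSFORM t = cmsCreateTransform( h_in, TYPE_RGB_8, h_out, TYPE_RGB_8,
                                           INTENT_PERCEPTUAL,
                                           cmsFLAGS_NOOPTIMIZE );
*/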
/** Function lcmsCreateICCMatrixProfile
* @brief ICC from EDID
*
* @version Oyranos: 0.1.10
* @since 2009/10/24 (Oyranos: 0.1.10)
* @date 2009/12/10
*/
oyProfile_s * lcmsCreateICCMatrixProfile (
float gamma,
float rx, float ry,
float gx, float gy,
float bx, float by,
float wx, float wy)
{
cmsCIExyYTRIPLE p;
LPGAMMATABLE g[3] = {0,0,0};
/* 0.31271, 0.32902 D65 */
cmsCIExyY wtpt_xyY;
cmsHPROFILE lp = 0;
size_t size = 0;
char * data = 0;
int error = 0;
oyProfile_s * prof = 0;
p.Red.x = rx;
p.Red.y = ry;
p.Red.Y = 1.0;
p.Green.x = gx;
p.Green.y = gy;
p.Green.Y = 1.0;
p.Blue.x = bx;
p.Blue.y = by;
p.Blue.Y = 1.0;
wtpt_xyY.x = wx;
wtpt_xyY.y = wy;
wtpt_xyY.Y = 1.0;
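/* all three channels share one gamma table; it is freed once further below */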
g[0] = g[1] = g[2] = lcmsBuildGamma(1, (double)gamma);
lcms_msg( oyMSG_DBG,0, OY_DBG_FORMAT_
" red: %g %g %g green: %g %g %g blue: %g %g %g white: %g %g gamma: %g",
OY_DBG_ARGS_, rx,ry,p.Red.Y, gx,gy,p.Green.Y,bx,by,p.Blue.Y,wx,wy,gamma );
lp = lcmsCreateRGBProfile( &wtpt_xyY, &p, g);
l_cmsSaveProfileToMem( lp, 0, &size );
data = oyAllocateFunc_( size );
l_cmsSaveProfileToMem( lp, data, &size );
lcmsCloseProfile( lp );
lcmsFreeGamma( g[0] );
prof = oyProfile_FromMem( size, data, 0,0 );
error = oyProfile_AddTagText( prof, icSigCopyrightTag,
"no copyright; use freely" );
if(error) WARNc2_S("%s %d", _("found issues"),error);
oyDeAllocateFunc_( data ); size = 0;
return prof;
}
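/* Example: an sRGB-like matrix profile from the well known sRGB primaries
and the D65 white point, using a gamma 2.2 approximation:

     oyProfile_s * p = lcmsCreateICCMatrixProfile( 2.2,
                           0.64, 0.33,  0.30, 0.60,  0.15, 0.06,
                           0.3127, 0.3290 );
*/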
/**
* This function implements oyMOptions_Handle_f.
*
* @version Oyranos: 0.1.10
* @since 2009/12/11 (Oyranos: 0.1.10)
* @date 2009/12/11
*/
int lcmsMOptions_Handle ( oyOptions_s * options,
const char * command,
oyOptions_s ** result )
{
oyOption_s * o = 0;
oyProfile_s * prof = 0;
int error = 0;
double val = 0.0;
if(oyFilterRegistrationMatch(command,"can_handle", 0))
{
if(oyFilterRegistrationMatch(command,"create_profile", 0))
{
o = oyOptions_Find( options,
"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma",
oyNAME_PATTERN );
error = oyOptions_FindDouble( options,
"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma",
8, &val );
if(!o)
{
error = -1;
} else if( error != 0 )
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_" "
"option \"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma\" %s",
OY_DBG_ARGS_,
(error < 0) ? "contains less than 9 required values" :
"access returned with error" );
}
oyOption_Release( &o );
return error;
}
else
return -1;
}
else if(oyFilterRegistrationMatch(command,"create_profile", 0))
{
o = oyOptions_Find( options,
"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma",
oyNAME_PATTERN );
if(o)
{
error = oyOptions_FindDouble( options,
"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma",
8, &val );
if( error != 0 )
{
lcms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_" "
"option \"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma\" %s",
OY_DBG_ARGS_,
(error < 0) ? "contains less than 9 required values" :
"access returned with error" );
}
prof = lcmsCreateICCMatrixProfile (
oyOption_GetValueDouble(o,8),
oyOption_GetValueDouble(o,0), oyOption_GetValueDouble(o,1),
oyOption_GetValueDouble(o,2), oyOption_GetValueDouble(o,3),
oyOption_GetValueDouble(o,4), oyOption_GetValueDouble(o,5),
oyOption_GetValueDouble(o,6), oyOption_GetValueDouble(o,7));
oyOption_Release( &o );
o = oyOption_FromRegistration( OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH "icc_profile.create_profile.color_matrix._" CMM_NICK,
0 );
error = oyOption_MoveInStruct( o, (oyStruct_s**) &prof );
if(!*result)
*result = oyOptions_New(0);
oyOptions_MoveIn( *result, &o, -1 );
} else
lcms_msg( oyMSG_WARN, (oyStruct_s*)options, OY_DBG_FORMAT_ " "
"no option \"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma\" found",
OY_DBG_ARGS_ );
}
return 0;
}
/**
* This function implements oyCMMinfoGetText_f.
*
* @version Oyranos: 0.3.0
* @since 2011/02/21 (Oyranos: 0.3.0)
* @date 2011/02/21
*/
const char * lcmsInfoGetTextProfileC2( const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
if(strcmp(select, "can_handle")==0)
{
if(type == oyNAME_NICK)
return "check";
else if(type == oyNAME_NAME)
return _("check");
else
return _("Check if this module can handle a certain command.");
} else if(strcmp(select, "create_profile")==0)
{
if(type == oyNAME_NICK)
return "proofing_effect";
else if(type == oyNAME_NAME)
return _("Create a ICC abstract proofing profile.");
else
return _("The littleCMS \"create_profile.proofing_effect\" command lets you create ICC abstract profiles from a given ICC profile for proofing. The filter expects a oyOption_s object with name \"proofing_profile\" containing a oyProfile_s as value. The options \"rendering_intent\", \"rendering_intent_proof\", \"rendering_bpc\", \"rendering_gamut_warning\", \"precalculation\" and \"cmyk_cmyk_black_preservation\" are honoured. The result will appear in \"icc_profile\" with the additional attributes \"create_profile.proofing_effect\" as a oyProfile_s object.");
} else if(strcmp(select, "help")==0)
{
if(type == oyNAME_NICK)
return "help";
else if(type == oyNAME_NAME)
return _("Create a ICC proofing profile.");
else
return _("The littleCMS \"create_profile.proofing_effect\" command lets you create ICC abstract profiles from some given ICC profile. See the \"proofing_effect\" info item.");
}
return 0;
}
const char *lcms_texts_profile_create[4] = {"can_handle","create_profile","help",0};
/** @instance lcms_api10_cmm2
* @brief littleCMS oyCMMapi10_s implementation
*
* a filter for proofing effect profile creation
*
* @version Oyranos: 0.3.0
* @since 2011/02/21 (Oyranos: 0.3.0)
* @date 2011/02/21
*/
oyCMMapi10_s_ lcms_api10_cmm2 = {
oyOBJECT_CMM_API10_S,
0,0,0,
0,
lcmsCMMInit,
lcmsCMMMessageFuncSet,
OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH
"create_profile.proofing_effect.icc._" CMM_NICK "._CPU",
CMM_VERSION,
CMM_API_VERSION, /**< int32_t module_api[3] */
0, /* id_; keep empty */
0, /* api5_; keep empty */
0, /* runtime_context */
lcmsInfoGetTextProfileC2, /**< getText */
(char**)lcms_texts_profile_create, /**<texts; list of arguments to getText*/
lcmsMOptions_Handle2 /**< oyMOptions_Handle_f oyMOptions_Handle */
};
/**
* This function implements oyCMMinfoGetText_f.
*
* @version Oyranos: 0.1.10
* @since 2009/12/11 (Oyranos: 0.1.10)
* @date 2009/12/11
*/
const char * lcmsInfoGetTextProfileC ( const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
if(strcmp(select, "can_handle")==0)
{
if(type == oyNAME_NICK)
return "check";
else if(type == oyNAME_NAME)
return _("check");
else
return _("Check if this module can handle a certain command.");
} else if(strcmp(select, "create_profile")==0)
{
if(type == oyNAME_NICK)
return "create_profile";
else if(type == oyNAME_NAME)
return _("Create a ICC matrix profile.");
else
return _("The littleCMS \"create_profile.color_matrix\" command lets you create ICC profiles from some given colorimetric coordinates. The filter expects a oyOption_s object with name \"color_matrix.redx_redy_greenx_greeny_bluex_bluey_whitex_whitey_gamma\" containing 9 floats in the order of CIE*x for red, CIE*y for red, CIE*x for green, CIE*y for green, CIE*x for blue, CIE*y for blue, CIE*x for white, CIE*y for white and a gamma value. The result will appear in \"icc_profile\" with the additional attributes \"create_profile.color_matrix\".");
} else if(strcmp(select, "help")==0)
{
if(type == oyNAME_NICK)
return "help";
else if(type == oyNAME_NAME)
return _("Create a ICC matrix profile.");
else
return _("The littleCMS \"create_profile.color_matrix\" command lets you create ICC profiles from some given colorimetric coordinates. See the \"create_profile\" info item.");
}
return 0;
}
/** @instance lcms_api10_cmm
* @brief littleCMS oyCMMapi10_s implementation
*
* a filter for simple profile creation
*
* @version Oyranos: 0.1.10
* @since 2009/12/11 (Oyranos: 0.1.10)
* @date 2009/12/11
*/
oyCMMapi10_s_ lcms_api10_cmm = {
oyOBJECT_CMM_API10_S,
0,0,0,
(oyCMMapi_s*) & lcms_api10_cmm2,
lcmsCMMInit,
lcmsCMMMessageFuncSet,
OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH
"create_profile.color_matrix.icc._" CMM_NICK "._CPU",
CMM_VERSION,
CMM_API_VERSION, /**< int32_t module_api[3] */
0, /* id_; keep empty */
0, /* api5_; keep empty */
0, /* runtime_context */
lcmsInfoGetTextProfileC, /**< getText */
(char**)lcms_texts_profile_create, /**<texts; list of arguments to getText*/
lcmsMOptions_Handle /**< oyMOptions_Handle_f oyMOptions_Handle */
};
/** @instance lcms_api6
* @brief littleCMS oyCMMapi6_s implementation
*
* a filter providing CMM API's
*
* @version Oyranos: 0.1.10
* @since 2008/12/28 (Oyranos: 0.1.10)
* @date 2008/12/28
*/
oyCMMapi6_s_ lcms_api6_cmm = {
oyOBJECT_CMM_API6_S,
0,0,0,
(oyCMMapi_s*) & lcms_api10_cmm,
lcmsCMMInit,
lcmsCMMMessageFuncSet,
OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH
"icc_color._" CMM_NICK "._CPU." oyCOLOR_ICC_DEVICE_LINK "_" lcmsTRANSFORM,
CMM_VERSION,
CMM_API_VERSION, /**< int32_t module_api[3] */
0, /* id_; keep empty */
0, /* api5_; keep empty */
0, /* runtime_context */
oyCOLOR_ICC_DEVICE_LINK, /* data_type_in, "oyDL" */
lcmsTRANSFORM, /* data_type_out, "lcCC" */
lcmsModuleData_Convert /* oyModuleData_Convert_f oyModuleData_Convert */
};
/** @instance lcms_api7
* @brief littleCMS oyCMMapi7_s implementation
*
* a filter providing CMM API's
*
* @version Oyranos: 0.1.10
* @since 2008/12/27 (Oyranos: 0.1.10)
* @date 2008/12/27
*/
oyCMMapi7_s_ lcms_api7_cmm = {
oyOBJECT_CMM_API7_S,
0,0,0,
(oyCMMapi_s*) & lcms_api6_cmm,
lcmsCMMInit,
lcmsCMMMessageFuncSet,
OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH
"icc_color._" CMM_NICK "._icc_version_2._icc_version_4._CPU._NOACCEL",
CMM_VERSION,
CMM_API_VERSION, /**< int32_t module_api[3] */
0, /* id_; keep empty */
0, /* api5_; keep empty */
0, /* runtime_context */
lcmsFilterPlug_CmmIccRun, /* oyCMMFilterPlug_Run_f */
lcmsTRANSFORM, /* data_type, "lcCC" */
(oyConnector_s**) lcms_cmmIccPlug_connectors,/* plugs */
1, /* plugs_n */
0, /* plugs_last_add */
(oyConnector_s**) lcms_cmmIccSocket_connectors, /* sockets */
1, /* sockets_n */
0, /* sockets_last_add */
NULL /* char ** properties */
};
/**
* This function implements oyCMMGetText_f.
*
* @version Oyranos: 0.1.10
* @since 2009/12/22 (Oyranos: 0.1.10)
* @date 2009/12/22
*/
const char * lcmsApi4UiGetText (
const char * select,
oyNAME_e type,
oyStruct_s * context )
{
static char * category = 0;
if(strcmp(select,"name") ||
strcmp(select,"help"))
{
return lcmsInfoGetText( select, type, context );
}
else if(strcmp(select,"category"))
{
if(!category)
{
STRING_ADD( category, _("Color") );
STRING_ADD( category, _("/") );
/* CMM: abbreviation for Color Matching Module */
STRING_ADD( category, _("CMM") );
STRING_ADD( category, _("/") );
STRING_ADD( category, _("littleCMS") );
}
if(type == oyNAME_NICK)
return "category";
else if(type == oyNAME_NAME)
return category;
else
return category;
}
return 0;
}
const char * lcms_api4_ui_texts[] = {"name", "category", "help", 0};
/** @instance lcms_api4_ui
* @brief lcms oyCMMapi4_s::ui implementation
*
* The UI for lcms.
*
* @version Oyranos: 0.1.10
* @since 2009/09/09 (Oyranos: 0.1.10)
* @date 2009/09/09
*/
oyCMMui_s_ lcms_api4_ui = {
oyOBJECT_CMM_DATA_TYPES_S, /**< oyOBJECT_e type; */
0,0,0, /* unused oyStruct_s fields; keep to zero */
CMM_VERSION, /**< int32_t version[3] */
CMM_API_VERSION, /**< int32_t module_api[3] */
lcmsFilter_CmmIccValidateOptions, /* oyCMMFilter_ValidateOptions_f */
lcmsWidgetEvent, /* oyWidgetEvent_f */
"Color/CMM/littleCMS", /* category */
lcms_extra_options, /* const char * options */
lcmsGetOptionsUI, /* oyCMMuiGet_f oyCMMuiGet */
lcmsApi4UiGetText, /* oyCMMGetText_f getText */
lcms_api4_ui_texts, /* const char ** texts */
(oyCMMapiFilter_s*)&lcms_api4_cmm /* oyCMMapiFilter_s*parent */
};
/** @instance lcms_api4_cmm
* @brief littleCMS oyCMMapi4_s implementation
*
* a filter providing CMM API's
*
* @version Oyranos: 0.1.8
* @since 2008/07/18 (Oyranos: 0.1.8)
* @date 2008/07/18
*/
oyCMMapi4_s_ lcms_api4_cmm = {
oyOBJECT_CMM_API4_S,
0,0,0,
(oyCMMapi_s*) & lcms_api7_cmm,
lcmsCMMInit,
lcmsCMMMessageFuncSet,
OY_TOP_SHARED OY_SLASH OY_DOMAIN_INTERNAL OY_SLASH OY_TYPE_STD OY_SLASH
"icc_color._" CMM_NICK "._icc_version_2._icc_version_4._CPU._NOACCEL._effect",
CMM_VERSION,
CMM_API_VERSION, /**< int32_t module_api[3] */
0, /* id_; keep empty */
0, /* api5_; keep empty */
0, /* runtime_context */
lcmsFilterNode_CmmIccContextToMem, /* oyCMMFilterNode_ContextToMem_f */
lcmsFilterNode_GetText, /* oyCMMFilterNode_GetText_f */
oyCOLOR_ICC_DEVICE_LINK, /* context data_type */
&lcms_api4_ui /**< oyCMMui_s *ui */
};
/**
* This function implements oyCMMinfoGetText_f.
*
* @version Oyranos: 0.1.10
* @since 2008/12/23 (Oyranos: 0.1.10)
* @date 2008/12/30
*/
const char * lcmsInfoGetText ( const char * select,
oyNAME_e type,
oyStruct_s * context OY_UNUSED )
{
if(strcmp(select, "name")==0)
{
if(type == oyNAME_NICK)
return CMM_NICK;
else if(type == oyNAME_NAME)
return _("Little CMS");
else
return _("LittleCMS is a CMM, a color management engine; it implements fast transforms between ICC profiles. \"Little\" stands for its small overhead. With a typical footprint of about 100K including C runtime, you can color-enable your application without the pain of ActiveX, OCX, redistributables or binaries of any kind. We are using little cms in several commercial projects, however, we are offering lcms library free for anybody under an extremely liberal open source license.");
} else if(strcmp(select, "manufacturer")==0)
{
if(type == oyNAME_NICK)
return "Marti";
else if(type == oyNAME_NAME)
return "Marti Maria";
else
return _("littleCMS project; www: http://www.littlecms.com; support/email: support@littlecms.com; sources: http://www.littlecms.com/downloads.htm; Oyranos wrapper: Kai-Uwe Behrmann for the Oyranos project");
} else if(strcmp(select, "copyright")==0)
{
if(type == oyNAME_NICK)
return "MIT";
else if(type == oyNAME_NAME)
return _("Copyright (c) 1998-2008 Marti Maria Saguer; MIT");
else
return _("MIT license: http://www.opensource.org/licenses/mit-license.php");
} else if(strcmp(select, "help")==0)
{
if(type == oyNAME_NICK)
return "help";
else if(type == oyNAME_NAME)
return _("The lcms \"color.icc\" filter is a one dimensional color conversion filter. It can both create a color conversion context, some precalculated for processing speed up, and the color conversion with the help of that context. The adaption part of this filter transforms the Oyranos color context, which is ICC device link based, to the internal lcms format.");
else
return _("The following options are available to create color contexts:\n \"profiles_simulation\", a option of type oyProfiles_s, can contain device profiles for proofing.\n \"profiles_effect\", a option of type oyProfiles_s, can contain abstract color profiles.\n The following Oyranos options are supported: \"rendering_gamut_warning\", \"rendering_intent_proof\", \"rendering_bpc\", \"rendering_intent\", \"proof_soft\" and \"proof_hard\".\n The additional lcms options are supported \"cmyk_cmyk_black_preservation\" [0 - none; 1 - LCMS_PRESERVE_PURE_K; 2 - LCMS_PRESERVE_K_PLANE] and \"precalculation\".");
}
return 0;
}
const char *lcms_texts[5] = {"name","copyright","manufacturer","help",0};
oyIcon_s lcms_icon = {oyOBJECT_ICON_S, 0,0,0, 0,0,0, "lcms_logo2.png"};
/** @instance lcms_cmm_module
* @brief lcms module infos
*
* @version Oyranos: 0.1.10
* @since 2007/11/00 (Oyranos: 0.1.8)
* @date 2008/12/30
*/
oyCMM_s lcms_cmm_module = {
oyOBJECT_CMM_INFO_S, /**< type, struct type */
0,0,0, /**< ,dynamic object functions */
CMM_NICK, /**< cmm, ICC signature */
"0.6", /**< backend_version */
lcmsInfoGetText, /**< getText */
(char**)lcms_texts, /**<texts; list of arguments to getText*/
OYRANOS_VERSION, /**< oy_compatibility */
(oyCMMapi_s*) & lcms_api4_cmm, /**< api */
&lcms_icon, /**< icon */
NULL /**< init() */
};
|
GB_unop__one_fc64_fc64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__one_fc64_fc64)
// op(A') function: GB (_unop_tran__one_fc64_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// cast: ;
// unaryop: cij = GxB_CMPLX(1,0)
#define GB_ATYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
;
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = GxB_CMPLX(1,0) ;
// casting
#define GB_CAST(z, aij) \
; ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
; ; \
/* Cx [pC] = op (cast (aij)) */ \
; ; \
Cx [pC] = GxB_CMPLX(1,0) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ONE || GxB_NO_FC64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__one_fc64_fc64)
(
GxB_FC64_t *Cx, // Cx and Ax may be aliased
const GxB_FC64_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
// TODO: if OP is ONE and uniform-valued matrices are exploited, then
// do this in O(1) time
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (GxB_FC64_t), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
; ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
; ;
; ;
Cx [p] = GxB_CMPLX(1,0) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
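// For reference, this kernel is typically reached through the user-level
// apply with the built-in GxB_ONE_FC64 operator (a sketch; error handling
// omitted, m and n are illustrative dimensions):
//
//      GrB_Matrix C, A ;
//      GrB_Matrix_new (&C, GxB_FC64, m, n) ;
//      GrB_Matrix_apply (C, NULL, NULL, GxB_ONE_FC64, A, NULL) ;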
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__one_fc64_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
normal.c | // RUN: %libomp-compile-and-run | FileCheck %s
// RUN: %libomp-compile-and-run | %sort-threads | FileCheck --check-prefix=THREADS %s
// REQUIRES: ompt
#include "callback.h"
int main()
{
#pragma omp parallel num_threads(4)
{
print_ids(0);
print_ids(1);
}
// Check if libomp supports the callbacks for this test.
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_thread_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_callback_parallel_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_implicit_task_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_barrier_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_barrier_end'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_wait_barrier_begin'
// CHECK-NOT: {{^}}0: Could not register callback 'ompt_event_wait_barrier_end'
// CHECK: 0: NULL_POINTER=[[NULL:.*$]]
// CHECK: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker=[[PARALLEL_INVOKER:[0-9]+]]
// CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// Note that we cannot ensure that the worker threads have already called barrier_end and implicit_task_end before parallel_end!
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK-DAG: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// CHECK-DAG: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// CHECK: {{^}}[[MASTER_ID]]: ompt_event_parallel_end: parallel_id=[[PARALLEL_ID]], task_id=[[PARENT_TASK_ID]], invoker=[[PARALLEL_INVOKER]]
// THREADS: 0: NULL_POINTER=[[NULL:.*$]]
// THREADS: {{^}}[[MASTER_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_initial=1, thread_id=[[MASTER_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_parallel_begin: parent_task_id=[[PARENT_TASK_ID:[0-9]+]], parent_task_frame.exit=[[NULL]], parent_task_frame.reenter={{0x[0-f]+}}, parallel_id=[[PARALLEL_ID:[0-9]+]], requested_team_size=4, parallel_function=0x{{[0-f]+}}, invoker={{.*}}
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[MASTER_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[MASTER_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID:[0-9]+]]: ompt_event_thread_begin: thread_type=ompt_thread_worker=2, thread_id=[[THREAD_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID:[0-9]+]]
// THREADS: {{^}}[[THREAD_ID]]: task level 0: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: task level 1: parallel_id=0, task_id=[[PARENT_TASK_ID]]
// THREADS-NOT: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_begin: parallel_id=[[PARALLEL_ID]], task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_barrier_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
// THREADS: {{^}}[[THREAD_ID]]: ompt_event_implicit_task_end: parallel_id={{[0-9]+}}, task_id=[[IMPLICIT_TASK_ID]]
return 0;
}
|
image-view.c | /*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% IIIII M M AAA GGGG EEEEE %
% I MM MM A A G E %
% I M M M AAAAA G GG EEE %
% I M M A A G G E %
% IIIII M M A A GGGG EEEEE %
% %
% V V IIIII EEEEE W W %
% V V I E W W %
% V V I EEE W W W %
% V V I E WW WW %
% V IIIII EEEEE W W %
% %
% %
% MagickCore Image View Methods %
% %
% Software Design %
% Cristy %
% March 2003 %
% %
% %
% Copyright 1999-2016 ImageMagick Studio LLC, a non-profit organization %
% dedicated to making software imaging solutions freely available. %
% %
% You may not use this file except in compliance with the License. You may %
% obtain a copy of the License at %
% %
% http://www.imagemagick.org/script/license.php %
% %
% Unless required by applicable law or agreed to in writing, software %
% distributed under the License is distributed on an "AS IS" BASIS, %
% WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. %
% See the License for the specific language governing permissions and %
% limitations under the License. %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%
%
*/
/*
Include declarations.
*/
#include "magick/studio.h"
#include "magick/MagickCore.h"
#include "magick/exception-private.h"
#include "magick/monitor-private.h"
#include "magick/thread-private.h"
/*
Typedef declarations.
*/
struct _ImageView
{
char
*description;
RectangleInfo
extent;
Image
*image;
CacheView
*view;
size_t
number_threads;
ExceptionInfo
*exception;
MagickBooleanType
debug;
size_t
signature;
};
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% C l o n e I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% CloneImageView() makes a copy of the specified image view.
%
% The format of the CloneImageView method is:
%
% ImageView *CloneImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *CloneImageView(const ImageView *image_view)
{
ImageView
*clone_view;
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
clone_view=(ImageView *) AcquireMagickMemory(sizeof(*clone_view));
if (clone_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(clone_view,0,sizeof(*clone_view));
clone_view->description=ConstantString(image_view->description);
clone_view->extent=image_view->extent;
clone_view->view=CloneCacheView(image_view->view);
clone_view->number_threads=image_view->number_threads;
clone_view->exception=AcquireExceptionInfo();
InheritException(clone_view->exception,image_view->exception);
clone_view->debug=image_view->debug;
clone_view->signature=MagickSignature;
return(clone_view);
}
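/*
  A typical lifecycle sketch; NewImageView() is assumed from the same
  image-view API family:

    ImageView *view = NewImageView(image);
    ImageView *clone = CloneImageView(view);
    ...
    clone = DestroyImageView(clone);
    view = DestroyImageView(view);
*/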
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D e s t r o y I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% DestroyImageView() deallocates memory associated with an image view.
%
% The format of the DestroyImageView method is:
%
% ImageView *DestroyImageView(ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport ImageView *DestroyImageView(ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
if (image_view->description != (char *) NULL)
image_view->description=DestroyString(image_view->description);
image_view->view=DestroyCacheView(image_view->view);
image_view->exception=DestroyExceptionInfo(image_view->exception);
image_view->signature=(~MagickSignature);
image_view=(ImageView *) RelinquishMagickMemory(image_view);
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% D u p l e x T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  DuplexTransferImageViewIterator() iterates over three image views in
%  parallel and calls your transfer method for each scanline of the view. The
%  source and duplex pixel extents are not confined to the image canvas; that
%  is, you can include negative offsets or widths or heights that exceed the
%  image dimension. The destination image view, however, is confined to the
%  image canvas: no negative offsets or widths or heights that exceed the
%  image dimension are permitted.
%
% The callback signature is:
%
% MagickBooleanType DuplexTransferImageViewMethod(const ImageView *source,
% const ImageView *duplex,ImageView *destination,const ssize_t y,
% const int thread_id,void *context)
%
%  Use this pragma if the view is not single-threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the DuplexTransferImageViewIterator method is:
%
% MagickBooleanType DuplexTransferImageViewIterator(ImageView *source,
% ImageView *duplex,ImageView *destination,
% DuplexTransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o duplex: the duplex image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
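%  For example, a transfer callback that keeps the brighter of the source and
%  duplex red channels might look as follows (a minimal sketch; the callback
%  name is hypothetical, not part of the MagickCore API):
%
%    static MagickBooleanType LightenView(const ImageView *source,
%      const ImageView *duplex,ImageView *destination,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      const PixelPacket
%        *duplex_pixels,
%        *pixels;
%
%      PixelPacket
%        *destination_pixels;
%
%      ssize_t
%        x;
%
%      (void) y; (void) thread_id; (void) context;
%      pixels=GetImageViewVirtualPixels(source);
%      duplex_pixels=GetImageViewVirtualPixels(duplex);
%      destination_pixels=GetImageViewAuthenticPixels(destination);
%      for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
%        destination_pixels[x]=pixels[x].red > duplex_pixels[x].red ?
%          pixels[x] : duplex_pixels[x];
%      return(MagickTrue);
%    }
%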
*/
MagickExport MagickBooleanType DuplexTransferImageViewIterator(
ImageView *source,ImageView *duplex,ImageView *destination,
DuplexTransferImageViewMethod transfer,void *context)
{
ExceptionInfo
*exception;
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (transfer == (DuplexTransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const PixelPacket
*magick_restrict duplex_pixels,
*magick_restrict pixels;
register PixelPacket
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
duplex_pixels=GetCacheViewVirtualPixels(duplex->view,duplex->extent.x,y,
duplex->extent.width,1,duplex->exception);
if (duplex_pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,duplex,destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_DuplexTransferImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticIndexes() returns the image view authentic indexes.
%
%  The format of the GetImageViewAuthenticIndexes method is:
%
% IndexPacket *GetImageViewAuthenticIndexes(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport IndexPacket *GetImageViewAuthenticIndexes(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewAuthenticIndexQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w A u t h e n t i c P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewAuthenticPixels() returns the image view authentic pixels.
%
% The format of the GetImageViewAuthenticPixels method is:
%
% PixelPacket *GetImageViewAuthenticPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport PixelPacket *GetImageViewAuthenticPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewAuthenticPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x c e p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewException() returns the severity, reason, and description of any
%  error that occurs when utilizing an image view.
%
% The format of the GetImageViewException method is:
%
%      char *GetImageViewException(const ImageView *image_view,
% ExceptionType *severity)
%
% A description of each parameter follows:
%
%    o image_view: the image view.
%
% o severity: the severity of the error is returned here.
%
*/
MagickExport char *GetImageViewException(const ImageView *image_view,
ExceptionType *severity)
{
char
*description;
assert(image_view != (const ImageView *) NULL);
assert(image_view->signature == MagickSignature);
assert(severity != (ExceptionType *) NULL);
*severity=image_view->exception->severity;
description=(char *) AcquireQuantumMemory(2UL*MaxTextExtent,
sizeof(*description));
if (description == (char *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
*description='\0';
if (image_view->exception->reason != (char *) NULL)
(void) CopyMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->reason),
MaxTextExtent);
if (image_view->exception->description != (char *) NULL)
{
(void) ConcatenateMagickString(description," (",MaxTextExtent);
(void) ConcatenateMagickString(description,GetLocaleExceptionMessage(
image_view->exception->severity,image_view->exception->description),
MaxTextExtent);
(void) ConcatenateMagickString(description,")",MaxTextExtent);
}
return(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w E x t e n t %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewExtent() returns the image view extent.
%
% The format of the GetImageViewExtent method is:
%
% RectangleInfo GetImageViewExtent(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport RectangleInfo GetImageViewExtent(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(image_view->extent);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I m a g e %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewImage() returns the image associated with the image view.
%
% The format of the GetImageViewImage method is:
%
%      Image *GetImageViewImage(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport Image *GetImageViewImage(const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(image_view->image);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  GetImageViewIterator() iterates over the image view in parallel and calls
%  your get method for each scanline of the view. The pixel extent is not
%  confined to the image canvas; that is, you can include negative offsets or
%  widths or heights that exceed the image dimension. Any updates to the
%  pixels in your callback are ignored.
%
% The callback signature is:
%
% MagickBooleanType GetImageViewMethod(const ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single-threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback get method that must be
% executed by a single thread at a time.
%
% The format of the GetImageViewIterator method is:
%
% MagickBooleanType GetImageViewIterator(ImageView *source,
% GetImageViewMethod get,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o get: the get callback method.
%
% o context: the user defined context.
%
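%  For example, a get callback that accumulates the red channel over the view
%  might look as follows (a minimal sketch; the callback name and the
%  SumContext type are hypothetical and supplied by the caller via context):
%
%    typedef struct { double sum; } SumContext;
%
%    static MagickBooleanType SumRed(const ImageView *source,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      SumContext
%        *sum_context = (SumContext *) context;
%
%      const PixelPacket
%        *pixels;
%
%      double
%        row_sum = 0.0;
%
%      ssize_t
%        x;
%
%      (void) y; (void) thread_id;
%      pixels=GetImageViewVirtualPixels(source);
%      for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
%        row_sum+=(double) pixels[x].red;
%      #pragma omp critical
%      sum_context->sum+=row_sum;
%      return(MagickTrue);
%    }
%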
*/
MagickExport MagickBooleanType GetImageViewIterator(ImageView *source,
GetImageViewMethod get,void *context)
{
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (get == (GetImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register const PixelPacket
*pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
if (get(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_GetImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l I n d e x e s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualIndexes() returns the image view virtual indexes.
%
% The format of the GetImageViewVirtualIndexes method is:
%
% const IndexPacket *GetImageViewVirtualIndexes(
% const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const IndexPacket *GetImageViewVirtualIndexes(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewVirtualIndexQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% G e t I m a g e V i e w V i r t u a l P i x e l s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% GetImageViewVirtualPixels() returns the image view virtual pixels.
%
% The format of the GetImageViewVirtualPixels method is:
%
% const PixelPacket *GetImageViewVirtualPixels(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport const PixelPacket *GetImageViewVirtualPixels(
const ImageView *image_view)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
return(GetCacheViewVirtualPixelQueue(image_view->view));
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% I s I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  IsImageView() returns MagickTrue if the parameter is verified as an image
%  view object.
%
% The format of the IsImageView method is:
%
% MagickBooleanType IsImageView(const ImageView *image_view)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
*/
MagickExport MagickBooleanType IsImageView(const ImageView *image_view)
{
if (image_view == (const ImageView *) NULL)
return(MagickFalse);
if (image_view->signature != MagickSignature)
return(MagickFalse);
return(MagickTrue);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageView() returns an image view required for all other methods in the
% Image View API.
%
% The format of the NewImageView method is:
%
%      ImageView *NewImageView(Image *image)
%
% A description of each parameter follows:
%
%    o image: the image.
%
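%  A typical lifecycle sketch (MyUpdate is a hypothetical caller-supplied
%  callback):
%
%    ImageView
%      *image_view;
%
%    image_view=NewImageView(image);
%    (void) UpdateImageViewIterator(image_view,MyUpdate,(void *) NULL);
%    image_view=DestroyImageView(image_view);
%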
*/
MagickExport ImageView *NewImageView(Image *image)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
if (image_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->image=image;
image_view->exception=AcquireExceptionInfo();
image_view->view=AcquireVirtualCacheView(image_view->image,
image_view->exception);
image_view->extent.width=image->columns;
image_view->extent.height=image->rows;
image_view->extent.x=0;
image_view->extent.y=0;
image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
image_view->debug=IsEventLogging();
image_view->signature=MagickSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% N e w I m a g e V i e w R e g i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  NewImageViewRegion() returns an image view required for all other methods
% in the Image View API.
%
% The format of the NewImageViewRegion method is:
%
%      ImageView *NewImageViewRegion(Image *image,const ssize_t x,
% const ssize_t y,const size_t width,const size_t height)
%
% A description of each parameter follows:
%
%    o image: the image.
%
%    o x,y,width,height: These values define the perimeter of the region of
%      the image view.
%
*/
MagickExport ImageView *NewImageViewRegion(Image *image,const ssize_t x,
const ssize_t y,const size_t width,const size_t height)
{
ImageView
*image_view;
assert(image != (Image *) NULL);
assert(image->signature == MagickSignature);
image_view=(ImageView *) AcquireMagickMemory(sizeof(*image_view));
if (image_view == (ImageView *) NULL)
ThrowFatalException(ResourceLimitFatalError,"MemoryAllocationFailed");
(void) ResetMagickMemory(image_view,0,sizeof(*image_view));
image_view->description=ConstantString("ImageView");
image_view->image=image;
image_view->exception=AcquireExceptionInfo();
image_view->view=AcquireVirtualCacheView(image_view->image,
image_view->exception);
image_view->extent.width=width;
image_view->extent.height=height;
image_view->extent.x=x;
image_view->extent.y=y;
image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
image_view->debug=IsEventLogging();
image_view->signature=MagickSignature;
return(image_view);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w D e s c r i p t i o n %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewDescription() associates a description with an image view.
%
% The format of the SetImageViewDescription method is:
%
% void SetImageViewDescription(ImageView *image_view,
% const char *description)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o description: the image view description.
%
*/
MagickExport void SetImageViewDescription(ImageView *image_view,
const char *description)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
image_view->description=ConstantString(description);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  SetImageViewIterator() iterates over the image view in parallel and calls
%  your set method for each scanline of the view. The pixel extent is
%  confined to the image canvas; that is, no negative offsets or widths or
%  heights that exceed the image dimension are permitted. The pixels are
%  initially undefined and any settings you make in the callback method are
%  automagically synced back to your image.
%
% The callback signature is:
%
% MagickBooleanType SetImageViewMethod(ImageView *destination,
% const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single-threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback set method that must be
% executed by a single thread at a time.
%
% The format of the SetImageViewIterator method is:
%
% MagickBooleanType SetImageViewIterator(ImageView *destination,
% SetImageViewMethod set,void *context)
%
% A description of each parameter follows:
%
% o destination: the image view.
%
% o set: the set callback method.
%
% o context: the user defined context.
%
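%  For example, a set callback that fills each scanline with mid-gray might
%  look as follows (a minimal sketch; the callback name is hypothetical):
%
%    static MagickBooleanType SetGray(ImageView *destination,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      PixelPacket
%        *pixels;
%
%      ssize_t
%        x;
%
%      (void) y; (void) thread_id; (void) context;
%      pixels=GetImageViewAuthenticPixels(destination);
%      for (x=0; x < (ssize_t) GetImageViewExtent(destination).width; x++)
%        {
%          pixels[x].red=(Quantum) (QuantumRange/2);
%          pixels[x].green=(Quantum) (QuantumRange/2);
%          pixels[x].blue=(Quantum) (QuantumRange/2);
%        }
%      return(MagickTrue);
%    }
%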
*/
MagickExport MagickBooleanType SetImageViewIterator(ImageView *destination,
SetImageViewMethod set,void *context)
{
ExceptionInfo
*exception;
Image
*destination_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(destination != (ImageView *) NULL);
assert(destination->signature == MagickSignature);
if (set == (SetImageViewMethod) NULL)
return(MagickFalse);
destination_image=destination->image;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (destination->extent.height-destination->extent.y);
#endif
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(destination_image,destination_image,height,1)
#endif
for (y=destination->extent.y; y < (ssize_t) destination->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(destination->view,destination->extent.x,
y,destination->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
continue;
}
if (set(destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (destination_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_SetImageViewIterator)
#endif
proceed=SetImageProgress(destination_image,destination->description,
progress++,destination->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% S e t I m a g e V i e w T h r e a d s %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
% SetImageViewThreads() sets the number of threads in a thread team.
%
%  The format of the SetImageViewThreads method is:
%
% void SetImageViewThreads(ImageView *image_view,
% const size_t number_threads)
%
% A description of each parameter follows:
%
% o image_view: the image view.
%
% o number_threads: the number of threads in a thread team.
%
*/
MagickExport void SetImageViewThreads(ImageView *image_view,
const size_t number_threads)
{
assert(image_view != (ImageView *) NULL);
assert(image_view->signature == MagickSignature);
image_view->number_threads=number_threads;
if (number_threads > (size_t) GetMagickResourceLimit(ThreadResource))
image_view->number_threads=(size_t) GetMagickResourceLimit(ThreadResource);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% T r a n s f e r I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  TransferImageViewIterator() iterates over two image views in parallel and
%  calls your transfer method for each scanline of the view. The source pixel
%  extent is not confined to the image canvas; that is, you can include
%  negative offsets or widths or heights that exceed the image dimension.
%  The destination image view, however, is confined to the image canvas: no
%  negative offsets or widths or heights that exceed the image dimension are
%  permitted.
%
% The callback signature is:
%
% MagickBooleanType TransferImageViewMethod(const ImageView *source,
% ImageView *destination,const ssize_t y,const int thread_id,
% void *context)
%
%  Use this pragma if the view is not single-threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback transfer method that must be
% executed by a single thread at a time.
%
% The format of the TransferImageViewIterator method is:
%
% MagickBooleanType TransferImageViewIterator(ImageView *source,
% ImageView *destination,TransferImageViewMethod transfer,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o destination: the destination image view.
%
% o transfer: the transfer callback method.
%
% o context: the user defined context.
%
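%  For example, a transfer callback that copies the source scanline while
%  swapping the red and blue channels might look as follows (a minimal
%  sketch; the callback name is hypothetical):
%
%    static MagickBooleanType SwapRedBlue(const ImageView *source,
%      ImageView *destination,const ssize_t y,const int thread_id,
%      void *context)
%    {
%      const PixelPacket
%        *pixels;
%
%      PixelPacket
%        *destination_pixels;
%
%      ssize_t
%        x;
%
%      (void) y; (void) thread_id; (void) context;
%      pixels=GetImageViewVirtualPixels(source);
%      destination_pixels=GetImageViewAuthenticPixels(destination);
%      for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
%        {
%          destination_pixels[x]=pixels[x];
%          destination_pixels[x].red=pixels[x].blue;
%          destination_pixels[x].blue=pixels[x].red;
%        }
%      return(MagickTrue);
%    }
%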
*/
MagickExport MagickBooleanType TransferImageViewIterator(ImageView *source,
ImageView *destination,TransferImageViewMethod transfer,void *context)
{
ExceptionInfo
*exception;
Image
*destination_image,
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (transfer == (TransferImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
destination_image=destination->image;
if (SetImageStorageClass(destination_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=destination->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,destination_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
MagickBooleanType
sync;
register const PixelPacket
*magick_restrict pixels;
register PixelPacket
*magick_restrict destination_pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewVirtualPixels(source->view,source->extent.x,y,
source->extent.width,1,source->exception);
if (pixels == (const PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
destination_pixels=GetCacheViewAuthenticPixels(destination->view,
destination->extent.x,y,destination->extent.width,1,exception);
if (destination_pixels == (PixelPacket *) NULL)
{
status=MagickFalse;
continue;
}
if (transfer(source,destination,y,id,context) == MagickFalse)
status=MagickFalse;
sync=SyncCacheViewAuthenticPixels(destination->view,exception);
if (sync == MagickFalse)
{
InheritException(destination->exception,GetCacheViewException(
destination->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_TransferImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
/*
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
% %
% %
% %
% U p d a t e I m a g e V i e w I t e r a t o r %
% %
% %
% %
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%
%  UpdateImageViewIterator() iterates over the image view in parallel and calls
%  your update method for each scanline of the view. The pixel extent is
%  confined to the image canvas; that is, no negative offsets or widths or
%  heights that exceed the image dimension are permitted. Updates to pixels
%  in your callback are automagically synced back to the image.
%
% The callback signature is:
%
% MagickBooleanType UpdateImageViewMethod(ImageView *source,
% const ssize_t y,const int thread_id,void *context)
%
%  Use this pragma if the view is not single-threaded:
%
% #pragma omp critical
%
% to define a section of code in your callback update method that must be
% executed by a single thread at a time.
%
% The format of the UpdateImageViewIterator method is:
%
% MagickBooleanType UpdateImageViewIterator(ImageView *source,
% UpdateImageViewMethod update,void *context)
%
% A description of each parameter follows:
%
% o source: the source image view.
%
% o update: the update callback method.
%
% o context: the user defined context.
%
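%  For example, an update callback that negates each pixel in place might
%  look as follows (a minimal sketch; the callback name is hypothetical):
%
%    static MagickBooleanType NegateView(ImageView *source,const ssize_t y,
%      const int thread_id,void *context)
%    {
%      PixelPacket
%        *pixels;
%
%      ssize_t
%        x;
%
%      (void) y; (void) thread_id; (void) context;
%      pixels=GetImageViewAuthenticPixels(source);
%      for (x=0; x < (ssize_t) GetImageViewExtent(source).width; x++)
%        {
%          pixels[x].red=(Quantum) (QuantumRange-pixels[x].red);
%          pixels[x].green=(Quantum) (QuantumRange-pixels[x].green);
%          pixels[x].blue=(Quantum) (QuantumRange-pixels[x].blue);
%        }
%      return(MagickTrue);
%    }
%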
*/
MagickExport MagickBooleanType UpdateImageViewIterator(ImageView *source,
UpdateImageViewMethod update,void *context)
{
ExceptionInfo
*exception;
Image
*source_image;
MagickBooleanType
status;
MagickOffsetType
progress;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
size_t
height;
#endif
ssize_t
y;
assert(source != (ImageView *) NULL);
assert(source->signature == MagickSignature);
if (update == (UpdateImageViewMethod) NULL)
return(MagickFalse);
source_image=source->image;
if (SetImageStorageClass(source_image,DirectClass) == MagickFalse)
return(MagickFalse);
status=MagickTrue;
progress=0;
exception=source->exception;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
height=(size_t) (source->extent.height-source->extent.y);
#pragma omp parallel for schedule(static,4) shared(progress,status) \
magick_threads(source_image,source_image,height,1)
#endif
for (y=source->extent.y; y < (ssize_t) source->extent.height; y++)
{
const int
id = GetOpenMPThreadId();
register PixelPacket
*magick_restrict pixels;
if (status == MagickFalse)
continue;
pixels=GetCacheViewAuthenticPixels(source->view,source->extent.x,y,
source->extent.width,1,exception);
if (pixels == (PixelPacket *) NULL)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
continue;
}
if (update(source,y,id,context) == MagickFalse)
status=MagickFalse;
if (SyncCacheViewAuthenticPixels(source->view,exception) == MagickFalse)
{
InheritException(source->exception,GetCacheViewException(source->view));
status=MagickFalse;
}
if (source_image->progress_monitor != (MagickProgressMonitor) NULL)
{
MagickBooleanType
proceed;
#if defined(MAGICKCORE_OPENMP_SUPPORT)
#pragma omp critical (MagickCore_UpdateImageViewIterator)
#endif
proceed=SetImageProgress(source_image,source->description,progress++,
source->extent.height);
if (proceed == MagickFalse)
status=MagickFalse;
}
}
return(status);
}
|
likelihoods.h | /*!
* This file is part of GPBoost a C++ library for combining
* boosting with Gaussian process and mixed effects models
*
* Copyright (c) 2020 Fabio Sigrist. All rights reserved.
*
* Licensed under the Apache License Version 2.0. See LICENSE file in the project root for license information.
*/
#ifndef GPB_LIKELIHOODS_
#define GPB_LIKELIHOODS_
#define _USE_MATH_DEFINES // for M_SQRT1_2 and M_PI
#include <cmath>
#include <GPBoost/type_defs.h>
#include <GPBoost/sparse_matrix_utils.h>
#include <GPBoost/DF_utils.h>
#include <string>
#include <set>
#include <vector>
#include <LightGBM/utils/log.h>
using LightGBM::Log;
//Mathematical constants usually defined in cmath
#ifndef M_SQRT2
#define M_SQRT2 1.414213562373095048801688724209698079 //sqrt(2)
#endif
#include <chrono> // only for debugging
#include <thread> // only for debugging
//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
//std::chrono::steady_clock::time_point begin, end;// only for debugging
//double el_time;
//end = std::chrono::steady_clock::now();// only for debugging
//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
//Log::REInfo("TOTAL TIME for mode calculation: %g", el_time);// Only for debugging
namespace GPBoost {
/*!
* \brief This class implements the likelihoods for Gaussian process and mixed effects models
* The template parameters <T_mat, T_chol> can be either <den_mat_t, chol_den_mat_t> or <sp_mat_t, chol_sp_mat_t>
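* Instantiation sketch (illustrative only; n_data, y, and y_int denote caller-supplied data):
*   Likelihood<den_mat_t, chol_den_mat_t> likelihood("bernoulli_logit", n_data, n_data, true);
*   likelihood.CheckY(y, n_data);
*   likelihood.CalculateNormalizingConstant(y_int, n_data);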
*/
template<typename T_mat, typename T_chol>
class Likelihood {
public:
/*! \brief Constructor */
Likelihood();
/*!
* \brief Constructor
* \param type Type of likelihood
* \param num_data Number of data points
* \param num_re Number of random effects
* \param has_a_vec Indicates whether the vector a_vec_ (a = (ZSigmaZt)^(-1) * mode) is used or not
*/
Likelihood(string_t type,
data_size_t num_data,
data_size_t num_re,
bool has_a_vec) {
string_t likelihood = ParseLikelihoodAlias(type);
if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
}
likelihood_type_ = likelihood;
num_data_ = num_data;
num_re_ = num_re;
if (likelihood_type_ == "gamma") {
aux_pars_ = { 1. };//shape parameter, TODO: also estimate this parameter
}
chol_fact_pattern_analyzed_ = false;
has_a_vec_ = has_a_vec;
}
/*!
* \brief Initialize the mode vector mode_ and the auxiliary vector a_vec_ (used in the Laplace approximation for non-Gaussian data)
*/
void InitializeModeAvec() {
mode_ = vec_t::Zero(num_re_);
mode_previous_value_ = vec_t::Zero(num_re_);
if (has_a_vec_) {
a_vec_ = vec_t::Zero(num_re_);
a_vec_previous_value_ = vec_t::Zero(num_re_);
}
mode_initialized_ = true;
first_deriv_ll_ = vec_t(num_data_);
second_deriv_neg_ll_ = vec_t(num_data_);
}
/*!
* \brief Reset the mode to its previous value. This is used when overly large step sizes lead to increases in the objective function:
* the new values (covariance parameters and linear coefficients) are then discarded, and consequently the mode is also reset to its previous value.
*/
void ResetModeToPreviousValue() {
CHECK(mode_initialized_);
mode_ = mode_previous_value_;
if (has_a_vec_) {
a_vec_ = a_vec_previous_value_;
}
}
/*! \brief Destructor */
~Likelihood() {
}
/*!
* \brief Returns the type of likelihood
*/
string_t GetLikelihood() const {
return(likelihood_type_);
}
/*!
* \brief Set the type of likelihood
* \param type Likelihood name
*/
void SetLikelihood(const string_t& type) {
string_t likelihood = ParseLikelihoodAlias(type);
if (SUPPORTED_LIKELIHOODS_.find(likelihood) == SUPPORTED_LIKELIHOODS_.end()) {
Log::REFatal("Likelihood of type '%s' is not supported.", likelihood.c_str());
}
likelihood_type_ = likelihood;
chol_fact_pattern_analyzed_ = false;
}
/*!
* \brief Returns the type of the response variable (label). Either "double" or "int"
*/
string_t label_type() const {
if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit" ||
likelihood_type_ == "poisson") {
return("int");
}
else {
return("double");
}
}
/*!
* \brief Checks whether the response variables (labels) have the correct values
* \param y_data Response variable data
* \param num_data Number of data points
*/
template <typename T>//T can be double or float
void CheckY(const T* y_data, const data_size_t num_data) const {
if (likelihood_type_ == "bernoulli_probit" || likelihood_type_ == "bernoulli_logit") {
//#pragma omp parallel for schedule(static)//problematic with error message below...
for (data_size_t i = 0; i < num_data; ++i) {
if (fabs(y_data[i]) >= EPSILON_ && !AreSame<T>(y_data[i], 1.)) {
Log::REFatal("Response variable (label) data needs to be 0 or 1 for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
else if (likelihood_type_ == "poisson") {
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] < 0) {
Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
}
else {
double intpart;
if (std::modf(y_data[i], &intpart) != 0.0) {
Log::REFatal("Found non-integer response variable. Response variable can only be integer valued for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
}
else if (likelihood_type_ == "gamma") {
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] < 0) {
Log::REFatal("Found negative response variable. Response variable cannot be negative for likelihood of type '%s'.", likelihood_type_.c_str());
}
}
}
}
/*!
* \brief Calculate normalizing constant for (log-)likelihood calculation
* \param y_data Response variable data
* \param num_data Number of data points
*/
template <typename T>//T can be double or int
void CalculateNormalizingConstant(const T* y_data, const data_size_t num_data) {
if (likelihood_type_ == "poisson") {
double log_normalizing_constant = 0.;
#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data[i] > 1) {
double log_factorial = 0.;
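// Accumulates log(y_i!) = sum_{k=2}^{y_i} log(k), i.e. std::lgamma(y_data[i] + 1.)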
for (int k = 2; k <= y_data[i]; ++k) {
log_factorial += std::log(k);
}
log_normalizing_constant += log_factorial;
}
}
log_normalizing_constant_ = log_normalizing_constant;
}
else if (likelihood_type_ == "gamma") {
// //Currently not used since aux_pars_[0]==1 and thus log_normalizing_constant_==0
// double log_normalizing_constant = 0.;
//#pragma omp parallel for schedule(static) reduction(+:log_normalizing_constant)
// for (data_size_t i = 0; i < num_data; ++i) {
// log_normalizing_constant += -(aux_pars_[0] - 1.) * std::log(y_data[i]) - aux_pars_[0] * std::log(aux_pars_[0]) + std::tgamma(aux_pars_[0]);
// }
// log_normalizing_constant_ = log_normalizing_constant;
log_normalizing_constant_ = 0. * y_data[0];//y_data[0] is just a trick to avoid compiler warnings complaining about unreferenced parameters...
}
normalizing_constant_has_been_calculated_ = true;
}
/*!
* \brief Evaluate the log-likelihood conditional on the latent variable (=location_par)
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
double LogLikelihood(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data) {
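// Per-observation log-likelihood terms evaluated below (f = location_par[i]):
//   bernoulli_probit: log(Phi(f)) if y = 1, log(1 - Phi(f)) if y = 0
//   bernoulli_logit:  y * f - log(1 + exp(f))
//   poisson:          y * f - exp(f), minus log(y!) via log_normalizing_constant_
//   gamma (shape a):  -a * (f + y * exp(-f)), minus log_normalizing_constant_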
if (!normalizing_constant_has_been_calculated_) {
Log::REFatal("The normalizing constant has not been calculated. Call 'CalculateNormalizingConstant' first.");
}
double ll = 0.;
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data_int[i] == 0) {
ll += std::log(1 - normalCDF(location_par[i]));
}
else {
ll += std::log(normalCDF(location_par[i]));
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
ll += y_data_int[i] * location_par[i] - std::log(1 + std::exp(location_par[i]));
//Alternative version:
//if (y_data_int[i] == 0) {
// ll += std::log(1 - CondMeanLikelihood(location_par[i]));//CondMeanLikelihood = logistic function
//}
//else {
// ll += std::log(CondMeanLikelihood(location_par[i]));
//}
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
ll += y_data_int[i] * location_par[i] - std::exp(location_par[i]);
}
ll -= log_normalizing_constant_;
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static) reduction(+:ll)
for (data_size_t i = 0; i < num_data; ++i) {
ll += -aux_pars_[0] * (location_par[i] + y_data[i] * std::exp(-location_par[i]));
}
ll -= log_normalizing_constant_;
}
return(ll);
}
/*!
* \brief Calculate the first derivative of the log-likelihood with respect to the location parameter
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
void CalcFirstDerivLogLik(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data) {
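// First derivatives d log p(y|f) / df computed below (f = location_par[i]):
//   bernoulli_probit: phi(f) / Phi(f) if y = 1, -phi(f) / (1 - Phi(f)) if y = 0
//   bernoulli_logit:  y - 1 / (1 + exp(-f))
//   poisson:          y - exp(f)
//   gamma (shape a):  a * (y * exp(-f) - 1)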
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
if (y_data_int[i] == 0) {
first_deriv_ll_[i] = -normalPDF(location_par[i]) / (1 - normalCDF(location_par[i]));
}
else {
first_deriv_ll_[i] = normalPDF(location_par[i]) / normalCDF(location_par[i]);
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_ll_[i] = y_data_int[i] - CondMeanLikelihood(location_par[i]);//CondMeanLikelihood = logistic(x)
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_ll_[i] = y_data_int[i] - std::exp(location_par[i]);
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_ll_[i] = aux_pars_[0] * (y_data[i] * std::exp(-location_par[i]) - 1.);
}
}
}
/*!
* \brief Calculate the second derivative of the negative (!) log-likelihood with respect to the location parameter
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
*/
void CalcSecondDerivNegLogLik(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data) {
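// Negative second derivatives -d^2 log p(y|f) / df^2 computed below; writing
// r = phi(f) / Phi(f) and s = phi(f) / (1 - Phi(f)) with f = location_par[i]:
//   bernoulli_probit: r * (f + r) if y = 1, -s * (f - s) if y = 0
//   bernoulli_logit:  exp(f) / (1 + exp(f))^2
//   poisson:          exp(f)
//   gamma (shape a):  a * y * exp(-f)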
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double dnorm = normalPDF(location_par[i]);
double pnorm = normalCDF(location_par[i]);
if (y_data_int[i] == 0) {
double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm);
second_deriv_neg_ll_[i] = -dnorm_frac_one_min_pnorm * (location_par[i] - dnorm_frac_one_min_pnorm);
}
else {
double dnorm_frac_pnorm = dnorm / pnorm;
second_deriv_neg_ll_[i] = dnorm_frac_pnorm * (location_par[i] + dnorm_frac_pnorm);
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double exp_loc_i = std::exp(location_par[i]);
second_deriv_neg_ll_[i] = exp_loc_i * std::pow(1. + exp_loc_i, -2);
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
second_deriv_neg_ll_[i] = std::exp(location_par[i]);
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
second_deriv_neg_ll_[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
}
}
}
/*!
* \brief Calculate the third derivative of the log-likelihood with respect to the location parameter
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param location_par Location parameter (random plus fixed effects)
* \param num_data Number of data points
* \param[out] third_deriv Third derivative of the log-likelihood with respect to the location parameter. Memory of size num_data needs to be pre-allocated
*/
void CalcThirdDerivLogLik(const double* y_data, const int* y_data_int,
const double* location_par, const data_size_t num_data, double* third_deriv) {
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double dnorm = normalPDF(location_par[i]);
double pnorm = normalCDF(location_par[i]);
if (y_data_int[i] == 0) {
double dnorm_frac_one_min_pnorm = dnorm / (1. - pnorm);
third_deriv[i] = dnorm_frac_one_min_pnorm * (1 - location_par[i] * location_par[i] +
dnorm_frac_one_min_pnorm * (3 * location_par[i] - 2 * dnorm_frac_one_min_pnorm));
}
else {
double dnorm_frac_pnorm = dnorm / pnorm;
third_deriv[i] = dnorm_frac_pnorm * (location_par[i] * location_par[i] - 1 +
dnorm_frac_pnorm * (3 * location_par[i] + 2 * dnorm_frac_pnorm));
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
double exp_loc_i = std::exp(location_par[i]);
third_deriv[i] = -exp_loc_i * (1. - exp_loc_i) * std::pow(1 + exp_loc_i, -3);
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
third_deriv[i] = -std::exp(location_par[i]);
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
third_deriv[i] = aux_pars_[0] * y_data[i] * std::exp(-location_par[i]);
}
}
}
/*!
* \brief Calculate the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double CondMeanLikelihood(const double value) const {
if (likelihood_type_ == "gaussian") {
return value;
}
else if (likelihood_type_ == "bernoulli_probit") {
return normalCDF(value);
}
else if (likelihood_type_ == "bernoulli_logit") {
return 1. / (1. + std::exp(-value));
}
else if (likelihood_type_ == "poisson") {
return std::exp(value);
}
else if (likelihood_type_ == "gamma") {
return std::exp(value);
}
else {
Log::REFatal("CondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
return 0.;
}
}
/*!
* \brief Calculate the first derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double FirstDerivLogCondMeanLikelihood(const double value) const {
if (likelihood_type_ == "bernoulli_logit") {
return 1. / (1. + std::exp(value));
}
else if (likelihood_type_ == "poisson") {
return 1.;
}
else if (likelihood_type_ == "gamma") {
return 1.;
}
else {
Log::REFatal("FirstDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
return 0.;
}
}
/*!
* \brief Calculate the second derivative of the logarithm of the mean of the likelihood conditional on the (predicted) latent variable
* Used for adaptive Gauss-Hermite quadrature for the prediction of the response variable
*/
inline double SecondDerivLogCondMeanLikelihood(const double value) const {
if (likelihood_type_ == "bernoulli_logit") {
double exp_x = std::exp(value);
return -exp_x / ((1. + exp_x) * (1. + exp_x));
}
else if (likelihood_type_ == "poisson") {
return 0.;
}
else if (likelihood_type_ == "gamma") {
return 0.;
}
else {
Log::REFatal("SecondDerivLogCondMeanLikelihood: Likelihood of type '%s' is not supported.", likelihood_type_.c_str());
return 0.;
}
}
/*!
* \brief Do Cholesky decomposition
* \param[out] chol_fact Cholesky factor
* \param psi Matrix for which the Cholesky decomposition should be done
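* The two overloads below are selected via std::enable_if: for sparse matrices the
* symbolic factorization pattern is analyzed once and reused across calls, while for
* dense matrices the factorization is recomputed directly.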
*/
template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type * = nullptr >
void CalcChol(T_chol& chol_fact, const T_mat_1& psi) {
if (!chol_fact_pattern_analyzed_) {
chol_fact.analyzePattern(psi);
chol_fact_pattern_analyzed_ = true;
}
chol_fact.factorize(psi);
}
template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type * = nullptr >
void CalcChol(T_chol& chol_fact, const T_mat_1& psi) {
chol_fact.compute(psi);
}
/*!
* \brief Apply permutation matrix of Cholesky factor (if it exists)
* \param chol_fact Cholesky factor
* \param[out] M Matrix to which the permutation is applied
*/
template <class T_mat_1, typename std::enable_if< std::is_same<sp_mat_t, T_mat_1>::value>::type * = nullptr >
void ApplyPermutationCholeskyFactor(const T_chol& chol_fact, T_mat_1& M) {
if (chol_fact.permutationP().size() > 0) {//Apply permutation if an ordering is used
M = chol_fact.permutationP() * M;
}
}
template <class T_mat_1, typename std::enable_if< std::is_same<den_mat_t, T_mat_1>::value>::type * = nullptr >
void ApplyPermutationCholeskyFactor(const T_chol&, T_mat_1&) {
}
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
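*
* Sketch of the Newton update implemented below (cf. the numerically stable scheme
* in Rasmussen & Williams, 2006, Algorithm 3.1; here Sigma stands for Z*Sigma*Z^T):
*   W     = -d^2 log p(y|f) / df^2          (second_deriv_neg_ll_)
*   B     = Id + W^(1/2) * Sigma * W^(1/2)
*   b     = W * f + d log p(y|f) / df       (rhs)
*   a     = b - W^(1/2) * B^(-1) * W^(1/2) * Sigma * b
*   f_new = Sigma * a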
*/
void FindModePostRandEffCalcMLLStable(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> ZSigmaZt,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
a_vec_previous_value_ = a_vec_;
}
bool no_fixed_effects = (fixed_effects == nullptr);
vec_t location_par;
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
if (no_fixed_effects) {
approx_marginal_ll = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
double approx_marginal_ll_new;
vec_t rhs, v_aux;//auxiliary variables
sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
Wsqrt.setIdentity();
T_mat Id(num_data, num_data);
Id.setIdentity();
T_mat Id_plus_Wsqrt_ZSigmaZt_Wsqrt;
// Start finding mode
int it;
bool terminate_optim = false;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
// Calculate Cholesky factor of matrix B = Id + Wsqrt * Z*Sigma*Zt * Wsqrt
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
// Update mode and a_vec_
rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array();
v_aux = Wsqrt * (*ZSigmaZt) * rhs;
a_vec_ = rhs - Wsqrt * (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux));
mode_ = (*ZSigmaZt) * a_vec_;
// Calculate new objective function
if (no_fixed_effects) {
approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
Log::REDebug(NA_OR_INF_WARNING_);
break;
}
if (it == 0) {
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
terminate_optim = true;
}
}
else {
if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
terminate_optim = true;
}
}
if (terminate_optim) {
if (approx_marginal_ll_new < approx_marginal_ll) {
Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
}
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug(NO_CONVERGENCE_WARNING_);
}
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
Id_plus_Wsqrt_ZSigmaZt_Wsqrt = Id + Wsqrt * (*ZSigmaZt) * Wsqrt;
CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_Wsqrt_ZSigmaZt_Wsqrt);
approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLStable");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("a");
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("a[%d]: %g", i, a_vec_[i]);
//}
}//end FindModePostRandEffCalcMLLStable
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
* a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
* This version is used for the Laplace approximation when there is only one Gaussian process and
* there are many repeated observations at the same location, i.e., the dimension of the random effects b is much smaller than that of Zb
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
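*
* Sketch of what differs from FindModePostRandEffCalcMLLStable: the Newton step is
* taken on the random-effects scale, so W is replaced by the diagonal matrix Z^T*W*Z
* (accumulated below by scatter-adding second_deriv_neg_ll_ over
* random_effects_indices_of_data) and B = Id + (Z^T*W*Z)^(1/2) * Sigma * (Z^T*W*Z)^(1/2).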
*/
void FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> Sigma,
const data_size_t * const random_effects_indices_of_data,
double& approx_marginal_ll) {
//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
//std::chrono::steady_clock::time_point begin, end;// only for debugging
//double el_time;
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
a_vec_previous_value_ = a_vec_;
}
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
approx_marginal_ll = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
double approx_marginal_ll_new;
vec_t diag_sqrt_ZtWZ(num_re_);//diagonal of Z^T W Z (later overwritten in place by its square root)
T_mat Id(num_re_, num_re_);
Id.setIdentity();
T_mat Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt;
vec_t rhs, v_aux;
int it;
bool terminate_optim = false;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
// Calculate right hand side for mode update
diag_sqrt_ZtWZ.setZero();
#pragma omp parallel
{
vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
//Non-parallel version
//for (data_size_t i = 0; i < num_data; ++i) {
// diag_sqrt_ZtWZ[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
//}
rhs = (diag_sqrt_ZtWZ.array() * mode_.array()).matrix();//rhs = ZtWZ * mode_ + Zt * first_deriv_ll_ for updating mode
#pragma omp parallel
{
vec_t rhs_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
rhs[i_re] += rhs_private[i_re];
}
}//end omp critical
}//end omp parallel
// Calculate Cholesky factor of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt
diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);//this is the bottleneck (for large data and sparse matrices)
////only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt: number non zeros = %d", GetNumberNonZeros<T_mat>(Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt));//only for debugging
//T_mat chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt = chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL();//only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_: number non zeros = %d", GetNumberNonZeros<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt));//only for debugging
// Update mode and a_vec_
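// Numerically stable Newton step in the spirit of Rasmussen & Williams (2006), Algorithm 3.1:
// a_vec_ = rhs - ZtWZsqrt * B^-1 * (ZtWZsqrt * Sigma * rhs) with B = Id + ZtWZsqrt * Sigma * ZtWZsqrt, and then mode_ = Sigma * a_vec_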
v_aux = (*Sigma) * rhs;
v_aux.array() *= diag_sqrt_ZtWZ.array();
a_vec_ = -chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.solve(v_aux);
a_vec_.array() *= diag_sqrt_ZtWZ.array();
a_vec_.array() += rhs.array();
mode_ = (*Sigma) * a_vec_;
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Calculate new objective function
approx_marginal_ll_new = -0.5 * (a_vec_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
Log::REDebug(NA_OR_INF_WARNING_);
break;
}
//Log::REInfo("it = %d, approx_marginal_ll = %g, approx_marginal_ll_new = %g", it, approx_marginal_ll, approx_marginal_ll_new);///Only for debugging
if (it == 0) {
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
terminate_optim = true;
}
}
else {
if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
terminate_optim = true;
}
}
if (terminate_optim) {
if (approx_marginal_ll_new < approx_marginal_ll) {
Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
}
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}//end loop for finding mode
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug(NO_CONVERGENCE_WARNING_);
}
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
diag_sqrt_ZtWZ.setZero();
#pragma omp parallel
{
vec_t diag_sqrt_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_sqrt_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_sqrt_ZtWZ[i_re] += diag_sqrt_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
diag_sqrt_ZtWZ.array() = diag_sqrt_ZtWZ.array().sqrt();
Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt = Id + diag_sqrt_ZtWZ.asDiagonal() * (*Sigma) * diag_sqrt_ZtWZ.asDiagonal();
CalcChol<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Id_plus_ZtWZsqrt_Sigma_ZtWZsqrt);
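// Log-determinant term of the Laplace approximation: log|Id + ZtWZsqrt*Sigma*ZtWZsqrt| = 2 * sum(log(diag(L))), where L is the lower Cholesky factor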
approx_marginal_ll -= ((T_mat)chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL()).diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("a");
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("a[%d]: %g", i, a_vec_[i]);
//}
//end = std::chrono::steady_clock::now();// only for debugging
//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale: TOTAL TIME for mode calculation: %g", el_time);// Only for debugging
}//end FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
sp_mat_t Z = Zt.transpose();
vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
approx_marginal_ll = -0.5 * (mode_.dot(SigmaI * mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
double approx_marginal_ll_new;
sp_mat_t SigmaI_plus_ZtWZ;
vec_t rhs;
// Start finding mode
int it;
bool terminate_optim = false;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
// Calculate Cholesky factor and update mode
rhs = Zt * first_deriv_ll_ - SigmaI * mode_;//right hand side for updating mode
SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
SigmaI_plus_ZtWZ.makeCompressed();
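// The sparsity pattern of SigmaI_plus_ZtWZ does not change across iterations, so the symbolic factorization needs to be done only once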
if (!chol_fact_pattern_analyzed_) {
chol_fact_SigmaI_plus_ZtWZ_grouped_.analyzePattern(SigmaI_plus_ZtWZ);
chol_fact_pattern_analyzed_ = true;
}
chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ);
mode_ += chol_fact_SigmaI_plus_ZtWZ_grouped_.solve(rhs);
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
location_par = Z * mode_;
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
// Calculate new objective function
approx_marginal_ll_new = -0.5 * (mode_.dot(SigmaI * mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
Log::REDebug(NA_OR_INF_WARNING_);
break;
}
if (it == 0) {
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
terminate_optim = true;
}
}
else {
if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
terminate_optim = true;
}
}
if (terminate_optim) {
if (approx_marginal_ll_new < approx_marginal_ll) {
Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
}
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}//end mode finding algorithm
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug(NO_CONVERGENCE_WARNING_);
}
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
SigmaI_plus_ZtWZ = SigmaI + Zt * second_deriv_neg_ll_.asDiagonal() * Z;
SigmaI_plus_ZtWZ.makeCompressed();
chol_fact_SigmaI_plus_ZtWZ_grouped_.factorize(SigmaI_plus_ZtWZ);
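// Log-determinant term: -sum(log(diag(L))) + 0.5 * log|Sigma^-1| = -0.5 * log|Sigma^-1 + Zt*W*Z| + 0.5 * log|Sigma^-1| = -0.5 * log|Id + Sigma*Zt*W*Z|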
approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL()).diagonal().array().log().sum() + 0.5 * SigmaI.diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLGroupedRE");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//double approx_marginal_ll_1 = -0.5 * (mode_.dot(SigmaI * mode_));
//double approx_marginal_ll_2 = LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
//double approx_marginal_ll_3 = 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() - 0.5 * SigmaI.diagonal().array().log().sum();
//Log::REInfo("approx_marginal_ll_1: %g", approx_marginal_ll_1);
//Log::REInfo("approx_marginal_ll_2: %g", approx_marginal_ll_2);
//Log::REInfo("approx_marginal_ll_3: %g", approx_marginal_ll_3);
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLGroupedRE
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
approx_marginal_ll = -0.5 / sigma2 * (mode_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
double approx_marginal_ll_new;
vec_t rhs;
diag_SigmaI_plus_ZtWZ_ = vec_t(num_re_);
// Start finding mode
int it;
bool terminate_optim = false;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
// Calculate rhs for mode update
rhs = - mode_ / sigma2;//right hand side for updating mode
#pragma omp parallel
{
vec_t rhs_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
rhs_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
rhs[i_re] += rhs_private[i_re];
}
}//end omp critical
}//end omp parallel
// Update mode
diag_SigmaI_plus_ZtWZ_.setZero();
#pragma omp parallel
{
vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2;
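// Since Sigma^-1 and Zt*W*Z are both diagonal here, the Newton step reduces to an elementwise division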
mode_ += (rhs.array() / diag_SigmaI_plus_ZtWZ_.array()).matrix();
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Calculate new objective function
approx_marginal_ll_new = -0.5 / sigma2 * (mode_.dot(mode_)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
Log::REDebug(NA_OR_INF_WARNING_);
break;
}
if (it == 0) {
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
terminate_optim = true;
}
}
else {
if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
terminate_optim = true;
}
}
if (terminate_optim) {
if (approx_marginal_ll_new < approx_marginal_ll) {
Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
}
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
}//end mode finding algorithm
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug(NO_CONVERGENCE_WARNING_);
}
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
diag_SigmaI_plus_ZtWZ_.setZero();
#pragma omp parallel
{
vec_t diag_SigmaI_plus_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_SigmaI_plus_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_SigmaI_plus_ZtWZ_[i_re] += diag_SigmaI_plus_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
diag_SigmaI_plus_ZtWZ_.array() += 1. / sigma2;
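// Log-determinant term: 0.5 * sum(log(diag(Sigma^-1 + ZtWZ))) + 0.5 * num_re_ * log(sigma2) = 0.5 * log|Id + sigma2 * ZtWZ|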
approx_marginal_ll -= 0.5 * diag_SigmaI_plus_ZtWZ_.array().log().sum() + 0.5 * num_re_ * std::log(sigma2);
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Find the mode of the posterior of the latent random effects using Newton's method and calculate the approximate marginal log-likelihood.
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W may not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param[out] approx_marginal_ll Approximate marginal log-likelihood evaluated at the mode
*/
void FindModePostRandEffCalcMLLVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
double& approx_marginal_ll) {
// Initialize variables
if (!mode_initialized_) {
InitializeModeAvec();
}
else {
mode_previous_value_ = mode_;
}
bool no_fixed_effects = (fixed_effects == nullptr);
sp_mat_t SigmaI = B.transpose() * D_inv * B;
vec_t location_par;//location parameter = mode of random effects + fixed effects
double approx_marginal_ll_new;
sp_mat_t SigmaI_plus_W;
vec_t rhs, B_mode;
// Initialize objective function (LA approx. marginal likelihood) for use as convergence criterion
B_mode = B * mode_;
if (no_fixed_effects) {
approx_marginal_ll = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
// Start finding mode
int it;
bool terminate_optim = false;
for (it = 0; it < MAXIT_MODE_NEWTON_; ++it) {
// Calculate first and second derivative of log-likelihood
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
// Calculate Cholesky factor and update mode
rhs.array() = second_deriv_neg_ll_.array() * mode_.array() + first_deriv_ll_.array();//right hand side for updating mode
SigmaI_plus_W = SigmaI;
SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array();
SigmaI_plus_W.makeCompressed();
//Calculation of the Cholesky factor is the bottleneck
if (!chol_fact_pattern_analyzed_) {
chol_fact_SigmaI_plus_ZtWZ_vecchia_.analyzePattern(SigmaI_plus_W);
chol_fact_pattern_analyzed_ = true;
}
chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W);//This is the bottleneck for large data
//Log::REInfo("SigmaI_plus_W: number non zeros = %d", (int)SigmaI_plus_W.nonZeros());//only for debugging
//Log::REInfo("chol_fact_SigmaI_plus_ZtWZ: Number non zeros = %d", (int)((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).nonZeros());//only for debugging
mode_ = chol_fact_SigmaI_plus_ZtWZ_vecchia_.solve(rhs);
// Calculate new objective function
B_mode = B * mode_;
if (no_fixed_effects) {
approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, mode_.data(), num_data);
}
else {
// Update location parameter of log-likelihood for calculation of approx. marginal log-likelihood (objective function)
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
approx_marginal_ll_new = -0.5 * (B_mode.dot(D_inv * B_mode)) + LogLikelihood(y_data, y_data_int, location_par.data(), num_data);
}
if (std::isnan(approx_marginal_ll_new) || std::isinf(approx_marginal_ll_new)) {
Log::REDebug(NA_OR_INF_WARNING_);
break;
}
if (it == 0) {
if (std::abs(approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) { // allow for decreases in first iteration
terminate_optim = true;
}
}
else {
if ((approx_marginal_ll_new - approx_marginal_ll) < DELTA_REL_CONV_ * std::abs(approx_marginal_ll)) {
terminate_optim = true;
}
}
if (terminate_optim) {
if (approx_marginal_ll_new < approx_marginal_ll) {
Log::REDebug(NO_INCREASE_IN_MLL_WARNING_);
}
approx_marginal_ll = approx_marginal_ll_new;
break;
}
else {
approx_marginal_ll = approx_marginal_ll_new;
}
} // end loop for mode finding
if (it == MAXIT_MODE_NEWTON_) {
Log::REDebug(NO_CONVERGENCE_WARNING_);
}
if (no_fixed_effects) {
CalcFirstDerivLogLik(y_data, y_data_int, mode_.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, mode_.data(), num_data);
}
else {
CalcFirstDerivLogLik(y_data, y_data_int, location_par.data(), num_data);//first derivative is not used here anymore but since it is reused in gradient calculation and in prediction, we calculate it once more
CalcSecondDerivNegLogLik(y_data, y_data_int, location_par.data(), num_data);
}
SigmaI_plus_W = SigmaI;
SigmaI_plus_W.diagonal().array() += second_deriv_neg_ll_.array();
SigmaI_plus_W.makeCompressed();
chol_fact_SigmaI_plus_ZtWZ_vecchia_.factorize(SigmaI_plus_W);
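// Log-determinant term: -sum(log(diag(L))) + 0.5 * log|D^-1| = -0.5 * log|Sigma^-1 + W| + 0.5 * log|Sigma^-1| = -0.5 * log|Id + Sigma*W|
// (using |Sigma^-1| = |B^T * D^-1 * B| = |D^-1| since B has a unit diagonal)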
approx_marginal_ll += -((sp_mat_t)chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL()).diagonal().array().log().sum() + 0.5 * D_inv.diagonal().array().log().sum();
mode_has_been_calculated_ = true;
////Only for debugging
//Log::REInfo("FindModePostRandEffCalcMLLVecchia");
//Log::REInfo("Number of iterations: %d", it);
//Log::REInfo("approx_marginal_ll: %g", approx_marginal_ll);
//Log::REInfo("Mode");
//for (int i = 0; i < 10; ++i) {
// Log::REInfo("mode_[%d]: %g", i, mode_[i]);
//}
//std::this_thread::sleep_for(std::chrono::milliseconds(200));
}//end FindModePostRandEffCalcMLLVecchia
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers to save memory and avoid holding a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxStable(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> ZSigmaZt,
const std::vector<std::shared_ptr<RECompBase<T_mat>>>& re_comps_cluster_i,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
bool no_fixed_effects = (fixed_effects == nullptr);
vec_t location_par;//location parameter = mode of random effects + fixed effects
T_mat L_inv_Wsqrt(num_data, num_data);//initialized as Wsqrt = diagonal matrix with square roots of the negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood); overwritten below with L\Wsqrt
L_inv_Wsqrt.setIdentity();
L_inv_Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
if (no_fixed_effects) {
CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data());
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
}
ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_Wsqrt);
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_Wsqrt);//L_inv_Wsqrt = L\Wsqrt
T_mat L_inv_Wsqrt_ZSigmaZt = L_inv_Wsqrt * (*ZSigmaZt);
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
T_mat WI_plus_Sigma_inv = L_inv_Wsqrt.transpose() * L_inv_Wsqrt;//WI_plus_Sigma_inv = Wsqrt * L^T\(L\Wsqrt) = (W^-1 + Sigma)^-1
// calculate gradient of approx. marginal log-likelihood wrt the mode
// note: use (i) (Sigma^-1 + W)^-1 = Sigma - Sigma*(W^-1 + Sigma)^-1*Sigma = ZSigmaZt - L_inv_Wsqrt_ZSigmaZt^T*L_inv_Wsqrt_ZSigmaZt and (ii) "Z=Id"
vec_t d_mll_d_mode = (-0.5 * ((*ZSigmaZt).diagonal() - ((T_mat)(L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt)).diagonal()).array() * third_deriv.array()).matrix();
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
int par_count = 0;
double explicit_derivative;
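// Chain rule for every covariance parameter: d mll / d par = explicit_derivative + d_mll_d_mode^T * d_mode_d_par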
for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) {
for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) {
std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.);
// calculate explicit derivative of approx. marginal log-likelihood
explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) + 0.5 * (WI_plus_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
v_aux = (*SigmaDeriv) * first_deriv_ll_;
d_mode_d_par = (v_aux.array() - ((*ZSigmaZt) * WI_plus_Sigma_inv * v_aux).array()).matrix();
cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
par_count++;
}
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < par_count; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
T_mat L_inv_Wsqrt_ZSigmaZt_sqr = L_inv_Wsqrt_ZSigmaZt.cwiseProduct(L_inv_Wsqrt_ZSigmaZt);
vec_t ZSigmaZtI_plus_W_inv_diag = (*ZSigmaZt).diagonal() - L_inv_Wsqrt_ZSigmaZt_sqr.transpose() * vec_t::Ones(L_inv_Wsqrt_ZSigmaZt_sqr.rows());// diagonal of (ZSigmaZt^-1 + W) ^ -1
vec_t d_mll_d_mode = (-0.5 * ZSigmaZtI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here
vec_t L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode = L_inv_Wsqrt_ZSigmaZt * d_mll_d_mode;// for implicit derivative
vec_t ZSigmaZtI_plus_W_inv_d_mll_d_mode = (*ZSigmaZt) * d_mll_d_mode - L_inv_Wsqrt_ZSigmaZt.transpose() * L_inv_Wsqrt_ZSigmaZt_d_mll_d_mode;
vec_t d_mll_d_F_implicit = (ZSigmaZtI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative
fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode - d_mll_d_F_implicit;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxStable
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done on the random effects (b) scale and not the "data scale" (Zb) using
* a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt).
* This version is used for the Laplace approximation when there is only one Gaussian process and
* there are many repeated observations at the same location, i.e., the dimension of the random effects b is much smaller than that of Zb
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param re_comps_cluster_i Vector with different random effects components. We pass the component pointers to save memory and avoid holding a large collection of gradient covariance matrices in memory//TODO: better way than passing this? (relying on all gradients in a vector can lead to large memory consumption)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> Sigma,
const data_size_t* const random_effects_indices_of_data,
const std::vector<std::shared_ptr<RECompBase<T_mat>>> & re_comps_cluster_i,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t & fixed_effect_grad,
bool calc_mode = false) {
//std::chrono::steady_clock::time_point beginall = std::chrono::steady_clock::now();// only for debugging
//std::chrono::steady_clock::time_point begin, end;// only for debugging
//double el_time;
CHECK(re_comps_cluster_i.size() == 1);
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
Sigma, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
// Calculate diagonal of ZtWZ and the diagonal matrix ZtWZsqrt
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ[i_re] += diag_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
T_mat L_inv_ZtWZsqrt(num_re_, num_re_);//initialized as ZtWZsqrt = diagonal matrix with the square root of the diagonal of ZtWZ; overwritten below with L\ZtWZsqrt
L_inv_ZtWZsqrt.setIdentity();
L_inv_ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt();
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
vec_t diag_ZtThirdDerivZ(num_re_);//diagonal of Zt * diag(third_deriv) * Z
diag_ZtThirdDerivZ.setZero();
#pragma omp parallel
{
vec_t diag_ZtThirdDerivZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_ZtThirdDerivZ_private[random_effects_indices_of_data[i]] += third_deriv[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtThirdDerivZ[i_re] += diag_ZtThirdDerivZ_private[i_re];
}
}//end omp critical
}//end omp parallel
ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, L_inv_ZtWZsqrt);
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(L_inv_ZtWZsqrt);//L_inv_ZtWZsqrt = L\ZtWZsqrt //This is the bottleneck (in this first part) for large data when using sparse matrices
T_mat L_inv_ZtWZsqrt_Sigma = L_inv_ZtWZsqrt * (*Sigma);
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt));//Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: L_inv_ZtWZsqrt_Sigma: number non zeros = %d", GetNumberNonZeros<T_mat>(L_inv_ZtWZsqrt_Sigma));//Only for debugging
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
vec_t ZtFirstDeriv(num_re_);//Zt * first_deriv_ll_
ZtFirstDeriv.setZero();
#pragma omp parallel
{
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
T_mat ZtWZI_Sigma_inv = L_inv_ZtWZsqrt.transpose() * L_inv_ZtWZsqrt;//ZtWZI_Sigma_inv = ZtWZsqrt * L^T\(L\ZtWZsqrt) = ((ZtWZ)^-1 + Sigma)^-1
// calculate gradient of approx. marginal log-likelihood wrt the mode
// note: use (i) (Sigma^-1 + ZtWZ)^-1 = Sigma - Sigma*((ZtWZ)^-1 + Sigma)^-1*Sigma = Sigma - L_inv_ZtWZsqrt_Sigma^T*L_inv_ZtWZsqrt_Sigma
vec_t d_mll_d_mode = (-0.5 * ((*Sigma).diagonal() - ((T_mat)(L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma)).diagonal()).array() * diag_ZtThirdDerivZ.array()).matrix();
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
vec_t v_aux;//auxiliary variable for calculating d_mode_d_par
int par_count = 0;
double explicit_derivative;
for (int j = 0; j < (int)re_comps_cluster_i.size(); ++j) {
for (int ipar = 0; ipar < re_comps_cluster_i[j]->NumCovPar(); ++ipar) {
std::shared_ptr<T_mat> SigmaDeriv = re_comps_cluster_i[j]->GetZSigmaZtGrad(ipar, true, 1.);
// calculate explicit derivative of approx. marginal log-likelihood
explicit_derivative = -0.5 * (double)(a_vec_.transpose() * (*SigmaDeriv) * a_vec_) +
0.5 * (ZtWZI_Sigma_inv.cwiseProduct(*SigmaDeriv)).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
v_aux = (*SigmaDeriv) * ZtFirstDeriv;
d_mode_d_par = (v_aux.array() - ((*Sigma) * ZtWZI_Sigma_inv * v_aux).array()).matrix();
cov_grad[par_count] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
par_count++;
}
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < par_count; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
T_mat L_inv_ZtWZsqrt_Sigma_sqr = L_inv_ZtWZsqrt_Sigma.cwiseProduct(L_inv_ZtWZsqrt_Sigma);
vec_t SigmaI_plus_ZtWZ_inv_diag = (*Sigma).diagonal() - L_inv_ZtWZsqrt_Sigma_sqr.transpose() * vec_t::Ones(L_inv_ZtWZsqrt_Sigma_sqr.rows());// diagonal of (Sigma^-1 + ZtWZ) ^ -1
vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_ZtWZ_inv_diag.array() * diag_ZtThirdDerivZ.array()).matrix();// gradient of approx. marginal likelihood wrt the mode
vec_t L_inv_ZtWZsqrt_Sigma_d_mll_d_mode = L_inv_ZtWZsqrt_Sigma * d_mll_d_mode;// for implicit derivative
vec_t SigmaI_plus_ZtWZ_inv_d_mll_d_mode = (*Sigma) * d_mll_d_mode - L_inv_ZtWZsqrt_Sigma.transpose() * L_inv_ZtWZsqrt_Sigma_d_mll_d_mode;
fixed_effect_grad = -first_deriv_ll_;
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
fixed_effect_grad[i] += -0.5 * third_deriv[i] * SigmaI_plus_ZtWZ_inv_diag[random_effects_indices_of_data[i]] -
second_deriv_neg_ll_[i] * SigmaI_plus_ZtWZ_inv_d_mll_d_mode[random_effects_indices_of_data[i]];
}
}//end calc_F_grad
//end = std::chrono::steady_clock::now();// only for debugging
//el_time = (double)(std::chrono::duration_cast<std::chrono::microseconds>(end - beginall).count()) / 1000000.;// Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale: TOTAL TIME: %g", el_time);// Only for debugging
}//end CalcGradNegMargLikelihoodLAApproxOnlyOneGPCalculationsOnREScale
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param cum_num_rand_eff_cluster_i Cumulative number of random effects over the random effects components (used to determine the indices belonging to every component)
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
std::vector<data_size_t> cum_num_rand_eff_cluster_i,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
int num_REs = (int)SigmaI.cols();//number of random effect realizations
int num_comps = (int)cum_num_rand_eff_cluster_i.size() - 1;//number of different random effect components
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + Zt*W*Z at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
sp_mat_t Z = Zt.transpose();
vec_t location_par = Z * mode_;//location parameter = mode of random effects + fixed effects
if (fixed_effects != nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] += fixed_effects[i];
}
}
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
// Calculate (Sigma^-1 + Zt*W*Z)^-1
sp_mat_t L_inv(num_REs, num_REs);
L_inv.setIdentity();
if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when having an ordering
L_inv = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * L_inv;
}
chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(L_inv);
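// (Sigma^-1 + Zt*W*Z)^-1 = L_inv^T * L_inv (the permutation, if any, is already contained in L_inv)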
sp_mat_t SigmaI_plus_ZtWZ_inv = L_inv.transpose() * L_inv;
// calculate gradient of approx. marginal likelihood wrt the mode
//Note: the calculation of d_mll_d_mode is the bottleneck of this function (corresponding lines below are indicated with * and, in particular, **)
vec_t d_mll_d_mode(num_REs);
sp_mat_t Zt_third_deriv = Zt * third_deriv.asDiagonal();//row i of this matrix equals Z.col(i) multiplied elementwise by third_deriv
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_REs; ++i) {
vec_t diag_d_W_d_mode_i = Zt_third_deriv.row(i);//*can be slow
//calculate Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
sp_mat_t Zt_d_W_d_mode_i_Z = (Zt * diag_d_W_d_mode_i.asDiagonal() * Z).pruned();//**can be very slow. Note that this is also slow when the middle diagonal matrix is a pruned sparse matrix
////Variant 2: slower
//sp_mat_t Zt_third_deriv_diag = sp_mat_t(((vec_t)Zt_third_deriv.row(i)).asDiagonal());
//sp_mat_t Zt_d_W_d_mode_i_Z = Zt * Zt_third_deriv_diag * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z = Z^T * diag(Z.col(i) * third_deriv) * Z
////Variant 3: slower
//vec_t Z_i = Z.col(i);// column number i of Z
//vec_t diag_d_W_d_mode_i = (Z_i.array() * third_deriv.array()).matrix();//diagonal of derivative of matrix W wrt random effect number i
//sp_mat_t Zt_d_W_d_mode_i_Z = Zt * diag_d_W_d_mode_i.asDiagonal() * Z;//= Z^T * diag(diag_d_W_d_mode_i) * Z
d_mll_d_mode[i] = -0.5 * (Zt_d_W_d_mode_i_Z.cwiseProduct(SigmaI_plus_ZtWZ_inv)).sum();
}
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
sp_mat_t ZtWZ = Zt * second_deriv_neg_ll_.asDiagonal() * Z;
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
vec_t v_aux;//auxiliary variable for caclulating d_mode_d_par
vec_t SigmaI_mode = SigmaI * mode_;
double explicit_derivative;
sp_mat_t I_j(num_REs, num_REs);//Diagonal matrix with 1 on the diagonal for all random effects of component j and 0's otherwise
sp_mat_t I_j_ZtWZ;
for (int j = 0; j < num_comps; ++j) {
// calculate explicit derivative of approx. marginal log-likelihood
std::vector<Triplet_t> triplets;//for constructing I_j
triplets.reserve(cum_num_rand_eff_cluster_i[j + 1] - cum_num_rand_eff_cluster_i[j]);
explicit_derivative = 0.;
for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
triplets.emplace_back(i, i, 1.);
explicit_derivative += SigmaI_mode[i] * mode_[i];
}
// Alternative version using parallelization (not faster)
//#pragma omp parallel
// {
// std::vector<Triplet_t> triplets_private;
// //triplets_private.reserve(cum_num_rand_eff_cluster_i[num_comps]);
//#pragma omp for nowait reduction(+:explicit_derivative)
// for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
// triplets_private.emplace_back(i, i, 1.);
// explicit_derivative += SigmaI_mode[i] * mode_[i];
// }
//#pragma omp critical
// triplets.insert(triplets.end(), triplets_private.begin(), triplets_private.end());
// }
//#pragma omp parallel for schedule(static) reduction(+:explicit_derivative)
// for (int i = cum_num_rand_eff_cluster_i[j]; i < cum_num_rand_eff_cluster_i[j + 1]; ++i) {
// explicit_derivative += SigmaI_mode[i] * mode_[i];
// }
explicit_derivative *= -0.5;
I_j.setFromTriplets(triplets.begin(), triplets.end());
I_j_ZtWZ = I_j * ZtWZ;
explicit_derivative += 0.5 * (SigmaI_plus_ZtWZ_inv.cwiseProduct(I_j_ZtWZ)).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
d_mode_d_par = SigmaI_plus_ZtWZ_inv * I_j * Zt * first_deriv_ll_;
cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
}
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxGroupedRE");
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < num_comps; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
vec_t d_detmll_d_F(num_data);
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data; ++i) {
sp_mat_t zi_zit = Zt.col(i) * Z.row(i);//=(Z.row(i)).transpose() * Z.row(i), i.e., the outer product of row i of Z with itself
d_detmll_d_F[i] = -0.5 * third_deriv[i] * (SigmaI_plus_ZtWZ_inv.cwiseProduct(zi_zit)).sum();
}
vec_t d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W = d_mll_d_mode.transpose() * SigmaI_plus_ZtWZ_inv * Zt * second_deriv_neg_ll_.asDiagonal();
fixed_effect_grad = -first_deriv_ll_ + d_detmll_d_F - d_mll_d_modeT_SigmaI_plus_ZtWZ_inv_Zt_W;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxGroupedRE
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and diagonal of Sigma^-1 + Zt*W*Z at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
sigma2, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
vec_t location_par(num_data);//location parameter = mode of random effects + fixed effects
if (fixed_effects == nullptr) {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]];
}
}
else {
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[random_effects_indices_of_data[i]] + fixed_effects[i];
}
}
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
// calculate gradient of approx. marginal likelihood wrt the mode
vec_t d_mll_d_mode = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t third_deriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
third_deriv_private[random_effects_indices_of_data[i]] += third_deriv[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
d_mll_d_mode[i_re] += third_deriv_private[i_re];
}
}//end omp critical
}//end omp parallel
d_mll_d_mode.array() /= -2. * diag_SigmaI_plus_ZtWZ_.array();
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ[i_re] += diag_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
double explicit_derivative = -0.5 * (mode_.array() * mode_.array()).sum() / sigma2 +
0.5 * (diag_ZtWZ.array() / diag_SigmaI_plus_ZtWZ_.array()).sum();
// calculate implicit derivative (through mode) of approx. marginal log-likelihood
vec_t d_mode_d_par = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t first_deriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
first_deriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
d_mode_d_par[i_re] += first_deriv_private[i_re];
}
}//end omp critical
}//end omp parallel
d_mode_d_par.array() /= diag_SigmaI_plus_ZtWZ_.array();
cov_grad[0] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad[0]: %g", cov_grad[0]);
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < num_data; ++i) {
fixed_effect_grad[i] = -first_deriv_ll_[i] -
0.5 * third_deriv[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]] - //=d_detmll_d_F
d_mll_d_mode[random_effects_indices_of_data[i]] * second_deriv_neg_ll_[i] / diag_SigmaI_plus_ZtWZ_[random_effects_indices_of_data[i]];//=implicit derivative = d_mll_d_mode * d_mode_d_F
}
////Only for debugging
//Log::REInfo("CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale");
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("fixed_effect_grad[%d]: %g", i, fixed_effect_grad[i]);
//}
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Calculate the gradient of the negative Laplace approximated marginal log-likelihood wrt covariance parameters, fixed effects, or linear regression coefficients
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W may not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param B_grad Derivatives of matrices B ( = derivative of matrix -A) for Vecchia approximation
* \param D_grad Derivatives of matrices D for Vecchia approximation
* \param calc_cov_grad If true, the gradient wrt the covariance parameters is calculated
* \param calc_F_grad If true, the gradient wrt the fixed effects mean function F is calculated
* \param[out] cov_grad Gradient of approximate marginal log-likelihood wrt covariance parameters (needs to be preallocated of size num_cov_par)
* \param[out] fixed_effect_grad Gradient of approximate marginal log-likelihood wrt fixed effects F (note: this is passed as an Eigen vector in order to avoid the need for copying)
* \param calc_mode If true, the mode of the random effects posterior is calculated otherwise the values in mode and a_vec_ are used (default=false)
*/
void CalcGradNegMargLikelihoodLAApproxVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
const std::vector<sp_mat_t>& B_grad,
const std::vector<sp_mat_t>& D_grad,
bool calc_cov_grad,
bool calc_F_grad,
double* cov_grad,
vec_t& fixed_effect_grad,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
// Initialize variables
bool no_fixed_effects = (fixed_effects == nullptr);
vec_t location_par;//location parameter = mode of random effects + fixed effects
vec_t third_deriv(num_data);//vector of third derivatives of log-likelihood
if (no_fixed_effects) {
CalcThirdDerivLogLik(y_data, y_data_int, mode_.data(), num_data, third_deriv.data());
}
else {
location_par = vec_t(num_data);
#pragma omp parallel for schedule(static)
for (data_size_t i = 0; i < num_data; ++i) {
location_par[i] = mode_[i] + fixed_effects[i];
}
CalcThirdDerivLogLik(y_data, y_data_int, location_par.data(), num_data, third_deriv.data());
}
// Calculate (Sigma^-1 + W)^-1
sp_mat_t L_inv(num_data, num_data);
L_inv.setIdentity();
if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when having an ordering
L_inv = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * L_inv;
}
chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(L_inv);
// calculate gradient wrt covariance parameters
if (calc_cov_grad) {
sp_mat_t SigmaI_plus_W_inv = L_inv.transpose() * L_inv;//Note: this is the computational bottleneck for large data
vec_t d_mll_d_mode = -0.5 * (SigmaI_plus_W_inv.diagonal().array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode
vec_t d_mode_d_par;//derivative of mode wrt to a covariance parameter
double explicit_derivative;
int num_par = (int)B_grad.size();
sp_mat_t SigmaI_deriv;
sp_mat_t BgradT_Dinv_B;
sp_mat_t Bt_Dinv_Bgrad;
for (int j = 0; j < num_par; ++j) {
SigmaI_deriv = B_grad[j].transpose() * D_inv * B;
Bt_Dinv_Bgrad = SigmaI_deriv.transpose();
SigmaI_deriv += Bt_Dinv_Bgrad - B.transpose() * D_inv * D_grad[j] * D_inv * B;
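// SigmaI_deriv = d(B^T * D^-1 * B)/d par_j = B_grad^T * D^-1 * B + B^T * D^-1 * B_grad - B^T * D^-1 * D_grad * D^-1 * B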
d_mode_d_par = -SigmaI_plus_W_inv * SigmaI_deriv * mode_;
explicit_derivative = 0.5 * mode_.dot(SigmaI_deriv * mode_) +
0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
// Alternative version (not faster)
//vec_t u = D_inv * B * mode_;
//vec_t uk = B_grad[j] * mode_;
//explicit_derivative = uk.dot(u) - 0.5 * u.dot(D_grad[j] * u) +
// 0.5 * ((D_inv.diagonal().array() * D_grad[j].diagonal().array()).sum() + (SigmaI_deriv.cwiseProduct(SigmaI_plus_W_inv)).sum());
cov_grad[j] = explicit_derivative + d_mll_d_mode.dot(d_mode_d_par);
}
////Only for debugging
//Log::REInfo("explicit_derivative: %g", explicit_derivative);
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mll_d_mode[%d]: %g", i, d_mll_d_mode[i]);
//}
//for (int i = 0; i < 5; ++i) {
// Log::REInfo("d_mode_d_par[%d]: %g", i, d_mode_d_par[i]);
//}
//Log::REInfo("cov_grad");
//for (int i = 0; i < num_par; ++i) {
// Log::REInfo("cov_grad[%d]: %g", i, cov_grad[i]);
//}
}//end calc_cov_grad
// calculate gradient wrt fixed effects
if (calc_F_grad) {
sp_mat_t L_inv_sqr = L_inv.cwiseProduct(L_inv);
vec_t SigmaI_plus_W_inv_diag = L_inv_sqr.transpose() * vec_t::Ones(L_inv_sqr.rows());// diagonal of (Sigma^-1 + W) ^ -1
vec_t d_mll_d_mode = (-0.5 * SigmaI_plus_W_inv_diag.array() * third_deriv.array()).matrix();// gradient of approx. marginal likelihood wrt the mode and thus also F here
vec_t L_inv_d_mll_d_mode = L_inv * d_mll_d_mode;// for implicit derivative
vec_t SigmaI_plus_W_inv_d_mll_d_mode = L_inv.transpose() * L_inv_d_mll_d_mode;
vec_t d_mll_d_F_implicit = -(SigmaI_plus_W_inv_d_mll_d_mode.array() * second_deriv_neg_ll_.array()).matrix();// implicit derivative
fixed_effect_grad = -first_deriv_ll_ + d_mll_d_mode + d_mll_d_F_implicit;
}//end calc_F_grad
}//end CalcGradNegMargLikelihoodLAApproxVecchia
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + Wsqrt * Z*Sigma*Zt * Wsqrt).
* In the notation of the paper: "Sigma = Z*Sigma*Z^T" and "Z = Id".
* This version is used for the Laplace approximation when dense matrices are used (e.g. GP models).
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param ZSigmaZt Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise, the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxStable(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> ZSigmaZt,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + Wsqrt * ZSigmaZt * Wsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLStable(y_data, y_data_int, fixed_effects, num_data, ZSigmaZt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
sp_mat_t Wsqrt(num_data, num_data);//diagonal matrix with square root of negative second derivatives on the diagonal (sqrt of negative Hessian of log-likelihood)
Wsqrt.setIdentity();
Wsqrt.diagonal().array() = second_deriv_neg_ll_.array().sqrt();
T_mat Maux = Wsqrt * Cross_Cov.transpose();
ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux);
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux);
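// Maux now holds L\(Wsqrt * Cross_Cov^T) with L * L^T = B = Id + Wsqrt * ZSigmaZt * Wsqrt,
// so Maux^T * Maux = Cross_Cov * Wsqrt * B^-1 * Wsqrt * Cross_Cov^T (the correction subtracted below)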
if (calc_pred_cov) {
pred_cov -= Maux.transpose() * Maux;
}
if (calc_pred_var) {
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] -= Maux.col(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxStable");
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]);
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxStable
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done using a numerically stable variant based on factorizing ("inverting") B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt),
* i.e., calculations are done on the scale of the random effects and not of the data.
* This version is used for the Laplace approximation when there is only one GP.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param Sigma Covariance matrix of latent random effect (can be den_mat_t or sp_mat_t)
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise, the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxOnlyOneGPCalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const std::shared_ptr<T_mat> Sigma,
const data_size_t* const random_effects_indices_of_data,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of B = (Id + ZtWZsqrt * Sigma * ZtWZsqrt) at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGPCalculationsOnREScale(y_data, y_data_int, fixed_effects,
num_data, Sigma, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
vec_t ZtFirstDeriv = vec_t::Zero(num_re_);// Z^T * (first derivative of log-likelihood)
#pragma omp parallel
{
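// array reduction: every thread accumulates into a private copy which is merged once
// in a critical section below (avoids per-element atomics)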
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
pred_mean = Cross_Cov * ZtFirstDeriv;
if (calc_pred_cov || calc_pred_var) {
vec_t diag_ZtWZ = vec_t::Zero(num_re_);
#pragma omp parallel
{
vec_t diag_ZtWZ_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
diag_ZtWZ_private[random_effects_indices_of_data[i]] += second_deriv_neg_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
diag_ZtWZ.array()[i_re] += diag_ZtWZ_private[i_re];
}
}//end omp critical
}//end omp parallel
sp_mat_t ZtWZsqrt(num_re_, num_re_);//diagonal matrix with square root of diagonal of ZtWZ
ZtWZsqrt.setIdentity();
ZtWZsqrt.diagonal().array() = diag_ZtWZ.array().sqrt();
T_mat Maux = ZtWZsqrt * Cross_Cov.transpose();
ApplyPermutationCholeskyFactor<T_mat>(chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, Maux);
chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_.matrixL().solveInPlace(Maux);//Maux = L\(ZtWZsqrt * Cross_Cov^T)
if (calc_pred_cov) {
pred_cov -= Maux.transpose() * Maux;
}
if (calc_pred_var) {
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] -= Maux.col(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxOnlyOneGPCalculationsOnREScale");
//for (int i = 0; i < 3; ++i) {
// if (Cross_Cov.rows() > 1) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
// }
// else {
// Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
// }
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxOnlyOneGPCalculationsOnREScale
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* NOTE: IT IS ASSUMED THAT SIGMA IS A DIAGONAL MATRIX
* This version is used for the Laplace approximation when there are only grouped random effects.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param SigmaI Inverse covariance matrix of latent random effect. Currently, this needs to be a diagonal matrix
* \param Zt Transpose Z^T of random effect design matrix that relates latent random effects to observations/likelihoods
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise, the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxGroupedRE(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& SigmaI,
const sp_mat_t& Zt,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + Zt*W*Z at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLGroupedRE(y_data, y_data_int, fixed_effects, num_data, SigmaI, Zt, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
// calculate Maux = L\(Z^T * second_deriv_neg_ll_.asDiagonal() * Cross_Cov^T)
T_mat Maux = Zt * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose();
if (chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP().size() > 0) {//Permutation is only used when there is an ordering
Maux = chol_fact_SigmaI_plus_ZtWZ_grouped_.permutationP() * Maux;
}
chol_fact_SigmaI_plus_ZtWZ_grouped_.matrixL().solveInPlace(Maux);
if (calc_pred_cov) {
pred_cov += Maux.transpose() * Maux - (T_mat)(Cross_Cov * second_deriv_neg_ll_.asDiagonal() * Cross_Cov.transpose());
}
if (calc_pred_var) {
T_mat Maux3 = Cross_Cov.cwiseProduct(Cross_Cov * second_deriv_neg_ll_.asDiagonal());
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux.col(i).sum() - Maux3.row(i).sum();
}
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxGroupedRE");
//for (int i = 0; i < 3; ++i) {
// if (Cross_Cov.rows() > 1) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
// }
// else {
// Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
// }
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("first_deriv_ll_[%d]: %g", i, first_deriv_ll_[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(), 3); ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxGroupedRE
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by directly factorizing ("inverting") (Sigma^-1 + Zt*W*Z).
* This version is used for the Laplace approximation when there are only grouped random effects with only one grouping variable.
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param sigma2 Variance of random effects
* \param random_effects_indices_of_data Indices that indicate to which random effect every data point is related
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise, the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxOnlyOneGroupedRECalculationsOnREScale(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const double sigma2,
const data_size_t* const random_effects_indices_of_data,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and diagonal of Sigma^-1 + Zt*W*Z at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLOnlyOneGroupedRECalculationsOnREScale(y_data, y_data_int, fixed_effects, num_data,
sigma2, random_effects_indices_of_data, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
vec_t ZtFirstDeriv = vec_t::Zero(num_re_);// Z^T * (first derivative of log-likelihood)
#pragma omp parallel
{
vec_t ZtFirstDeriv_private = vec_t::Zero(num_re_);
#pragma omp for
for (data_size_t i = 0; i < num_data; ++i) {
ZtFirstDeriv_private[random_effects_indices_of_data[i]] += first_deriv_ll_[i];
}
#pragma omp critical
{
for (data_size_t i_re = 0; i_re < num_re_; ++i_re) {
ZtFirstDeriv[i_re] += ZtFirstDeriv_private[i_re];
}
}//end omp critical
}//end omp parallel
pred_mean = Cross_Cov * ZtFirstDeriv;
vec_t diag_Sigma_plus_ZtWZI = vec_t(num_re_);
diag_Sigma_plus_ZtWZI.array() = 1. / diag_SigmaI_plus_ZtWZ_.array();
diag_Sigma_plus_ZtWZI.array() /= sigma2;
diag_Sigma_plus_ZtWZI.array() -= 1.;
diag_Sigma_plus_ZtWZI.array() /= sigma2;
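// diag_Sigma_plus_ZtWZI now holds the diagonal of Sigma^-1 * (Sigma^-1 + ZtWZ)^-1 * Sigma^-1 - Sigma^-1
// (with Sigma = sigma2 * Id), i.e. the posterior correction applied via Cross_Cov below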
if (calc_pred_cov) {
T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal() * Cross_Cov.transpose();
pred_cov += Maux;
}
if (calc_pred_var) {
T_mat Maux = Cross_Cov * diag_Sigma_plus_ZtWZI.asDiagonal();
T_mat Maux2 = Cross_Cov.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux2.row(i).sum();
}
}
////Only for debugging
//Log::REInfo("PredictLAApproxOnlyOneGroupedRECalculationsOnREScale");
//for (int i = 0; i < 3; ++i) {
// if (Cross_Cov.rows() > 1) {
// Log::REInfo("Cross_Cov[0:1,%d]: %g, %g", i, Cross_Cov.coeff(0, i), Cross_Cov.coeff(1, i));
// }
// else {
// Log::REInfo("Cross_Cov[0,%d]: %g", i, Cross_Cov.coeff(0, i));
// }
//}
//for (int i = 0; i < 3; ++i) {
// Log::REInfo("ZtFirstDeriv[%d]: %g", i, ZtFirstDeriv[i]);
//}
//for (int i = 0; i < std::min((int)pred_mean.size(),3); ++i) {
// Log::REInfo("pred_mean[%d]: %g", i, pred_mean[i]);
//}
//if (calc_pred_var) {
// for (int i = 0; i < 3; ++i) {
// Log::REInfo("pred_var[%d]: %g", i, pred_var[i]);
// }
//}
}//end PredictLAApproxOnlyOneGroupedRECalculationsOnREScale
/*!
* \brief Make predictions for the (latent) random effects when using the Laplace approximation.
* Calculations are done by factorizing ("inverting") (Sigma^-1 + W) where it is assumed that an approximate Cholesky factor
* of Sigma^-1 has previously been calculated using a Vecchia approximation.
* This version is used for the Laplace approximation when there are only GP random effects and the Vecchia approximation is used.
* Caveat: Sigma^-1 + W may not be very sparse
* \param y_data Response variable data if response variable is continuous
* \param y_data_int Response variable data if response variable is integer-valued (only one of these two is used)
* \param fixed_effects Fixed effects component of location parameter
* \param num_data Number of data points
* \param B Matrix B in Vecchia approximation Sigma^-1 = B^T D^-1 B ("=" Cholesky factor)
* \param D_inv Diagonal matrix D^-1 in Vecchia approximation Sigma^-1 = B^T D^-1 B
* \param Cross_Cov Cross covariance matrix between predicted and observed random effects ("=Cov(y_p,y)")
* \param[out] pred_mean Predicted mean
* \param[out] pred_cov Predicted covariance matrix
* \param[out] pred_var Predicted variances
* \param calc_pred_cov If true, predictive covariance matrix is also calculated
* \param calc_pred_var If true, predictive variances are also calculated
* \param calc_mode If true, the mode of the random effects posterior is calculated; otherwise, the values in mode and a_vec_ are used (default=false)
*/
void PredictLAApproxVecchia(const double* y_data,
const int* y_data_int,
const double* fixed_effects,
const data_size_t num_data,
const sp_mat_t& B,
const sp_mat_t& D_inv,
const T_mat& Cross_Cov,
vec_t& pred_mean,
T_mat& pred_cov,
vec_t& pred_var,
bool calc_pred_cov = false,
bool calc_pred_var = false,
bool calc_mode = false) {
if (calc_mode) {// Calculate mode and Cholesky factor of Sigma^-1 + W at mode
double mll;//approximate marginal likelihood. This is a by-product that is not used here.
FindModePostRandEffCalcMLLVecchia(y_data, y_data_int, fixed_effects, num_data, B, D_inv, mll);
}
else {
CHECK(mode_has_been_calculated_);
}
pred_mean = Cross_Cov * first_deriv_ll_;
if (calc_pred_cov || calc_pred_var) {
T_mat SigmaI_CrossCovT = B.transpose() * D_inv * B * Cross_Cov.transpose();
T_mat Maux = SigmaI_CrossCovT;// after the solve below: Maux = L\(Sigma^-1 * Cross_Cov^T), L = Chol(Sigma^-1 + W)
if (chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP().size() > 0) {//Permutation is only used when there is an ordering
Maux = chol_fact_SigmaI_plus_ZtWZ_vecchia_.permutationP() * Maux;
}
chol_fact_SigmaI_plus_ZtWZ_vecchia_.matrixL().solveInPlace(Maux);
if (calc_pred_cov) {
pred_cov += -Cross_Cov * SigmaI_CrossCovT + Maux.transpose() * Maux;
}
if (calc_pred_var) {
Maux = Maux.cwiseProduct(Maux);
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] += Maux.col(i).sum() - (Cross_Cov.row(i)).dot(SigmaI_CrossCovT.col(i));
}
}
}
}//end PredictLAApproxVecchia
/*!
* \brief Make predictions for the response variable (label) based on predictions for the mean and variance of the latent random effects
* \param[out] pred_mean Predicted mean of latent random effects. The predicted mean for the response variables is written on this
* \param[out] pred_var Predicted variances of latent random effects. The predicted variance for the response variables is written on this
* \param predict_var If true, predictive variances are also calculated
*/
void PredictResponse(vec_t& pred_mean, vec_t& pred_var, bool predict_var = false) {
if (likelihood_type_ == "bernoulli_probit") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_mean[i] = normalCDF(pred_mean[i] / std::sqrt(1. + pred_var[i]));
}
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] = pred_mean[i] * (1. - pred_mean[i]);
}
}
}
else if (likelihood_type_ == "bernoulli_logit") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_mean[i] = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]);
}
if (predict_var) {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
pred_var[i] = pred_mean[i] * (1. - pred_mean[i]);
}
}
}
else if (likelihood_type_ == "poisson") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]);
if (predict_var) {
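// law of total variance: Var(y) = E[Var(y|lambda)] + Var(E[y|lambda]) = E[lambda] + E[lambda^2] - E[lambda]^2,
// where psm = E[exp(2 * latent)] = E[lambda^2] since lambda = exp(latent)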
double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]);
pred_var[i] = psm - pm * pm + pm;
}
pred_mean[i] = pm;
}
}
else if (likelihood_type_ == "gamma") {
#pragma omp parallel for schedule(static)
for (int i = 0; i < (int)pred_mean.size(); ++i) {
double pm = RespMeanAdaptiveGHQuadrature(pred_mean[i], pred_var[i]);
if (predict_var) {
double psm = RespMeanAdaptiveGHQuadrature(2 * pred_mean[i], 4 * pred_var[i]);
pred_var[i] = psm - pm * pm + psm / aux_pars_[0];
}
pred_mean[i] = pm;
}
}
}
/*!
* \brief Adaptive GH quadrature to calculate predictive mean of response variable
* \param latent_mean Predicted mean of the latent random effect
* \param latent_var Predicted variance of the latent random effect
* \return Predictive mean of the response variable
*/
double RespMeanAdaptiveGHQuadrature(const double latent_mean, const double latent_var) {
// Find mode of integrand
double mode_integrand, mode_integrand_last, update;
mode_integrand = 0.;
double sigma2_inv = 1. / latent_var;
double sqrt_sigma2_inv = std::sqrt(sigma2_inv);
for (int it = 0; it < 100; ++it) {
mode_integrand_last = mode_integrand;
update = (FirstDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv * (mode_integrand - latent_mean))
/ (SecondDerivLogCondMeanLikelihood(mode_integrand) - sigma2_inv);
mode_integrand -= update;
if (std::abs(update) / std::abs(mode_integrand_last) < DELTA_REL_CONV_) {
break;
}
}
// Adaptive GH quadrature
double sqrt2_sigma_hat = M_SQRT2 / std::sqrt(-SecondDerivLogCondMeanLikelihood(mode_integrand) + sigma2_inv);
double x_val;
double mean_resp = 0.;
for (int j = 0; j < order_GH_; ++j) {
x_val = sqrt2_sigma_hat * GH_nodes_[j] + mode_integrand;
mean_resp += adaptive_GH_weights_[j] * CondMeanLikelihood(x_val) * normalPDF(sqrt_sigma2_inv * (x_val - latent_mean));
}
mean_resp *= sqrt2_sigma_hat * sqrt_sigma2_inv;
return mean_resp;
}
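// Hedged sketch (not part of the library) illustrating the quadrature rule above: for a
// Gauss-Hermite rule with nodes x_j and weights w_j, plain (non-adaptive) quadrature for
// E[g(X)] with X ~ N(m, v) reads E[g(X)] ~= (1/sqrt(pi)) * sum_j w_j * g(m + sqrt(2*v) * x_j).
// E.g., with the order-3 rule (nodes {0, +-sqrt(6)/2}, weights {2*sqrt(pi)/3, sqrt(pi)/6, sqrt(pi)/6})
// and g = exp, this approximates the closed form E[exp(X)] = exp(m + v/2):
//   const double x[3] = { 0., std::sqrt(6.) / 2., -std::sqrt(6.) / 2. };
//   const double w[3] = { 2. * std::sqrt(M_PI) / 3., std::sqrt(M_PI) / 6., std::sqrt(M_PI) / 6. };
//   double m = 0.1, v = 0.25, sum = 0.;
//   for (int j = 0; j < 3; ++j) sum += w[j] * std::exp(m + std::sqrt(2. * v) * x[j]);
//   sum /= std::sqrt(M_PI);// ~= std::exp(m + v / 2.) = 1.252...
// RespMeanAdaptiveGHQuadrature additionally recenters and rescales the rule at the mode of the
// integrand (hence "adaptive"), which is more accurate when the integrand is far from Gaussian.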
template <typename T>//T can be double or float
bool AreSame(const T a, const T b) const {
return std::fabs(a - b) < std::fabs(a) * EPSILON_;
}
private:
/*! \brief Number of data points */
data_size_t num_data_;
/*! \brief Number (dimension) of random effects */
data_size_t num_re_;
/*! \brief Posterior mode used for Laplace approximation */
vec_t mode_;
/*! \brief Saving a previously found value allows resetting the mode when the step size was too large. */
vec_t mode_previous_value_;
/*! \brief Auxiliary variable a=ZSigmaZt^-1 mode_b used for Laplace approximation */
vec_t a_vec_;
/*! \brief Saving a previously found value allows resetting the mode when the step size was too large. */
vec_t a_vec_previous_value_;
/*! \brief Indicates whether the vector a_vec_ / a=ZSigmaZt^-1 is used or not */
bool has_a_vec_;
/*! \brief First derivatives of the log-likelihood */
vec_t first_deriv_ll_;
/*! \brief Second derivatives of the negative log-likelihood (diagonal of matrix "W") */
vec_t second_deriv_neg_ll_;
/*! \brief Diagonal of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'GroupedRE' when there is only one random effect and ZtWZ is diagonal. Otherwise, 'chol_fact_SigmaI_plus_ZtWZ_grouped_' is used for grouped REs) */
vec_t diag_SigmaI_plus_ZtWZ_;
/*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'GroupedRE' if there is more than one random effect). */
chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_grouped_;
/*! \brief Cholesky factors of matrix Sigma^-1 + Zt * W * Z in Laplace approximation (used only in version 'Vecchia') */
chol_sp_mat_AMDOrder_t chol_fact_SigmaI_plus_ZtWZ_vecchia_;
//Note: chol_sp_mat_AMDOrder_t (AMD permutation) is faster than chol_sp_mat_t (no permutation) for the Vecchia approximation, but for the grouped random effects the difference is small.
// chol_sp_mat_COLAMDOrder_t is slower than no ordering or chol_sp_mat_AMDOrder_t for both grouped random effects and the Vecchia approximation
/*!
* \brief Cholesky factors of matrix B = I + Wsqrt * Z * Sigma * Zt * Wsqrt in Laplace approximation (for version 'Stable')
* or of matrix B = Id + ZtWZsqrt * Sigma * ZtWZsqrt (for version 'OnlyOneGPCalculationsOnREScale')
*/
T_chol chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_;
/*! \brief If true, the pattern for the Cholesky factor (chol_fact_Id_plus_Wsqrt_Sigma_Wsqrt_, chol_fact_SigmaI_plus_ZtWZ_grouped_, or chol_fact_SigmaI_plus_ZtWZ_vecchia_) has been analyzed */
bool chol_fact_pattern_analyzed_ = false;
/*! \brief If true, the mode has been initialized to 0 */
bool mode_initialized_ = false;
/*! \brief If true, the mode has been determined */
bool mode_has_been_calculated_ = false;
/*! \brief If true, the normalizing constant of the likelihood has been calculated */
bool normalizing_constant_has_been_calculated_ = false;
/*! \brief Normalizing constant for likelihoods (not all likelihoods have one) */
double log_normalizing_constant_;
/*! \brief Type of likelihood */
string_t likelihood_type_ = "gaussian";
/*! \brief List of supported likelihoods */
const std::set<string_t> SUPPORTED_LIKELIHOODS_{ "gaussian", "bernoulli_probit", "bernoulli_logit", "poisson", "gamma" };
/*! \brief Tolerance level when comparing two doubles for equality */
double EPSILON_ = 1e-6;
/*! \brief Maximal number of iteration done for finding posterior mode with Newton's method */
int MAXIT_MODE_NEWTON_ = 1000;
/*! \brief Used for checking convergence in the mode finding algorithm (terminate if relative change in Laplace approx. is below this value) */
double DELTA_REL_CONV_ = 1e-6;
/*! \brief Additional parameters for likelihoods. For gamma, aux_pars_[0] = shape parameter */
std::vector<double> aux_pars_;
string_t ParseLikelihoodAlias(const string_t& likelihood) {
if (likelihood == string_t("binary") || likelihood == string_t("bernoulli_probit") || likelihood == string_t("binary_probit")) {
return "bernoulli_probit";
}
else if (likelihood == string_t("gaussian") || likelihood == string_t("regression")) {
return "gaussian";
}
return likelihood;
}
/*! \brief Order of the Gauss-Hermite quadrature */
int order_GH_ = 30;
/*! \brief Nodes and weights for the Gauss-Hermite quadrature */
// Source: https://keisan.casio.com/exec/system/1281195844
const std::vector<double> GH_nodes_ = { -6.863345293529891581061,
-6.138279220123934620395,
-5.533147151567495725118,
-4.988918968589943944486,
-4.48305535709251834189,
-4.003908603861228815228,
-3.544443873155349886925,
-3.099970529586441748689,
-2.667132124535617200571,
-2.243391467761504072473,
-1.826741143603688038836,
-1.415527800198188511941,
-1.008338271046723461805,
-0.6039210586255523077782,
-0.2011285765488714855458,
0.2011285765488714855458,
0.6039210586255523077782,
1.008338271046723461805,
1.415527800198188511941,
1.826741143603688038836,
2.243391467761504072473,
2.667132124535617200571,
3.099970529586441748689,
3.544443873155349886925,
4.003908603861228815228,
4.48305535709251834189,
4.988918968589943944486,
5.533147151567495725118,
6.138279220123934620395,
6.863345293529891581061 };
const std::vector<double> GH_weights_ = { 2.908254700131226229411E-21,
2.8103336027509037088E-17,
2.87860708054870606219E-14,
8.106186297463044204E-12,
9.1785804243785282085E-10,
5.10852245077594627739E-8,
1.57909488732471028835E-6,
2.9387252289229876415E-5,
3.48310124318685523421E-4,
0.00273792247306765846299,
0.0147038297048266835153,
0.0551441768702342511681,
0.1467358475408900997517,
0.2801309308392126674135,
0.386394889541813862556,
0.3863948895418138625556,
0.2801309308392126674135,
0.1467358475408900997517,
0.0551441768702342511681,
0.01470382970482668351528,
0.002737922473067658462989,
3.48310124318685523421E-4,
2.938725228922987641501E-5,
1.579094887324710288346E-6,
5.1085224507759462774E-8,
9.1785804243785282085E-10,
8.10618629746304420399E-12,
2.87860708054870606219E-14,
2.81033360275090370876E-17,
2.9082547001312262294E-21 };
const std::vector<double> adaptive_GH_weights_ = { 0.83424747101276179534,
0.64909798155426670071,
0.56940269194964050397,
0.52252568933135454964,
0.491057995832882696506,
0.46837481256472881677,
0.45132103599118862129,
0.438177022652683703695,
0.4279180629327437485828,
0.4198950037368240886418,
0.413679363611138937184,
0.4089815750035316024972,
0.4056051233256844363121,
0.403419816924804022553,
0.402346066701902927115,
0.4023460667019029271154,
0.4034198169248040225528,
0.4056051233256844363121,
0.4089815750035316024972,
0.413679363611138937184,
0.4198950037368240886418,
0.427918062932743748583,
0.4381770226526837037,
0.45132103599118862129,
0.46837481256472881677,
0.4910579958328826965056,
0.52252568933135454964,
0.56940269194964050397,
0.64909798155426670071,
0.83424747101276179534 };
const char* NA_OR_INF_WARNING_ = "Mode finding algorithm for Laplace approximation: NA or Inf occurred. This is not necessarily a problem as it might have been caused by a too large learning rate which, consequently, has been decreased by the algorithm";
const char* NO_INCREASE_IN_MLL_WARNING_ = "Mode finding algorithm for Laplace approximation: The approximate marginal log-likelihood (=convergence criterion) has decreased and the algorithm has thus been terminated.";
const char* NO_CONVERGENCE_WARNING_ = "Algorithm for finding mode for Laplace approximation has not converged after the maximal number of iterations";
/*! \brief Get number of non-zero entries in matrix */
template <class T_mat1, typename std::enable_if< std::is_same<sp_mat_t, T_mat1>::value>::type * = nullptr >
int GetNumberNonZeros(const T_mat1& M) {
return((int)M.nonZeros());
};
template <class T_mat1, typename std::enable_if< std::is_same<den_mat_t, T_mat1>::value>::type * = nullptr >
int GetNumberNonZeros(const T_mat1& M) {
return((int)(M.cols() * M.rows()));
};
};//end class Likelihood
} // namespace GPBoost
#endif // GPB_LIKELIHOODS_
|
GB_unaryop__abs_uint32_uint16.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com. See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__abs_uint32_uint16
// op(A') function: GB_tran__abs_uint32_uint16
// C type: uint32_t
// A type: uint16_t
// cast: uint32_t cij = (uint32_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CASTING(z, aij) \
uint32_t z = (uint32_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
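// For example, GB_CAST_OP (p, p) expands to:
// { uint16_t aij = Ax [p] ; uint32_t z = (uint32_t) aij ; Cx [p] = z ; }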
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ABS || GxB_NO_UINT32 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__abs_uint32_uint16
(
uint32_t *Cx, // Cx and Ax may be aliased
uint16_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__abs_uint32_uint16
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
convolution_3x3_pack8to1_int8.h | // Tencent is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
static void conv3x3s1_winograd43_transform_kernel_pack8to1_int8_neon(const Mat& kernel, Mat& kernel_tm_pack8to1, int inch, int outch, const Option& opt)
{
// winograd43 transform kernel
Mat kernel_tm(6 * 6, inch, outch, (size_t)2u);
const short ktm[6][3] = {
{6, 0, 0},
{-4, -4, -4},
{-4, 4, -4},
{1, 2, 4},
{1, -2, 4},
{0, 0, 6}
};
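// the transformed kernel is U = G * g * G^T for each 3x3 kernel g, with the 6x3 matrix ktm
// acting as an integer-scaled G so that the whole transform stays in 16-bit integer arithmetic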
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
for (int q = 0; q < inch; q++)
{
const signed char* kernel0 = (const signed char*)kernel + p * inch * 9 + q * 9;
short* kernel_tm0 = kernel_tm.channel(p).row<short>(q);
// transform kernel
const signed char* k0 = kernel0;
const signed char* k1 = kernel0 + 3;
const signed char* k2 = kernel0 + 6;
// h
short tmp[6][3];
for (int i = 0; i < 6; i++)
{
tmp[i][0] = k0[0] * ktm[i][0] + k0[1] * ktm[i][1] + k0[2] * ktm[i][2];
tmp[i][1] = k1[0] * ktm[i][0] + k1[1] * ktm[i][1] + k1[2] * ktm[i][2];
tmp[i][2] = k2[0] * ktm[i][0] + k2[1] * ktm[i][1] + k2[2] * ktm[i][2];
}
// U
for (int j = 0; j < 6; j++)
{
short* tmpp = &tmp[j][0];
for (int i = 0; i < 6; i++)
{
kernel_tm0[j * 6 + i] = tmpp[0] * ktm[i][0] + tmpp[1] * ktm[i][1] + tmpp[2] * ktm[i][2];
}
}
}
}
// interleave
// src = 36-inch-outch
// dst = 8a-inch/8a-36-outch
kernel_tm_pack8to1.create(8 * inch / 8, 36, outch / 8 + outch % 8, (size_t)2u * 8, 8);
int p = 0;
for (; p + 7 < outch; p += 8)
{
const Mat k0 = kernel_tm.channel(p);
const Mat k1 = kernel_tm.channel(p + 1);
const Mat k2 = kernel_tm.channel(p + 2);
const Mat k3 = kernel_tm.channel(p + 3);
const Mat k4 = kernel_tm.channel(p + 4);
const Mat k5 = kernel_tm.channel(p + 5);
const Mat k6 = kernel_tm.channel(p + 6);
const Mat k7 = kernel_tm.channel(p + 7);
Mat g0 = kernel_tm_pack8to1.channel(p / 8);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00[1] = k1.row<const short>(q + i)[k];
g00[2] = k2.row<const short>(q + i)[k];
g00[3] = k3.row<const short>(q + i)[k];
g00[4] = k4.row<const short>(q + i)[k];
g00[5] = k5.row<const short>(q + i)[k];
g00[6] = k6.row<const short>(q + i)[k];
g00[7] = k7.row<const short>(q + i)[k];
g00 += 8;
}
}
}
}
for (; p < outch; p++)
{
const Mat k0 = kernel_tm.channel(p);
Mat g0 = kernel_tm_pack8to1.channel(p / 8 + p % 8);
for (int k = 0; k < 36; k++)
{
short* g00 = g0.row<short>(k);
for (int q = 0; q + 7 < inch; q += 8)
{
for (int i = 0; i < 8; i++)
{
g00[0] = k0.row<const short>(q + i)[k];
g00 += 1;
}
}
}
}
}
static void conv3x3s1_winograd43_pack8to1_int8_neon(const Mat& bottom_blob, Mat& top_blob, const Mat& kernel_tm, const Option& opt)
{
int w = bottom_blob.w;
int h = bottom_blob.h;
int inch = bottom_blob.c;
// size_t elemsize = bottom_blob.elemsize;
int elempack = bottom_blob.elempack;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
// pad to 4n+2
Mat bottom_blob_bordered = bottom_blob;
outw = (outw + 3) / 4 * 4;
outh = (outh + 3) / 4 * 4;
w = outw + 2;
h = outh + 2;
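// each 4x4 output tile reads a 6x6 input tile with stride 4, hence the +2 border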
copy_make_border(bottom_blob, bottom_blob_bordered, 0, h - bottom_blob.h, 0, w - bottom_blob.w, BORDER_CONSTANT, 0.f, opt);
// BEGIN transform input
Mat bottom_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
bottom_blob_tm.create(tiles, 36, inch, 2u * elempack, elempack, opt.workspace_allocator);
// const float itm[4][4] = {
// {4.0f, 0.0f, -5.0f, 0.0f, 1.0f, 0.0f},
// {0.0f,-4.0f, -4.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, -4.0f,-1.0f, 1.0f, 0.0f},
// {0.0f,-2.0f, -1.0f, 2.0f, 1.0f, 0.0f},
// {0.0f, 2.0f, -1.0f,-2.0f, 1.0f, 0.0f},
// {0.0f, 4.0f, 0.0f,-5.0f, 0.0f, 1.0f}
// };
// 0 = 4 * r00 - 5 * r02 + r04
// 1 = -4 * (r01 + r02) + r04 + r03
// 2 = 4 * (r01 - r02) + r04 - r03
// 3 = -2 * (r01 - r03) + r04 - r02
// 4 = 2 * (r01 - r03) + r04 - r02
// 5 = 4 * r01 - 5 * r03 + r05
#pragma omp parallel for num_threads(opt.num_threads)
for (int q = 0; q < inch; q++)
{
const Mat img0 = bottom_blob_bordered.channel(q);
Mat img0_tm = bottom_blob_tm.channel(q);
short tmp[6][6][8];
// tile
for (int i = 0; i < h_tm / 6; i++)
{
for (int j = 0; j < w_tm / 6; j++)
{
const signed char* r0 = img0.row<const signed char>(i * 4) + (j * 4) * 8;
for (int m = 0; m < 6; m++)
{
int8x8_t _r00 = vld1_s8(r0);
int8x8_t _r01 = vld1_s8(r0 + 8);
int8x8_t _r02 = vld1_s8(r0 + 16);
int8x8_t _r03 = vld1_s8(r0 + 24);
int8x8_t _r04 = vld1_s8(r0 + 32);
int8x8_t _r05 = vld1_s8(r0 + 40);
int8x8_t _v4s8 = vdup_n_s8(4);
int8x8_t _v5s8 = vdup_n_s8(5);
int16x8_t _v2 = vdupq_n_s16(2);
int16x8_t _v4 = vdupq_n_s16(4);
// int16x8_t _tmp0m = vfmsq_n_f16(vfmaq_n_f16(_r04, _r00, 4.f), _r02, 5.f);
int16x8_t _tmp0m = vsubq_s16(vaddw_s8(vmull_s8(_r00, _v4s8), _r04), vmull_s8(_r02, _v5s8));
// int16x8_t _tmp1m = vfmsq_n_f16(vaddq_f16(_r04, _r03), vaddq_f16(_r01, _r02), 4.f);
int16x8_t _tmp1m = vmlsq_s16(vaddl_s8(_r04, _r03), vaddl_s8(_r01, _r02), _v4);
// int16x8_t _tmp2m = vfmaq_n_f16(vsubq_f16(_r04, _r03), vsubq_f16(_r01, _r02), 4.f);
int16x8_t _tmp2m = vmlaq_s16(vsubl_s8(_r04, _r03), vsubl_s8(_r01, _r02), _v4);
// int16x8_t _tmp3m = vfmsq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
int16x8_t _tmp3m = vmlsq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2);
// int16x8_t _tmp4m = vfmaq_n_f16(vsubq_f16(_r04, _r02), vsubq_f16(_r01, _r03), 2.f);
int16x8_t _tmp4m = vmlaq_s16(vsubl_s8(_r04, _r02), vsubl_s8(_r01, _r03), _v2);
// int16x8_t _tmp5m = vfmsq_n_f16(vfmaq_n_f16(_r05, _r01, 4.f), _r03, 5.f);
int16x8_t _tmp5m = vsubq_s16(vaddw_s8(vmull_s8(_r01, _v4s8), _r05), vmull_s8(_r03, _v5s8));
vst1q_s16(tmp[0][m], _tmp0m);
vst1q_s16(tmp[1][m], _tmp1m);
vst1q_s16(tmp[2][m], _tmp2m);
vst1q_s16(tmp[3][m], _tmp3m);
vst1q_s16(tmp[4][m], _tmp4m);
vst1q_s16(tmp[5][m], _tmp5m);
r0 += w * 8;
}
short* r0_tm_0 = (short*)img0_tm + (i * w_tm / 6 + j) * 8;
short* r0_tm_1 = r0_tm_0 + tiles * 8;
short* r0_tm_2 = r0_tm_0 + tiles * 16;
short* r0_tm_3 = r0_tm_0 + tiles * 24;
short* r0_tm_4 = r0_tm_0 + tiles * 32;
short* r0_tm_5 = r0_tm_0 + tiles * 40;
for (int m = 0; m < 6; m++)
{
int16x8_t _tmp00 = vld1q_s16(tmp[m][0]);
int16x8_t _tmp01 = vld1q_s16(tmp[m][1]);
int16x8_t _tmp02 = vld1q_s16(tmp[m][2]);
int16x8_t _tmp03 = vld1q_s16(tmp[m][3]);
int16x8_t _tmp04 = vld1q_s16(tmp[m][4]);
int16x8_t _tmp05 = vld1q_s16(tmp[m][5]);
int16x8_t _v2 = vdupq_n_s16(2);
int16x8_t _v4 = vdupq_n_s16(4);
int16x8_t _v5 = vdupq_n_s16(5);
int16x8_t _r0tm0 = vmlsq_s16(vmlaq_s16(_tmp04, _tmp00, _v4), _tmp02, _v5);
int16x8_t _r0tm1 = vmlsq_s16(vaddq_s16(_tmp04, _tmp03), vaddq_s16(_tmp01, _tmp02), _v4);
int16x8_t _r0tm2 = vmlaq_s16(vsubq_s16(_tmp04, _tmp03), vsubq_s16(_tmp01, _tmp02), _v4);
int16x8_t _r0tm3 = vmlsq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2);
int16x8_t _r0tm4 = vmlaq_s16(vsubq_s16(_tmp04, _tmp02), vsubq_s16(_tmp01, _tmp03), _v2);
int16x8_t _r0tm5 = vmlsq_s16(vmlaq_s16(_tmp05, _tmp01, _v4), _tmp03, _v5);
vst1q_s16(r0_tm_0, _r0tm0);
vst1q_s16(r0_tm_1, _r0tm1);
vst1q_s16(r0_tm_2, _r0tm2);
vst1q_s16(r0_tm_3, _r0tm3);
vst1q_s16(r0_tm_4, _r0tm4);
vst1q_s16(r0_tm_5, _r0tm5);
r0_tm_0 += tiles * 48;
r0_tm_1 += tiles * 48;
r0_tm_2 += tiles * 48;
r0_tm_3 += tiles * 48;
r0_tm_4 += tiles * 48;
r0_tm_5 += tiles * 48;
}
}
}
}
}
bottom_blob_bordered = Mat();
// END transform input
// BEGIN dot
Mat top_blob_tm;
{
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = h_tm / 6 * w_tm / 6;
// permute
// bottom_blob_tm.create(tiles, 36, inch, elemsize, elempack, opt.workspace_allocator);
Mat bottom_blob_tm2;
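// reorder the transformed input so that 8 (or 4, or 1) tiles are contiguous per row,
// matching the 8-/4-/1-tile micro-kernels in the dot stage below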
#if __aarch64__
if (tiles >= 8)
bottom_blob_tm2.create(8 * inch, tiles / 8 + (tiles % 8) / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator);
else if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#else
if (tiles >= 4)
bottom_blob_tm2.create(4 * inch, tiles / 4 + tiles % 4, 36, 2u * elempack, elempack, opt.workspace_allocator);
else // if (tiles >= 1)
bottom_blob_tm2.create(1 * inch, tiles, 36, 2u * elempack, elempack, opt.workspace_allocator);
#endif // __aarch64__
#pragma omp parallel for num_threads(opt.num_threads)
for (int r = 0; r < 36; r++)
{
Mat tm2 = bottom_blob_tm2.channel(r);
// tile
int i = 0;
#if __aarch64__
for (; i + 7 < tiles; i += 8)
{
short* tm2p = tm2.row<short>(i / 8);
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x8
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0], #64 \n"
"ld4 {v4.8h, v5.8h, v6.8h, v7.8h}, [%0] \n"
"sub %0, %0, #64 \n"
"uzp1 v16.8h, v0.8h, v4.8h \n"
"uzp2 v20.8h, v0.8h, v4.8h \n"
"uzp1 v17.8h, v1.8h, v5.8h \n"
"uzp2 v21.8h, v1.8h, v5.8h \n"
"uzp1 v18.8h, v2.8h, v6.8h \n"
"uzp2 v22.8h, v2.8h, v6.8h \n"
"uzp1 v19.8h, v3.8h, v7.8h \n"
"uzp2 v23.8h, v3.8h, v7.8h \n"
"st1 {v16.8h, v17.8h, v18.8h, v19.8h}, [%1], #64 \n"
"st1 {v20.8h, v21.8h, v22.8h, v23.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23");
r0 += bottom_blob_tm.cstep * 8;
}
}
#endif
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4);
#else
short* tm2p = tm2.row<short>(i / 4);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
// transpose 8x4
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%0] \n"
"st4 {v0.8h, v1.8h, v2.8h, v3.8h}, [%1], #64 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0", "v1", "v2", "v3");
#else
asm volatile(
"pld [%0, #512] \n"
"vldm %0, {d0-d7} \n"
"vswp d1, d2 \n"
"vswp d5, d6 \n"
"vswp q1, q2 \n"
"vst4.s16 {d0-d3}, [%1 :64]! \n"
"vst4.s16 {d4-d7}, [%1 :64]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0", "q1", "q2", "q3");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 8;
}
}
for (; i < tiles; i++)
{
#if __aarch64__
short* tm2p = tm2.row<short>(i / 8 + (i % 8) / 4 + i % 4);
#else
short* tm2p = tm2.row<short>(i / 4 + i % 4);
#endif
const short* r0 = bottom_blob_tm;
r0 += (r * tiles + i) * 8;
for (int q = 0; q < inch; q++)
{
#if __aarch64__
asm volatile(
"prfm pldl1keep, [%0, #128] \n"
"ld1 {v0.8h}, [%0] \n"
"st1 {v0.8h}, [%1], #16 \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "v0");
#else
asm volatile(
"pld [%0, #128] \n"
"vld1.s16 {d0-d1}, [%0 :64] \n"
"vst1.s16 {d0-d1}, [%1 :64]! \n"
: "=r"(r0), // %0
"=r"(tm2p) // %1
: "0"(r0),
"1"(tm2p)
: "memory", "q0");
#endif // __aarch64__
r0 += bottom_blob_tm.cstep * 8;
}
}
}
bottom_blob_tm = Mat();
// permute end
top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
int nn_outch = 0;
int remain_outch_start = 0;
nn_outch = outch >> 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int pp = 0; pp < nn_outch; pp++)
{
int p = pp * 8;
int* output0_tm = top_blob_tm.channel(p);
int* output1_tm = top_blob_tm.channel(p + 1);
int* output2_tm = top_blob_tm.channel(p + 2);
int* output3_tm = top_blob_tm.channel(p + 3);
int* output4_tm = top_blob_tm.channel(p + 4);
int* output5_tm = top_blob_tm.channel(p + 5);
int* output6_tm = top_blob_tm.channel(p + 6);
int* output7_tm = top_blob_tm.channel(p + 7);
const Mat kernel01_tm = kernel_tm.channel(p / 8);
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 7 < tiles; i += 8)
{
const short* r0 = bb2.row<const short>(i / 8);
const short* kptr = kernel01_tm.row<const short>(r);
int nn = inch; // inch always > 0
asm volatile(
"eor v16.16b, v16.16b, v16.16b \n"
"eor v17.16b, v17.16b, v17.16b \n"
"eor v18.16b, v18.16b, v18.16b \n"
"eor v19.16b, v19.16b, v19.16b \n"
"eor v20.16b, v20.16b, v20.16b \n"
"eor v21.16b, v21.16b, v21.16b \n"
"eor v22.16b, v22.16b, v22.16b \n"
"eor v23.16b, v23.16b, v23.16b \n"
"eor v24.16b, v24.16b, v24.16b \n"
"eor v25.16b, v25.16b, v25.16b \n"
"eor v26.16b, v26.16b, v26.16b \n"
"eor v27.16b, v27.16b, v27.16b \n"
"eor v28.16b, v28.16b, v28.16b \n"
"eor v29.16b, v29.16b, v29.16b \n"
"eor v30.16b, v30.16b, v30.16b \n"
"eor v31.16b, v31.16b, v31.16b \n"
"0: \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v8.8h, v9.8h, v10.8h, v11.8h}, [%9], #64 \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v0.8h, v1.8h, v2.8h, v3.8h}, [%10], #64 \n"
"smlal v16.4s, v8.4h, v0.h[0] \n"
"smlal2 v17.4s, v8.8h, v0.h[0] \n"
"smlal v18.4s, v8.4h, v0.h[1] \n"
"smlal2 v19.4s, v8.8h, v0.h[1] \n"
"smlal v20.4s, v8.4h, v0.h[2] \n"
"smlal2 v21.4s, v8.8h, v0.h[2] \n"
"smlal v22.4s, v8.4h, v0.h[3] \n"
"smlal2 v23.4s, v8.8h, v0.h[3] \n"
"smlal v24.4s, v8.4h, v0.h[4] \n"
"smlal2 v25.4s, v8.8h, v0.h[4] \n"
"smlal v26.4s, v8.4h, v0.h[5] \n"
"smlal2 v27.4s, v8.8h, v0.h[5] \n"
"smlal v28.4s, v8.4h, v0.h[6] \n"
"smlal2 v29.4s, v8.8h, v0.h[6] \n"
"smlal v30.4s, v8.4h, v0.h[7] \n"
"smlal2 v31.4s, v8.8h, v0.h[7] \n"
"smlal v16.4s, v9.4h, v1.h[0] \n"
"smlal2 v17.4s, v9.8h, v1.h[0] \n"
"smlal v18.4s, v9.4h, v1.h[1] \n"
"smlal2 v19.4s, v9.8h, v1.h[1] \n"
"smlal v20.4s, v9.4h, v1.h[2] \n"
"smlal2 v21.4s, v9.8h, v1.h[2] \n"
"smlal v22.4s, v9.4h, v1.h[3] \n"
"smlal2 v23.4s, v9.8h, v1.h[3] \n"
"smlal v24.4s, v9.4h, v1.h[4] \n"
"smlal2 v25.4s, v9.8h, v1.h[4] \n"
"smlal v26.4s, v9.4h, v1.h[5] \n"
"smlal2 v27.4s, v9.8h, v1.h[5] \n"
"smlal v28.4s, v9.4h, v1.h[6] \n"
"smlal2 v29.4s, v9.8h, v1.h[6] \n"
"smlal v30.4s, v9.4h, v1.h[7] \n"
"smlal2 v31.4s, v9.8h, v1.h[7] \n"
"prfm pldl1keep, [%9, #512] \n"
"ld1 {v12.8h, v13.8h, v14.8h, v15.8h}, [%9], #64 \n"
"smlal v16.4s, v10.4h, v2.h[0] \n"
"smlal2 v17.4s, v10.8h, v2.h[0] \n"
"smlal v18.4s, v10.4h, v2.h[1] \n"
"smlal2 v19.4s, v10.8h, v2.h[1] \n"
"smlal v20.4s, v10.4h, v2.h[2] \n"
"smlal2 v21.4s, v10.8h, v2.h[2] \n"
"smlal v22.4s, v10.4h, v2.h[3] \n"
"smlal2 v23.4s, v10.8h, v2.h[3] \n"
"smlal v24.4s, v10.4h, v2.h[4] \n"
"smlal2 v25.4s, v10.8h, v2.h[4] \n"
"smlal v26.4s, v10.4h, v2.h[5] \n"
"smlal2 v27.4s, v10.8h, v2.h[5] \n"
"smlal v28.4s, v10.4h, v2.h[6] \n"
"smlal2 v29.4s, v10.8h, v2.h[6] \n"
"smlal v30.4s, v10.4h, v2.h[7] \n"
"smlal2 v31.4s, v10.8h, v2.h[7] \n"
"prfm pldl1keep, [%10, #512] \n"
"ld1 {v4.8h, v5.8h, v6.8h, v7.8h}, [%10], #64 \n"
"smlal v16.4s, v11.4h, v3.h[0] \n"
"smlal2 v17.4s, v11.8h, v3.h[0] \n"
"smlal v18.4s, v11.4h, v3.h[1] \n"
"smlal2 v19.4s, v11.8h, v3.h[1] \n"
"smlal v20.4s, v11.4h, v3.h[2] \n"
"smlal2 v21.4s, v11.8h, v3.h[2] \n"
"smlal v22.4s, v11.4h, v3.h[3] \n"
"smlal2 v23.4s, v11.8h, v3.h[3] \n"
"smlal v24.4s, v11.4h, v3.h[4] \n"
"smlal2 v25.4s, v11.8h, v3.h[4] \n"
"smlal v26.4s, v11.4h, v3.h[5] \n"
"smlal2 v27.4s, v11.8h, v3.h[5] \n"
"smlal v28.4s, v11.4h, v3.h[6] \n"
"smlal2 v29.4s, v11.8h, v3.h[6] \n"
"smlal v30.4s, v11.4h, v3.h[7] \n"
"smlal2 v31.4s, v11.8h, v3.h[7] \n"
"smlal v16.4s, v12.4h, v4.h[0] \n"
"smlal2 v17.4s, v12.8h, v4.h[0] \n"
"smlal v18.4s, v12.4h, v4.h[1] \n"
"smlal2 v19.4s, v12.8h, v4.h[1] \n"
"smlal v20.4s, v12.4h, v4.h[2] \n"
"smlal2 v21.4s, v12.8h, v4.h[2] \n"
"smlal v22.4s, v12.4h, v4.h[3] \n"
"smlal2 v23.4s, v12.8h, v4.h[3] \n"
"smlal v24.4s, v12.4h, v4.h[4] \n"
"smlal2 v25.4s, v12.8h, v4.h[4] \n"
"smlal v26.4s, v12.4h, v4.h[5] \n"
"smlal2 v27.4s, v12.8h, v4.h[5] \n"
"smlal v28.4s, v12.4h, v4.h[6] \n"
"smlal2 v29.4s, v12.8h, v4.h[6] \n"
"smlal v30.4s, v12.4h, v4.h[7] \n"
"smlal2 v31.4s, v12.8h, v4.h[7] \n"
"smlal v16.4s, v13.4h, v5.h[0] \n"
"smlal2 v17.4s, v13.8h, v5.h[0] \n"
"smlal v18.4s, v13.4h, v5.h[1] \n"
"smlal2 v19.4s, v13.8h, v5.h[1] \n"
"smlal v20.4s, v13.4h, v5.h[2] \n"
"smlal2 v21.4s, v13.8h, v5.h[2] \n"
"smlal v22.4s, v13.4h, v5.h[3] \n"
"smlal2 v23.4s, v13.8h, v5.h[3] \n"
"smlal v24.4s, v13.4h, v5.h[4] \n"
"smlal2 v25.4s, v13.8h, v5.h[4] \n"
"smlal v26.4s, v13.4h, v5.h[5] \n"
"smlal2 v27.4s, v13.8h, v5.h[5] \n"
"smlal v28.4s, v13.4h, v5.h[6] \n"
"smlal2 v29.4s, v13.8h, v5.h[6] \n"
"smlal v30.4s, v13.4h, v5.h[7] \n"
"smlal2 v31.4s, v13.8h, v5.h[7] \n"
"smlal v16.4s, v14.4h, v6.h[0] \n"
"smlal2 v17.4s, v14.8h, v6.h[0] \n"
"smlal v18.4s, v14.4h, v6.h[1] \n"
"smlal2 v19.4s, v14.8h, v6.h[1] \n"
"smlal v20.4s, v14.4h, v6.h[2] \n"
"smlal2 v21.4s, v14.8h, v6.h[2] \n"
"smlal v22.4s, v14.4h, v6.h[3] \n"
"smlal2 v23.4s, v14.8h, v6.h[3] \n"
"smlal v24.4s, v14.4h, v6.h[4] \n"
"smlal2 v25.4s, v14.8h, v6.h[4] \n"
"smlal v26.4s, v14.4h, v6.h[5] \n"
"smlal2 v27.4s, v14.8h, v6.h[5] \n"
"smlal v28.4s, v14.4h, v6.h[6] \n"
"smlal2 v29.4s, v14.8h, v6.h[6] \n"
"smlal v30.4s, v14.4h, v6.h[7] \n"
"smlal2 v31.4s, v14.8h, v6.h[7] \n"
"subs %w0, %w0, #1 \n"
"smlal v16.4s, v15.4h, v7.h[0] \n"
"smlal2 v17.4s, v15.8h, v7.h[0] \n"
"smlal v18.4s, v15.4h, v7.h[1] \n"
"smlal2 v19.4s, v15.8h, v7.h[1] \n"
"smlal v20.4s, v15.4h, v7.h[2] \n"
"smlal2 v21.4s, v15.8h, v7.h[2] \n"
"smlal v22.4s, v15.4h, v7.h[3] \n"
"smlal2 v23.4s, v15.8h, v7.h[3] \n"
"smlal v24.4s, v15.4h, v7.h[4] \n"
"smlal2 v25.4s, v15.8h, v7.h[4] \n"
"smlal v26.4s, v15.4h, v7.h[5] \n"
"smlal2 v27.4s, v15.8h, v7.h[5] \n"
"smlal v28.4s, v15.4h, v7.h[6] \n"
"smlal2 v29.4s, v15.8h, v7.h[6] \n"
"smlal v30.4s, v15.4h, v7.h[7] \n"
"smlal2 v31.4s, v15.8h, v7.h[7] \n"
"bne 0b \n"
"st1 {v16.4s, v17.4s}, [%1], #32 \n"
"st1 {v18.4s, v19.4s}, [%2], #32 \n"
"st1 {v20.4s, v21.4s}, [%3], #32 \n"
"st1 {v22.4s, v23.4s}, [%4], #32 \n"
"st1 {v24.4s, v25.4s}, [%5], #32 \n"
"st1 {v26.4s, v27.4s}, [%6], #32 \n"
"st1 {v28.4s, v29.4s}, [%7], #32 \n"
"st1 {v30.4s, v31.4s}, [%8], #32 \n"
: "=r"(nn), // %0
"=r"(output0_tm), // %1
"=r"(output1_tm), // %2
"=r"(output2_tm), // %3
"=r"(output3_tm), // %4
"=r"(output4_tm), // %5
"=r"(output5_tm), // %6
"=r"(output6_tm), // %7
"=r"(output7_tm), // %8
"=r"(r0), // %9
"=r"(kptr) // %10
: "0"(nn),
"1"(output0_tm),
"2"(output1_tm),
"3"(output2_tm),
"4"(output3_tm),
"5"(output4_tm),
"6"(output5_tm),
"7"(output6_tm),
"8"(output7_tm),
"9"(r0),
"10"(kptr)
: "cc", "memory", "v0", "v1", "v2", "v3", "v4", "v5", "v6", "v7", "v8", "v9", "v10", "v11", "v12", "v13", "v14", "v15", "v16", "v17", "v18", "v19", "v20", "v21", "v22", "v23", "v24", "v25", "v26", "v27", "v28", "v29", "v30", "v31");
}
#endif
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4);
#else
const short* r0 = bb2.row<const short>(i / 4);
#endif
const short* k0 = kernel01_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
int32x4_t _sum4 = vdupq_n_s32(0);
int32x4_t _sum5 = vdupq_n_s32(0);
int32x4_t _sum6 = vdupq_n_s32(0);
int32x4_t _sum7 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _val1 = vld1q_s16(r0 + 8);
int16x8_t _val2 = vld1q_s16(r0 + 16);
int16x8_t _val3 = vld1q_s16(r0 + 24);
int16x8_t _w0 = vld1q_s16(k0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val0), vget_low_s16(_w0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val0), vget_low_s16(_w0), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val0), vget_low_s16(_w0), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val0), vget_low_s16(_w0), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val0), vget_high_s16(_w0), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val0), vget_high_s16(_w0), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val0), vget_high_s16(_w0), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val0), vget_high_s16(_w0), 3);
int16x8_t _w1 = vld1q_s16(k0 + 8);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val0), vget_low_s16(_w1), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val0), vget_low_s16(_w1), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val0), vget_low_s16(_w1), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val0), vget_low_s16(_w1), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val0), vget_high_s16(_w1), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val0), vget_high_s16(_w1), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val0), vget_high_s16(_w1), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val0), vget_high_s16(_w1), 3);
int16x8_t _w2 = vld1q_s16(k0 + 16);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val1), vget_low_s16(_w2), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val1), vget_low_s16(_w2), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val1), vget_low_s16(_w2), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val1), vget_low_s16(_w2), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val1), vget_high_s16(_w2), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val1), vget_high_s16(_w2), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val1), vget_high_s16(_w2), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val1), vget_high_s16(_w2), 3);
int16x8_t _w3 = vld1q_s16(k0 + 24);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val1), vget_low_s16(_w3), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val1), vget_low_s16(_w3), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val1), vget_low_s16(_w3), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val1), vget_low_s16(_w3), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val1), vget_high_s16(_w3), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val1), vget_high_s16(_w3), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val1), vget_high_s16(_w3), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val1), vget_high_s16(_w3), 3);
int16x8_t _w4 = vld1q_s16(k0 + 32);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val2), vget_low_s16(_w4), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val2), vget_low_s16(_w4), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val2), vget_low_s16(_w4), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val2), vget_low_s16(_w4), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val2), vget_high_s16(_w4), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val2), vget_high_s16(_w4), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val2), vget_high_s16(_w4), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val2), vget_high_s16(_w4), 3);
int16x8_t _w5 = vld1q_s16(k0 + 40);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val2), vget_low_s16(_w5), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val2), vget_low_s16(_w5), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val2), vget_low_s16(_w5), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val2), vget_low_s16(_w5), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val2), vget_high_s16(_w5), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val2), vget_high_s16(_w5), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val2), vget_high_s16(_w5), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val2), vget_high_s16(_w5), 3);
int16x8_t _w6 = vld1q_s16(k0 + 48);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_val3), vget_low_s16(_w6), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_val3), vget_low_s16(_w6), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_val3), vget_low_s16(_w6), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_low_s16(_val3), vget_low_s16(_w6), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_val3), vget_high_s16(_w6), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_val3), vget_high_s16(_w6), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_val3), vget_high_s16(_w6), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_low_s16(_val3), vget_high_s16(_w6), 3);
int16x8_t _w7 = vld1q_s16(k0 + 56);
_sum0 = vmlal_lane_s16(_sum0, vget_high_s16(_val3), vget_low_s16(_w7), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_val3), vget_low_s16(_w7), 1);
_sum2 = vmlal_lane_s16(_sum2, vget_high_s16(_val3), vget_low_s16(_w7), 2);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_val3), vget_low_s16(_w7), 3);
_sum4 = vmlal_lane_s16(_sum4, vget_high_s16(_val3), vget_high_s16(_w7), 0);
_sum5 = vmlal_lane_s16(_sum5, vget_high_s16(_val3), vget_high_s16(_w7), 1);
_sum6 = vmlal_lane_s16(_sum6, vget_high_s16(_val3), vget_high_s16(_w7), 2);
_sum7 = vmlal_lane_s16(_sum7, vget_high_s16(_val3), vget_high_s16(_w7), 3);
r0 += 32;
k0 += 64;
}
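// store the accumulated results: each _sumN holds this group's 4 tiles for output channel N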
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output1_tm, _sum1);
vst1q_s32(output2_tm, _sum2);
vst1q_s32(output3_tm, _sum3);
vst1q_s32(output4_tm, _sum4);
vst1q_s32(output5_tm, _sum5);
vst1q_s32(output6_tm, _sum6);
vst1q_s32(output7_tm, _sum7);
output0_tm += 4;
output1_tm += 4;
output2_tm += 4;
output3_tm += 4;
output4_tm += 4;
output5_tm += 4;
output6_tm += 4;
output7_tm += 4;
}
for (; i < tiles; i++)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4);
#else
const short* r0 = bb2.row<const short>(i / 4 + i % 4);
#endif
const short* k0 = kernel01_tm.row<const short>(r);
int nn = inch; // inch always > 0
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int j = 0; j < nn; j++)
{
int16x8_t _val0 = vld1q_s16(r0);
int16x8_t _w0 = vld1q_s16(k0);
int16x8_t _w1 = vld1q_s16(k0 + 8);
int16x8_t _w2 = vld1q_s16(k0 + 16);
int16x8_t _w3 = vld1q_s16(k0 + 24);
int16x8_t _w4 = vld1q_s16(k0 + 32);
int16x8_t _w5 = vld1q_s16(k0 + 40);
int16x8_t _w6 = vld1q_s16(k0 + 48);
int16x8_t _w7 = vld1q_s16(k0 + 56);
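// single tile, 8 input channels per iteration: _wN holds the 8 output-channel
// weights for input channel N, so _sum0 gathers output channels 0-3 and _sum1
// gathers output channels 4-7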
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w0), vget_low_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w0), vget_low_s16(_val0), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w1), vget_low_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w1), vget_low_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w2), vget_low_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w2), vget_low_s16(_val0), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w3), vget_low_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w3), vget_low_s16(_val0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w4), vget_high_s16(_val0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w4), vget_high_s16(_val0), 0);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w5), vget_high_s16(_val0), 1);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w5), vget_high_s16(_val0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w6), vget_high_s16(_val0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w6), vget_high_s16(_val0), 2);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_w7), vget_high_s16(_val0), 3);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_w7), vget_high_s16(_val0), 3);
r0 += 8;
k0 += 64;
}
output0_tm[0] = vgetq_lane_s32(_sum0, 0);
output1_tm[0] = vgetq_lane_s32(_sum0, 1);
output2_tm[0] = vgetq_lane_s32(_sum0, 2);
output3_tm[0] = vgetq_lane_s32(_sum0, 3);
output4_tm[0] = vgetq_lane_s32(_sum1, 0);
output5_tm[0] = vgetq_lane_s32(_sum1, 1);
output6_tm[0] = vgetq_lane_s32(_sum1, 2);
output7_tm[0] = vgetq_lane_s32(_sum1, 3);
output0_tm += 1;
output1_tm += 1;
output2_tm += 1;
output3_tm += 1;
output4_tm += 1;
output5_tm += 1;
output6_tm += 1;
output7_tm += 1;
}
}
}
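// advance past the nn_outch groups of 8 output channels handled above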
remain_outch_start += nn_outch << 3;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = remain_outch_start; p < outch; p++)
{
int* output0_tm = top_blob_tm.channel(p);
const Mat kernel0_tm = kernel_tm.channel(p / 8 + p % 8);
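// p / 8 + p % 8: one channel slot per group of 8 output channels, then one
// slot per leftover output channel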
for (int r = 0; r < 36; r++)
{
const Mat bb2 = bottom_blob_tm2.channel(r);
int i = 0;
#if __aarch64__
for (; i + 7 < tiles; i += 8)
{
const short* r0 = bb2.row<const short>(i / 8);
const short* kptr = kernel0_tm.row<const short>(r);
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
int32x4_t _sum2 = vdupq_n_s32(0);
int32x4_t _sum3 = vdupq_n_s32(0);
for (int q = 0; q < inch; q++)
{
int16x8_t _r0 = vld1q_s16(r0);
int16x8_t _r1 = vld1q_s16(r0 + 8);
int16x8_t _r2 = vld1q_s16(r0 + 16);
int16x8_t _r3 = vld1q_s16(r0 + 24);
int16x8_t _r4 = vld1q_s16(r0 + 32);
int16x8_t _r5 = vld1q_s16(r0 + 40);
int16x8_t _r6 = vld1q_s16(r0 + 48);
int16x8_t _r7 = vld1q_s16(r0 + 56);
int16x8_t _k0 = vld1q_s16(kptr);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r1), vget_low_s16(_k0), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r1), vget_low_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_low_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_low_s16(_k0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r3), vget_low_s16(_k0), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r3), vget_low_s16(_k0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r4), vget_high_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r4), vget_high_s16(_k0), 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r5), vget_high_s16(_k0), 1);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r5), vget_high_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r6), vget_high_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r6), vget_high_s16(_k0), 2);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r7), vget_high_s16(_k0), 3);
_sum3 = vmlal_lane_s16(_sum3, vget_high_s16(_r7), vget_high_s16(_k0), 3);
kptr += 8;
r0 += 64;
}
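// _sum0/_sum2 cover tiles 0-3 (even/odd input channels) and _sum1/_sum3 cover
// tiles 4-7; fold the partials before storing all 8 tiles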
_sum0 = vaddq_s32(_sum0, _sum2);
_sum1 = vaddq_s32(_sum1, _sum3);
vst1q_s32(output0_tm, _sum0);
vst1q_s32(output0_tm + 4, _sum1);
output0_tm += 8;
}
#endif
for (; i + 3 < tiles; i += 4)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4);
#else
const short* r0 = bb2.row<const short>(i / 4);
#endif
const short* kptr = kernel0_tm.row<const short>(r);
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int q = 0; q < inch; q++)
{
int16x8_t _r0 = vld1q_s16(r0);
int16x8_t _r1 = vld1q_s16(r0 + 8);
int16x8_t _r2 = vld1q_s16(r0 + 16);
int16x8_t _r3 = vld1q_s16(r0 + 24);
int16x8_t _k0 = vld1q_s16(kptr);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r0), vget_low_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r1), vget_low_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r1), vget_low_s16(_k0), 3);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2), vget_high_s16(_k0), 0);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r2), vget_high_s16(_k0), 1);
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r3), vget_high_s16(_k0), 2);
_sum1 = vmlal_lane_s16(_sum1, vget_high_s16(_r3), vget_high_s16(_k0), 3);
kptr += 8;
r0 += 32;
}
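// _sum0 and _sum1 hold the same 4 tiles, accumulated over even and odd input
// channels respectively; fold them before storing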
int32x4_t _sum01 = vaddq_s32(_sum0, _sum1);
vst1q_s32(output0_tm, _sum01);
output0_tm += 4;
}
for (; i < tiles; i++)
{
#if __aarch64__
const short* r0 = bb2.row<const short>(i / 8 + (i % 8) / 4 + i % 4);
#else
const short* r0 = bb2.row<const short>(i / 4 + i % 4);
#endif
const short* kptr = kernel0_tm.row<const short>(r);
int32x4_t _sum0 = vdupq_n_s32(0);
int32x4_t _sum1 = vdupq_n_s32(0);
for (int q = 0; q < inch; q++)
{
int16x8_t _r0 = vld1q_s16(r0);
int16x8_t _k0 = vld1q_s16(kptr);
_sum0 = vmlal_s16(_sum0, vget_low_s16(_r0), vget_low_s16(_k0));
_sum1 = vmlal_s16(_sum1, vget_high_s16(_r0), vget_high_s16(_k0));
kptr += 8;
r0 += 8;
}
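// collapse the per-lane partial sums into a single dot product over the 8
// input channels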
int32x4_t _sum = vaddq_s32(_sum0, _sum1);
#if __aarch64__
int sum = vaddvq_s32(_sum); // dot
#else
int32x2_t _ss = vadd_s32(vget_low_s32(_sum), vget_high_s32(_sum));
_ss = vpadd_s32(_ss, _ss);
int sum = vget_lane_s32(_ss, 0);
#endif
output0_tm[0] = sum;
output0_tm++;
}
}
}
}
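// release the transformed input; it is no longer needed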
bottom_blob_tm = Mat();
// END dot
// BEGIN transform output
Mat top_blob_bordered;
if (outw == top_blob.w && outh == top_blob.h)
{
top_blob_bordered = top_blob;
}
else
{
top_blob_bordered.create(outw, outh, outch, 4u, 1, opt.workspace_allocator);
}
{
// const float otm[4][6] = {
// {1.0f, 1.0f, 1.0f, 1.0f, 1.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 2.0f, -2.0f, 0.0f},
// {0.0f, 1.0f, 1.0f, 4.0f, 4.0f, 0.0f},
// {0.0f, 1.0f, -1.0f, 8.0f, -8.0f, 1.0f}
// };
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
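// int8 note: the integer transforms carry fixed-point scale factors, so the
// r05 column is folded in with an extra *4 below and the final /576 in the
// vertical pass removes the accumulated scale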
int w_tm = outw / 4 * 6;
int h_tm = outh / 4 * 6;
const int tiles = w_tm / 6 * h_tm / 6;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
const Mat out0_tm = top_blob_tm.channel(p);
Mat out0 = top_blob_bordered.channel(p);
int tmp[4][6];
// tile
for (int i = 0; i < outh / 4; i++)
{
for (int j = 0; j < outw / 4; j++)
{
// top_blob_tm.create(tiles, 36, outch, 4u, 1, opt.workspace_allocator);
const int* output0_tm_0 = (const int*)out0_tm + (i * w_tm / 6 + j) * 1;
const int* output0_tm_1 = output0_tm_0 + tiles * 1;
const int* output0_tm_2 = output0_tm_0 + tiles * 2;
const int* output0_tm_3 = output0_tm_0 + tiles * 3;
const int* output0_tm_4 = output0_tm_0 + tiles * 4;
const int* output0_tm_5 = output0_tm_0 + tiles * 5;
int* output0 = out0.row<int>(i * 4) + j * 4;
// 0 = r00 + (r01 + r02) + (r03 + r04)
// 1 = (r01 - r02) + (r03 - r04) * 2
// 2 = (r01 + r02) + (r03 + r04) * 4
// 3 = r05 + (r01 - r02) + (r03 - r04) * 8
// TODO neon optimize
for (int m = 0; m < 5; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = output0_tm_0[0] + tmp02a + tmp02b;
tmp[1][m] = tmp13a + tmp13b * 2;
tmp[2][m] = tmp02a + tmp02b * 4;
tmp[3][m] = output0_tm_5[0] * 4 + tmp13a + tmp13b * 8;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
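// last column: scale everything by an extra 4 so the vertical pass can use
// tmp[.][5] directly, without a separate factor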
for (int m = 5; m < 6; m++)
{
int tmp02a = output0_tm_1[0] + output0_tm_2[0];
int tmp13a = output0_tm_1[0] - output0_tm_2[0];
int tmp02b = output0_tm_3[0] + output0_tm_4[0];
int tmp13b = output0_tm_3[0] - output0_tm_4[0];
tmp[0][m] = (output0_tm_0[0] + tmp02a + tmp02b) * 4;
tmp[1][m] = (tmp13a + tmp13b * 2) * 4;
tmp[2][m] = (tmp02a + tmp02b * 4) * 4;
tmp[3][m] = (output0_tm_5[0] * 4 + tmp13a + tmp13b * 8) * 4;
output0_tm_0 += tiles * 6;
output0_tm_1 += tiles * 6;
output0_tm_2 += tiles * 6;
output0_tm_3 += tiles * 6;
output0_tm_4 += tiles * 6;
output0_tm_5 += tiles * 6;
}
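// vertical pass: the same 6->4 transform applied down the columns; the /576
// removes the scale accumulated by the integer transforms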
for (int m = 0; m < 4; m++)
{
const int* tmp0 = tmp[m];
int tmp02a = tmp0[1] + tmp0[2];
int tmp13a = tmp0[1] - tmp0[2];
int tmp02b = tmp0[3] + tmp0[4];
int tmp13b = tmp0[3] - tmp0[4];
output0[0] = (tmp0[0] + tmp02a + tmp02b) / 576;
output0[1] = (tmp13a + tmp13b * 2) / 576;
output0[2] = (tmp02a + tmp02b * 4) / 576;
output0[3] = (tmp0[5] + tmp13a + tmp13b * 8) / 576;
output0 += outw;
}
}
}
}
}
// END transform output
// cut result pad
copy_cut_border(top_blob_bordered, top_blob, 0, top_blob_bordered.h - top_blob.h, 0, top_blob_bordered.w - top_blob.w, opt);
}
|
Sema.h | //===--- Sema.h - Semantic Analysis & AST Building --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the Sema class, which performs semantic analysis and
// builds ASTs.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_SEMA_SEMA_H
#define LLVM_CLANG_SEMA_SEMA_H
#include "clang/AST/Attr.h"
#include "clang/AST/Availability.h"
#include "clang/AST/ComparisonCategories.h"
#include "clang/AST/DeclTemplate.h"
#include "clang/AST/DeclarationName.h"
#include "clang/AST/Expr.h"
#include "clang/AST/ExprCXX.h"
#include "clang/AST/ExprObjC.h"
#include "clang/AST/ExternalASTSource.h"
#include "clang/AST/LocInfoType.h"
#include "clang/AST/MangleNumberingContext.h"
#include "clang/AST/NSAPI.h"
#include "clang/AST/PrettyPrinter.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/TypeLoc.h"
#include "clang/AST/TypeOrdering.h"
#include "clang/Basic/ExpressionTraits.h"
#include "clang/Basic/Module.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/PragmaKinds.h"
#include "clang/Basic/Specifiers.h"
#include "clang/Basic/TemplateKinds.h"
#include "clang/Basic/TypeTraits.h"
#include "clang/Sema/AnalysisBasedWarnings.h"
#include "clang/Sema/CleanupInfo.h"
#include "clang/Sema/DeclSpec.h"
#include "clang/Sema/ExternalSemaSource.h"
#include "clang/Sema/IdentifierResolver.h"
#include "clang/Sema/ObjCMethodList.h"
#include "clang/Sema/Ownership.h"
#include "clang/Sema/Scope.h"
#include "clang/Sema/TypoCorrection.h"
#include "clang/Sema/Weak.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallBitVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include <deque>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class APSInt;
template <typename ValueT> struct DenseMapInfo;
template <typename ValueT, typename ValueInfoT> class DenseSet;
class SmallBitVector;
struct InlineAsmIdentifierInfo;
}
namespace clang {
class ADLResult;
class ASTConsumer;
class ASTContext;
class ASTMutationListener;
class ASTReader;
class ASTWriter;
class ArrayType;
class ParsedAttr;
class BindingDecl;
class BlockDecl;
class CapturedDecl;
class CXXBasePath;
class CXXBasePaths;
class CXXBindTemporaryExpr;
typedef SmallVector<CXXBaseSpecifier*, 4> CXXCastPath;
class CXXConstructorDecl;
class CXXConversionDecl;
class CXXDeleteExpr;
class CXXDestructorDecl;
class CXXFieldCollector;
class CXXMemberCallExpr;
class CXXMethodDecl;
class CXXScopeSpec;
class CXXTemporary;
class CXXTryStmt;
class CallExpr;
class ClassTemplateDecl;
class ClassTemplatePartialSpecializationDecl;
class ClassTemplateSpecializationDecl;
class VarTemplatePartialSpecializationDecl;
class CodeCompleteConsumer;
class CodeCompletionAllocator;
class CodeCompletionTUInfo;
class CodeCompletionResult;
class CoroutineBodyStmt;
class Decl;
class DeclAccessPair;
class DeclContext;
class DeclRefExpr;
class DeclaratorDecl;
class DeducedTemplateArgument;
class DependentDiagnostic;
class DesignatedInitExpr;
class Designation;
class EnableIfAttr;
class EnumConstantDecl;
class Expr;
class ExtVectorType;
class FormatAttr;
class FriendDecl;
class FunctionDecl;
class FunctionProtoType;
class FunctionTemplateDecl;
class ImplicitConversionSequence;
typedef MutableArrayRef<ImplicitConversionSequence> ConversionSequenceList;
class InitListExpr;
class InitializationKind;
class InitializationSequence;
class InitializedEntity;
class IntegerLiteral;
class LabelStmt;
class LambdaExpr;
class LangOptions;
class LocalInstantiationScope;
class LookupResult;
class MacroInfo;
typedef ArrayRef<std::pair<IdentifierInfo *, SourceLocation>> ModuleIdPath;
class ModuleLoader;
class MultiLevelTemplateArgumentList;
class NamedDecl;
class ObjCCategoryDecl;
class ObjCCategoryImplDecl;
class ObjCCompatibleAliasDecl;
class ObjCContainerDecl;
class ObjCImplDecl;
class ObjCImplementationDecl;
class ObjCInterfaceDecl;
class ObjCIvarDecl;
template <class T> class ObjCList;
class ObjCMessageExpr;
class ObjCMethodDecl;
class ObjCPropertyDecl;
class ObjCProtocolDecl;
class OMPThreadPrivateDecl;
class OMPRequiresDecl;
class OMPDeclareReductionDecl;
class OMPDeclareSimdDecl;
class OMPClause;
struct OMPVarListLocTy;
struct OverloadCandidate;
class OverloadCandidateSet;
class OverloadExpr;
class ParenListExpr;
class ParmVarDecl;
class Preprocessor;
class PseudoDestructorTypeStorage;
class PseudoObjectExpr;
class QualType;
class StandardConversionSequence;
class Stmt;
class StringLiteral;
class SwitchStmt;
class TemplateArgument;
class TemplateArgumentList;
class TemplateArgumentLoc;
class TemplateDecl;
class TemplateInstantiationCallback;
class TemplateParameterList;
class TemplatePartialOrderingContext;
class TemplateTemplateParmDecl;
class Token;
class TypeAliasDecl;
class TypedefDecl;
class TypedefNameDecl;
class TypeLoc;
class TypoCorrectionConsumer;
class UnqualifiedId;
class UnresolvedLookupExpr;
class UnresolvedMemberExpr;
class UnresolvedSetImpl;
class UnresolvedSetIterator;
class UsingDecl;
class UsingShadowDecl;
class ValueDecl;
class VarDecl;
class VarTemplateSpecializationDecl;
class VisibilityAttr;
class VisibleDeclConsumer;
class IndirectFieldDecl;
struct DeductionFailureInfo;
class TemplateSpecCandidateSet;
namespace sema {
class AccessedEntity;
class BlockScopeInfo;
class Capture;
class CapturedRegionScopeInfo;
class CapturingScopeInfo;
class CompoundScopeInfo;
class DelayedDiagnostic;
class DelayedDiagnosticPool;
class FunctionScopeInfo;
class LambdaScopeInfo;
class PossiblyUnreachableDiag;
class SemaPPCallbacks;
class TemplateDeductionInfo;
}
namespace threadSafety {
class BeforeSet;
void threadSafetyCleanup(BeforeSet* Cache);
}
// FIXME: No way to easily map from TemplateTypeParmTypes to
// TemplateTypeParmDecls, so we have this horrible PointerUnion.
typedef std::pair<llvm::PointerUnion<const TemplateTypeParmType*, NamedDecl*>,
SourceLocation> UnexpandedParameterPack;
/// Describes whether we've seen any nullability information for the given
/// file.
struct FileNullability {
/// The first pointer declarator (of any pointer kind) in the file that does
/// not have a corresponding nullability annotation.
SourceLocation PointerLoc;
/// The end location for the first pointer declarator in the file. Used for
/// placing fix-its.
SourceLocation PointerEndLoc;
/// Which kind of pointer declarator we saw.
uint8_t PointerKind;
/// Whether we saw any type nullability annotations in the given file.
bool SawTypeNullability = false;
};
/// A mapping from file IDs to a record of whether we've seen nullability
/// information in that file.
class FileNullabilityMap {
/// A mapping from file IDs to the nullability information for each file ID.
llvm::DenseMap<FileID, FileNullability> Map;
/// A single-element cache based on the file ID.
struct {
FileID File;
FileNullability Nullability;
} Cache;
public:
FileNullability &operator[](FileID file) {
// Check the single-element cache.
if (file == Cache.File)
return Cache.Nullability;
// It's not in the single-element cache; flush the cache if we have one.
if (!Cache.File.isInvalid()) {
Map[Cache.File] = Cache.Nullability;
}
// Pull this entry into the cache.
Cache.File = file;
Cache.Nullability = Map[file];
return Cache.Nullability;
}
};
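// Illustrative use: given FileNullabilityMap M and a FileID FID, M[FID]
// yields a mutable entry; repeated lookups with the same FID are served from
// the one-element cache.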
/// Keeps track of expected type during expression parsing. The type is tied to
/// a particular token; all functions that update or consume the type take the
/// start location of the token they are looking at as a parameter. This allows
/// us to avoid updating the type on hot paths in the parser.
class PreferredTypeBuilder {
public:
PreferredTypeBuilder() = default;
explicit PreferredTypeBuilder(QualType Type) : Type(Type) {}
void enterCondition(Sema &S, SourceLocation Tok);
void enterReturn(Sema &S, SourceLocation Tok);
void enterVariableInit(SourceLocation Tok, Decl *D);
/// Computing a type for the function argument may require running
/// overloading, so we postpone its computation until it is actually needed.
///
/// Clients should be very careful when using this function: since it stores a
/// function_ref, clients must make sure all calls to get() with the same
/// location happen while the function_ref is alive.
void enterFunctionArgument(SourceLocation Tok,
llvm::function_ref<QualType()> ComputeType);
void enterParenExpr(SourceLocation Tok, SourceLocation LParLoc);
void enterUnary(Sema &S, SourceLocation Tok, tok::TokenKind OpKind,
SourceLocation OpLoc);
void enterBinary(Sema &S, SourceLocation Tok, Expr *LHS, tok::TokenKind Op);
void enterMemAccess(Sema &S, SourceLocation Tok, Expr *Base);
void enterSubscript(Sema &S, SourceLocation Tok, Expr *LHS);
/// Handles all type casts, including C-style cast, C++ casts, etc.
void enterTypeCast(SourceLocation Tok, QualType CastType);
QualType get(SourceLocation Tok) const {
if (Tok != ExpectedLoc)
return QualType();
if (!Type.isNull())
return Type;
if (ComputeType)
return ComputeType();
return QualType();
}
private:
/// Start position of a token for which we store expected type.
SourceLocation ExpectedLoc;
/// Expected type for a token starting at ExpectedLoc.
QualType Type;
/// A function to compute expected type at ExpectedLoc. It is only considered
/// if Type is null.
llvm::function_ref<QualType()> ComputeType;
};
/// Sema - This implements semantic analysis and AST building for C.
class Sema {
Sema(const Sema &) = delete;
void operator=(const Sema &) = delete;
/// Source of additional semantic information.
ExternalSemaSource *ExternalSource;
/// Whether Sema has generated a multiplexer and has to delete it.
bool isMultiplexExternalSource;
static bool mightHaveNonExternalLinkage(const DeclaratorDecl *FD);
bool isVisibleSlow(const NamedDecl *D);
/// Determine whether two declarations should be linked together, given that
/// the old declaration might not be visible and the new declaration might
/// not have external linkage.
bool shouldLinkPossiblyHiddenDecl(const NamedDecl *Old,
const NamedDecl *New) {
if (isVisible(Old))
return true;
// See comment in below overload for why it's safe to compute the linkage
// of the new declaration here.
if (New->isExternallyDeclarable()) {
assert(Old->isExternallyDeclarable() &&
"should not have found a non-externally-declarable previous decl");
return true;
}
return false;
}
bool shouldLinkPossiblyHiddenDecl(LookupResult &Old, const NamedDecl *New);
void setupImplicitSpecialMemberType(CXXMethodDecl *SpecialMem,
QualType ResultTy,
ArrayRef<QualType> Args);
public:
typedef OpaquePtr<DeclGroupRef> DeclGroupPtrTy;
typedef OpaquePtr<TemplateName> TemplateTy;
typedef OpaquePtr<QualType> TypeTy;
OpenCLOptions OpenCLFeatures;
FPOptions FPFeatures;
const LangOptions &LangOpts;
Preprocessor &PP;
ASTContext &Context;
ASTConsumer &Consumer;
DiagnosticsEngine &Diags;
SourceManager &SourceMgr;
/// Flag indicating whether or not to collect detailed statistics.
bool CollectStats;
/// Code-completion consumer.
CodeCompleteConsumer *CodeCompleter;
/// CurContext - This is the current declaration context of parsing.
DeclContext *CurContext;
/// Generally null except when we temporarily switch decl contexts,
/// like in \see ActOnObjCTemporaryExitContainerContext.
DeclContext *OriginalLexicalContext;
/// VAListTagName - The declaration name corresponding to __va_list_tag.
/// This is used as part of a hack to omit that class from ADL results.
DeclarationName VAListTagName;
bool MSStructPragmaOn; // True when \#pragma ms_struct on
/// Controls member pointer representation format under the MS ABI.
LangOptions::PragmaMSPointersToMembersKind
MSPointerToMemberRepresentationMethod;
/// Stack of active SEH __finally scopes. Can be empty.
SmallVector<Scope*, 2> CurrentSEHFinally;
/// Source location for newly created implicit MSInheritanceAttrs
SourceLocation ImplicitMSInheritanceAttrLoc;
/// pragma clang section kind
enum PragmaClangSectionKind {
PCSK_Invalid = 0,
PCSK_BSS = 1,
PCSK_Data = 2,
PCSK_Rodata = 3,
PCSK_Text = 4
};
enum PragmaClangSectionAction {
PCSA_Set = 0,
PCSA_Clear = 1
};
struct PragmaClangSection {
std::string SectionName;
bool Valid = false;
SourceLocation PragmaLocation;
void Act(SourceLocation PragmaLocation,
PragmaClangSectionAction Action,
StringLiteral* Name);
};
PragmaClangSection PragmaClangBSSSection;
PragmaClangSection PragmaClangDataSection;
PragmaClangSection PragmaClangRodataSection;
PragmaClangSection PragmaClangTextSection;
enum PragmaMsStackAction {
PSK_Reset = 0x0, // #pragma ()
PSK_Set = 0x1, // #pragma (value)
PSK_Push = 0x2, // #pragma (push[, id])
PSK_Pop = 0x4, // #pragma (pop[, id])
PSK_Show = 0x8, // #pragma (show) -- only for "pack"!
PSK_Push_Set = PSK_Push | PSK_Set, // #pragma (push[, id], value)
PSK_Pop_Set = PSK_Pop | PSK_Set, // #pragma (pop[, id], value)
};
template<typename ValueType>
struct PragmaStack {
struct Slot {
llvm::StringRef StackSlotLabel;
ValueType Value;
SourceLocation PragmaLocation;
SourceLocation PragmaPushLocation;
Slot(llvm::StringRef StackSlotLabel, ValueType Value,
SourceLocation PragmaLocation, SourceLocation PragmaPushLocation)
: StackSlotLabel(StackSlotLabel), Value(Value),
PragmaLocation(PragmaLocation),
PragmaPushLocation(PragmaPushLocation) {}
};
void Act(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
ValueType Value);
// MSVC seems to add artificial slots to #pragma stacks on entering a C++
// method body to restore the stacks on exit, so it works like this:
//
// struct S {
// #pragma <name>(push, InternalPragmaSlot, <current_pragma_value>)
// void Method {}
// #pragma <name>(pop, InternalPragmaSlot)
// };
//
// It works even with #pragma vtordisp, although MSVC doesn't support
// #pragma vtordisp(push [, id], n)
// syntax.
//
// Push / pop a named sentinel slot.
void SentinelAction(PragmaMsStackAction Action, StringRef Label) {
assert((Action == PSK_Push || Action == PSK_Pop) &&
"Can only push / pop #pragma stack sentinels!");
Act(CurrentPragmaLocation, Action, Label, CurrentValue);
}
// Constructors.
explicit PragmaStack(const ValueType &Default)
: DefaultValue(Default), CurrentValue(Default) {}
bool hasValue() const { return CurrentValue != DefaultValue; }
SmallVector<Slot, 2> Stack;
ValueType DefaultValue; // Value used for PSK_Reset action.
ValueType CurrentValue;
SourceLocation CurrentPragmaLocation;
};
// FIXME: We should serialize / deserialize these if they occur in a PCH (but
// we shouldn't do so if they're in a module).
/// Whether to insert vtordisps prior to virtual bases in the Microsoft
/// C++ ABI. Possible values are 0, 1, and 2, which mean:
///
/// 0: Suppress all vtordisps
/// 1: Insert vtordisps in the presence of vbase overrides and non-trivial
/// structors
/// 2: Always insert vtordisps to support RTTI on partially constructed
/// objects
PragmaStack<MSVtorDispAttr::Mode> VtorDispStack;
// #pragma pack.
// Sentinel to represent when the stack is set to mac68k alignment.
static const unsigned kMac68kAlignmentSentinel = ~0U;
PragmaStack<unsigned> PackStack;
// The current #pragma pack values and locations at each #include.
struct PackIncludeState {
unsigned CurrentValue;
SourceLocation CurrentPragmaLocation;
bool HasNonDefaultValue, ShouldWarnOnInclude;
};
SmallVector<PackIncludeState, 8> PackIncludeStack;
// Segment #pragmas.
PragmaStack<StringLiteral *> DataSegStack;
PragmaStack<StringLiteral *> BSSSegStack;
PragmaStack<StringLiteral *> ConstSegStack;
PragmaStack<StringLiteral *> CodeSegStack;
// RAII object to push / pop sentinel slots for all MS #pragma stacks.
// Actions should be performed only if we enter / exit a C++ method body.
class PragmaStackSentinelRAII {
public:
PragmaStackSentinelRAII(Sema &S, StringRef SlotLabel, bool ShouldAct);
~PragmaStackSentinelRAII();
private:
Sema &S;
StringRef SlotLabel;
bool ShouldAct;
};
/// A mapping that describes the nullability we've seen in each header file.
FileNullabilityMap NullabilityMap;
/// Last section used with #pragma init_seg.
StringLiteral *CurInitSeg;
SourceLocation CurInitSegLoc;
/// VisContext - Manages the stack for \#pragma GCC visibility.
void *VisContext; // Really a "PragmaVisStack*"
/// This is an attribute introduced by \#pragma clang attribute.
struct PragmaAttributeEntry {
SourceLocation Loc;
ParsedAttr *Attribute;
SmallVector<attr::SubjectMatchRule, 4> MatchRules;
bool IsUsed;
};
/// A push'd group of PragmaAttributeEntries.
struct PragmaAttributeGroup {
/// The location of the push attribute.
SourceLocation Loc;
/// The namespace of this push group.
const IdentifierInfo *Namespace;
SmallVector<PragmaAttributeEntry, 2> Entries;
};
SmallVector<PragmaAttributeGroup, 2> PragmaAttributeStack;
/// The declaration that is currently receiving an attribute from the
/// #pragma attribute stack.
const Decl *PragmaAttributeCurrentTargetDecl;
/// This represents the last location of a "#pragma clang optimize off"
/// directive if such a directive has not been closed by an "on" yet. If
/// optimizations are currently "on", this is set to an invalid location.
SourceLocation OptimizeOffPragmaLocation;
/// Flag indicating if Sema is building a recovery call expression.
///
/// This flag is used to avoid building recovery call expressions
/// if Sema is already doing so, which would cause infinite recursions.
bool IsBuildingRecoveryCallExpr;
/// Used to control the generation of ExprWithCleanups.
CleanupInfo Cleanup;
/// ExprCleanupObjects - This is the stack of objects requiring
/// cleanup that are created by the current full expression. The
/// element type here is ExprWithCleanups::Object.
SmallVector<BlockDecl*, 8> ExprCleanupObjects;
/// Store a set of either DeclRefExprs or MemberExprs that contain a reference
/// to a variable (constant) that may or may not be odr-used in this Expr, and
/// we won't know until all lvalue-to-rvalue and discarded value conversions
/// have been applied to all subexpressions of the enclosing full expression.
/// This is cleared at the end of each full expression.
using MaybeODRUseExprSet = llvm::SmallPtrSet<Expr *, 2>;
MaybeODRUseExprSet MaybeODRUseExprs;
std::unique_ptr<sema::FunctionScopeInfo> CachedFunctionScope;
/// True if the current expression is a member bounds expression
/// for a structure. Member bounds expressions can only reference
/// members and cannot reference variables.
bool IsMemberBoundsExpr;
std::unique_ptr<sema::FunctionScopeInfo> PreallocatedFunctionScope;
/// Stack containing information about each of the nested
/// function, block, and method scopes that are currently active.
SmallVector<sema::FunctionScopeInfo *, 4> FunctionScopes;
typedef LazyVector<TypedefNameDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadExtVectorDecls, 2, 2>
ExtVectorDeclsType;
/// ExtVectorDecls - This is a list of all the extended vector types. This allows
/// us to associate a raw vector type with one of the ext_vector type names.
/// This is only necessary for issuing pretty diagnostics.
ExtVectorDeclsType ExtVectorDecls;
/// FieldCollector - Collects CXXFieldDecls during parsing of C++ classes.
std::unique_ptr<CXXFieldCollector> FieldCollector;
typedef llvm::SmallSetVector<NamedDecl *, 16> NamedDeclSetType;
/// Set containing all declared private fields that are not used.
NamedDeclSetType UnusedPrivateFields;
/// Set containing all typedefs that are likely unused.
llvm::SmallSetVector<const TypedefNameDecl *, 4>
UnusedLocalTypedefNameCandidates;
/// Delete-expressions to be analyzed at the end of translation unit
///
/// This list contains class members, and the locations of delete-expressions
/// for which we could not prove whether they mismatch the new-expression
/// used in the initializer of the field.
typedef std::pair<SourceLocation, bool> DeleteExprLoc;
typedef llvm::SmallVector<DeleteExprLoc, 4> DeleteLocs;
llvm::MapVector<FieldDecl *, DeleteLocs> DeleteExprs;
typedef llvm::SmallPtrSet<const CXXRecordDecl*, 8> RecordDeclSetTy;
/// PureVirtualClassDiagSet - a set of class declarations for which we have
/// emitted a list of pure virtual functions. Used to prevent emitting the
/// same list more than once.
std::unique_ptr<RecordDeclSetTy> PureVirtualClassDiagSet;
/// ParsingInitForAutoVars - a set of declarations with auto types for which
/// we are currently parsing the initializer.
llvm::SmallPtrSet<const Decl*, 4> ParsingInitForAutoVars;
/// Look for a locally scoped extern "C" declaration by the given name.
NamedDecl *findLocallyScopedExternCDecl(DeclarationName Name);
typedef LazyVector<VarDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadTentativeDefinitions, 2, 2>
TentativeDefinitionsType;
/// All the tentative definitions encountered in the TU.
TentativeDefinitionsType TentativeDefinitions;
typedef LazyVector<const DeclaratorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadUnusedFileScopedDecls, 2, 2>
UnusedFileScopedDeclsType;
/// The set of file scoped decls seen so far that have not been used
/// and must warn if not used. Only contains the first declaration.
UnusedFileScopedDeclsType UnusedFileScopedDecls;
typedef LazyVector<CXXConstructorDecl *, ExternalSemaSource,
&ExternalSemaSource::ReadDelegatingConstructors, 2, 2>
DelegatingCtorDeclsType;
/// All the delegating constructors seen so far in the file, used for
/// cycle detection at the end of the TU.
DelegatingCtorDeclsType DelegatingCtorDecls;
/// All the overriding functions seen during a class definition
/// that had their exception spec checks delayed, plus the overridden
/// function.
SmallVector<std::pair<const CXXMethodDecl*, const CXXMethodDecl*>, 2>
DelayedOverridingExceptionSpecChecks;
/// All the function redeclarations seen during a class definition that had
/// their exception spec checks delayed, plus the prior declaration they
/// should be checked against. Except during error recovery, the new decl
/// should always be a friend declaration, as that's the only valid way to
/// redeclare a special member before its class is complete.
SmallVector<std::pair<FunctionDecl*, FunctionDecl*>, 2>
DelayedEquivalentExceptionSpecChecks;
typedef llvm::MapVector<const FunctionDecl *,
std::unique_ptr<LateParsedTemplate>>
LateParsedTemplateMapT;
LateParsedTemplateMapT LateParsedTemplateMap;
/// Callback to the parser to parse templated functions when needed.
typedef void LateTemplateParserCB(void *P, LateParsedTemplate &LPT);
typedef void LateTemplateParserCleanupCB(void *P);
LateTemplateParserCB *LateTemplateParser;
LateTemplateParserCleanupCB *LateTemplateParserCleanup;
void *OpaqueParser;
void SetLateTemplateParser(LateTemplateParserCB *LTP,
LateTemplateParserCleanupCB *LTPCleanup,
void *P) {
LateTemplateParser = LTP;
LateTemplateParserCleanup = LTPCleanup;
OpaqueParser = P;
}
class DelayedDiagnostics;
class DelayedDiagnosticsState {
sema::DelayedDiagnosticPool *SavedPool;
friend class Sema::DelayedDiagnostics;
};
typedef DelayedDiagnosticsState ParsingDeclState;
typedef DelayedDiagnosticsState ProcessingContextState;
/// A class which encapsulates the logic for delaying diagnostics
/// during parsing and other processing.
class DelayedDiagnostics {
/// The current pool of diagnostics into which delayed
/// diagnostics should go.
sema::DelayedDiagnosticPool *CurPool;
public:
DelayedDiagnostics() : CurPool(nullptr) {}
/// Adds a delayed diagnostic.
void add(const sema::DelayedDiagnostic &diag); // in DelayedDiagnostic.h
/// Determines whether diagnostics should be delayed.
bool shouldDelayDiagnostics() { return CurPool != nullptr; }
/// Returns the current delayed-diagnostics pool.
sema::DelayedDiagnosticPool *getCurrentPool() const {
return CurPool;
}
/// Enter a new scope. Access and deprecation diagnostics will be
/// collected in this pool.
DelayedDiagnosticsState push(sema::DelayedDiagnosticPool &pool) {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = &pool;
return state;
}
/// Leave a delayed-diagnostic state that was previously pushed.
/// Do not emit any of the diagnostics. This is performed as part
/// of the bookkeeping of popping a pool "properly".
void popWithoutEmitting(DelayedDiagnosticsState state) {
CurPool = state.SavedPool;
}
/// Enter a new scope where access and deprecation diagnostics are
/// not delayed.
DelayedDiagnosticsState pushUndelayed() {
DelayedDiagnosticsState state;
state.SavedPool = CurPool;
CurPool = nullptr;
return state;
}
/// Undo a previous pushUndelayed().
void popUndelayed(DelayedDiagnosticsState state) {
assert(CurPool == nullptr);
CurPool = state.SavedPool;
}
} DelayedDiagnostics;
/// A RAII object to temporarily push a declaration context.
class ContextRAII {
private:
Sema &S;
DeclContext *SavedContext;
ProcessingContextState SavedContextState;
QualType SavedCXXThisTypeOverride;
public:
ContextRAII(Sema &S, DeclContext *ContextToPush, bool NewThisContext = true)
: S(S), SavedContext(S.CurContext),
SavedContextState(S.DelayedDiagnostics.pushUndelayed()),
SavedCXXThisTypeOverride(S.CXXThisTypeOverride)
{
assert(ContextToPush && "pushing null context");
S.CurContext = ContextToPush;
if (NewThisContext)
S.CXXThisTypeOverride = QualType();
}
void pop() {
if (!SavedContext) return;
S.CurContext = SavedContext;
S.DelayedDiagnostics.popUndelayed(SavedContextState);
S.CXXThisTypeOverride = SavedCXXThisTypeOverride;
SavedContext = nullptr;
}
~ContextRAII() {
pop();
}
};
/// Used to change context to isConstantEvaluated without pushing a heavy
/// ExpressionEvaluationContextRecord object.
bool isConstantEvaluatedOverride;
bool isConstantEvaluated() {
return ExprEvalContexts.back().isConstantEvaluated() ||
isConstantEvaluatedOverride;
}
/// RAII object to handle the state changes required to synthesize
/// a function body.
class SynthesizedFunctionScope {
Sema &S;
Sema::ContextRAII SavedContext;
bool PushedCodeSynthesisContext = false;
public:
SynthesizedFunctionScope(Sema &S, DeclContext *DC)
: S(S), SavedContext(S, DC) {
S.PushFunctionScope();
S.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::PotentiallyEvaluated);
if (auto *FD = dyn_cast<FunctionDecl>(DC))
FD->setWillHaveBody(true);
else
assert(isa<ObjCMethodDecl>(DC));
}
void addContextNote(SourceLocation UseLoc) {
assert(!PushedCodeSynthesisContext);
Sema::CodeSynthesisContext Ctx;
Ctx.Kind = Sema::CodeSynthesisContext::DefiningSynthesizedFunction;
Ctx.PointOfInstantiation = UseLoc;
Ctx.Entity = cast<Decl>(S.CurContext);
S.pushCodeSynthesisContext(Ctx);
PushedCodeSynthesisContext = true;
}
~SynthesizedFunctionScope() {
if (PushedCodeSynthesisContext)
S.popCodeSynthesisContext();
if (auto *FD = dyn_cast<FunctionDecl>(S.CurContext))
FD->setWillHaveBody(false);
S.PopExpressionEvaluationContext();
S.PopFunctionScopeInfo();
}
};
/// WeakUndeclaredIdentifiers - Identifiers contained in
/// \#pragma weak before being declared. Rare; such an identifier may alias
/// another identifier, declared or undeclared.
llvm::MapVector<IdentifierInfo *, WeakInfo> WeakUndeclaredIdentifiers;
/// ExtnameUndeclaredIdentifiers - Identifiers contained in
/// \#pragma redefine_extname before being declared. Used in Solaris system headers
/// to define functions that occur in multiple standards to call the version
/// in the currently selected standard.
llvm::DenseMap<IdentifierInfo*,AsmLabelAttr*> ExtnameUndeclaredIdentifiers;
/// Load weak undeclared identifiers from the external source.
void LoadExternalWeakUndeclaredIdentifiers();
/// WeakTopLevelDecl - Translation-unit scoped declarations generated by
/// \#pragma weak during processing of other Decls.
/// I couldn't figure out a clean way to generate these in-line, so
/// we store them here and handle separately -- which is a hack.
/// It would be best to refactor this.
SmallVector<Decl*,2> WeakTopLevelDecl;
IdentifierResolver IdResolver;
/// Translation Unit Scope - useful to Objective-C actions that need
/// to lookup file scope declarations in the "ordinary" C decl namespace.
/// For example, user-defined classes, built-in "id" type, etc.
Scope *TUScope;
/// The C++ "std" namespace, where the standard library resides.
LazyDeclPtr StdNamespace;
/// The C++ "std::bad_alloc" class, which is defined by the C++
/// standard library.
LazyDeclPtr StdBadAlloc;
/// The C++ "std::align_val_t" enum class, which is defined by the C++
/// standard library.
LazyDeclPtr StdAlignValT;
/// The C++ "std::experimental" namespace, where the experimental parts
/// of the standard library reside.
NamespaceDecl *StdExperimentalNamespaceCache;
/// The C++ "std::initializer_list" template, which is defined in
/// \<initializer_list>.
ClassTemplateDecl *StdInitializerList;
/// The C++ "std::coroutine_traits" template, which is defined in
/// \<coroutine_traits>
ClassTemplateDecl *StdCoroutineTraitsCache;
/// The C++ "type_info" declaration, which is defined in \<typeinfo>.
RecordDecl *CXXTypeInfoDecl;
/// The MSVC "_GUID" struct, which is defined in MSVC header files.
RecordDecl *MSVCGuidDecl;
/// Caches identifiers/selectors for NSFoundation APIs.
std::unique_ptr<NSAPI> NSAPIObj;
/// The declaration of the Objective-C NSNumber class.
ObjCInterfaceDecl *NSNumberDecl;
/// The declaration of the Objective-C NSValue class.
ObjCInterfaceDecl *NSValueDecl;
/// Pointer to NSNumber type (NSNumber *).
QualType NSNumberPointer;
/// Pointer to NSValue type (NSValue *).
QualType NSValuePointer;
/// The Objective-C NSNumber methods used to create NSNumber literals.
ObjCMethodDecl *NSNumberLiteralMethods[NSAPI::NumNSNumberLiteralMethods];
/// The declaration of the Objective-C NSString class.
ObjCInterfaceDecl *NSStringDecl;
/// Pointer to NSString type (NSString *).
QualType NSStringPointer;
/// The declaration of the stringWithUTF8String: method.
ObjCMethodDecl *StringWithUTF8StringMethod;
/// The declaration of the valueWithBytes:objCType: method.
ObjCMethodDecl *ValueWithBytesObjCTypeMethod;
/// The declaration of the Objective-C NSArray class.
ObjCInterfaceDecl *NSArrayDecl;
/// The declaration of the arrayWithObjects:count: method.
ObjCMethodDecl *ArrayWithObjectsMethod;
/// The declaration of the Objective-C NSDictionary class.
ObjCInterfaceDecl *NSDictionaryDecl;
/// The declaration of the dictionaryWithObjects:forKeys:count: method.
ObjCMethodDecl *DictionaryWithObjectsMethod;
/// id<NSCopying> type.
QualType QIDNSCopying;
/// Will hold 'respondsToSelector:'.
Selector RespondsToSelectorSel;
/// A flag to remember whether the implicit forms of operator new and delete
/// have been declared.
bool GlobalNewDeleteDeclared;
/// A flag to indicate that we're in a context that permits abstract
/// references to fields.
bool AllowAbstractFieldReference;
/// Describes how the expressions currently being parsed are
/// evaluated at run-time, if at all.
enum class ExpressionEvaluationContext {
/// The current expression and its subexpressions occur within an
/// unevaluated operand (C++11 [expr]p7), such as the subexpression of
/// \c sizeof, where the type of the expression may be significant but
/// no code will be generated to evaluate the value of the expression at
/// run time.
Unevaluated,
/// The current expression occurs within a braced-init-list within
/// an unevaluated operand. This is mostly like a regular unevaluated
/// context, except that we still instantiate constexpr functions that are
/// referenced here so that we can perform narrowing checks correctly.
UnevaluatedList,
/// The current expression occurs within a discarded statement.
/// This behaves largely similarly to an unevaluated operand in preventing
/// definitions from being required, but not in other ways.
DiscardedStatement,
/// The current expression occurs within an unevaluated
/// operand that unconditionally permits abstract references to
/// fields, such as a SIZE operator in MS-style inline assembly.
UnevaluatedAbstract,
/// The current context is "potentially evaluated" in C++11 terms,
/// but the expression is evaluated at compile-time (like the values of
/// cases in a switch statement).
ConstantEvaluated,
/// The current expression is potentially evaluated at run time,
/// which means that code may be generated to evaluate the value of the
/// expression at run time.
PotentiallyEvaluated,
/// The current expression is potentially evaluated, but any
/// declarations referenced inside that expression are only used if
/// in fact the current expression is used.
///
/// This value is used when parsing default function arguments, for which
/// we would like to provide diagnostics (e.g., passing non-POD arguments
/// through varargs) but do not want to mark declarations as "referenced"
/// until the default argument is used.
PotentiallyEvaluatedIfUsed
};
/// Data structure used to record current or nested
/// expression evaluation contexts.
struct ExpressionEvaluationContextRecord {
/// The expression evaluation context.
ExpressionEvaluationContext Context;
/// Whether the enclosing context needed a cleanup.
CleanupInfo ParentCleanup;
/// Whether we are in a decltype expression.
bool IsDecltype;
/// The number of active cleanup objects when we entered
/// this expression evaluation context.
unsigned NumCleanupObjects;
/// The number of typos encountered during this expression evaluation
/// context (i.e. the number of TypoExprs created).
unsigned NumTypos;
MaybeODRUseExprSet SavedMaybeODRUseExprs;
/// The lambdas that are present within this context, if it
/// is indeed an unevaluated context.
SmallVector<LambdaExpr *, 2> Lambdas;
/// The declaration that provides context for lambda expressions
/// and block literals if the normal declaration context does not
/// suffice, e.g., in a default function argument.
Decl *ManglingContextDecl;
/// The context information used to mangle lambda expressions
/// and block literals within this context.
///
/// This mangling information is allocated lazily, since most contexts
/// do not have lambda expressions or block literals.
std::unique_ptr<MangleNumberingContext> MangleNumbering;
/// If we are processing a decltype type, a set of call expressions
/// for which we have deferred checking the completeness of the return type.
SmallVector<CallExpr *, 8> DelayedDecltypeCalls;
/// If we are processing a decltype type, a set of temporary binding
/// expressions for which we have deferred checking the destructor.
SmallVector<CXXBindTemporaryExpr *, 8> DelayedDecltypeBinds;
llvm::SmallPtrSet<const Expr *, 8> PossibleDerefs;
/// \brief Describes whether we are in an expression context which we have
/// to handle differently.
enum ExpressionKind {
EK_Decltype, EK_TemplateArgument, EK_Other
} ExprContext;
ExpressionEvaluationContextRecord(ExpressionEvaluationContext Context,
unsigned NumCleanupObjects,
CleanupInfo ParentCleanup,
Decl *ManglingContextDecl,
ExpressionKind ExprContext)
: Context(Context), ParentCleanup(ParentCleanup),
NumCleanupObjects(NumCleanupObjects), NumTypos(0),
ManglingContextDecl(ManglingContextDecl), MangleNumbering(),
ExprContext(ExprContext) {}
/// Retrieve the mangling numbering context, used to consistently
/// number constructs like lambdas for mangling.
MangleNumberingContext &getMangleNumberingContext(ASTContext &Ctx);
bool isUnevaluated() const {
return Context == ExpressionEvaluationContext::Unevaluated ||
Context == ExpressionEvaluationContext::UnevaluatedAbstract ||
Context == ExpressionEvaluationContext::UnevaluatedList;
}
bool isConstantEvaluated() const {
return Context == ExpressionEvaluationContext::ConstantEvaluated;
}
};
/// A stack of expression evaluation contexts.
SmallVector<ExpressionEvaluationContextRecord, 8> ExprEvalContexts;
/// Emit a warning for all pending noderef expressions that we recorded.
void WarnOnPendingNoDerefs(ExpressionEvaluationContextRecord &Rec);
/// Compute the mangling number context for a lambda expression or
/// block literal.
///
/// \param DC - The DeclContext containing the lambda expression or
/// block literal.
/// \param[out] ManglingContextDecl - Returns the ManglingContextDecl
/// associated with the context, if relevant.
MangleNumberingContext *getCurrentMangleNumberContext(
const DeclContext *DC,
Decl *&ManglingContextDecl);
/// SpecialMemberOverloadResult - The overloading result for a special member
/// function.
///
/// This is basically a wrapper around PointerIntPair. The lowest bits of the
/// integer are used to determine whether overload resolution succeeded.
class SpecialMemberOverloadResult {
public:
enum Kind {
NoMemberOrDeleted,
Ambiguous,
Success
};
private:
llvm::PointerIntPair<CXXMethodDecl*, 2> Pair;
public:
SpecialMemberOverloadResult() : Pair() {}
SpecialMemberOverloadResult(CXXMethodDecl *MD)
: Pair(MD, MD->isDeleted() ? NoMemberOrDeleted : Success) {}
CXXMethodDecl *getMethod() const { return Pair.getPointer(); }
void setMethod(CXXMethodDecl *MD) { Pair.setPointer(MD); }
Kind getKind() const { return static_cast<Kind>(Pair.getInt()); }
void setKind(Kind K) { Pair.setInt(K); }
};
class SpecialMemberOverloadResultEntry
: public llvm::FastFoldingSetNode,
public SpecialMemberOverloadResult {
public:
SpecialMemberOverloadResultEntry(const llvm::FoldingSetNodeID &ID)
: FastFoldingSetNode(ID)
{}
};
/// A cache of special member function overload resolution results
/// for C++ records.
llvm::FoldingSet<SpecialMemberOverloadResultEntry> SpecialMemberCache;
/// A cache of the flags available in enumerations with the flag_bits
/// attribute.
mutable llvm::DenseMap<const EnumDecl*, llvm::APInt> FlagBitsCache;
/// The kind of translation unit we are processing.
///
/// When we're processing a complete translation unit, Sema will perform
/// end-of-translation-unit semantic tasks (such as creating
/// initializers for tentative definitions in C) once parsing has
/// completed. Modules and precompiled headers perform different kinds of
/// checks.
TranslationUnitKind TUKind;
llvm::BumpPtrAllocator BumpAlloc;
/// The number of SFINAE diagnostics that have been trapped.
unsigned NumSFINAEErrors;
typedef llvm::DenseMap<ParmVarDecl *, llvm::TinyPtrVector<ParmVarDecl *>>
UnparsedDefaultArgInstantiationsMap;
/// A mapping from parameters with unparsed default arguments to the
/// set of instantiations of each parameter.
///
/// This mapping is a temporary data structure used when parsing
/// nested class templates or nested classes of class templates,
/// where we might end up instantiating an inner class before the
/// default arguments of its methods have been parsed.
UnparsedDefaultArgInstantiationsMap UnparsedDefaultArgInstantiations;
// Contains the locations of the beginning of unparsed default
// argument locations.
llvm::DenseMap<ParmVarDecl *, SourceLocation> UnparsedDefaultArgLocs;
/// UndefinedInternals - all the used, undefined objects which require a
/// definition in this translation unit.
llvm::MapVector<NamedDecl *, SourceLocation> UndefinedButUsed;
/// Determine if VD, which must be a variable or function, is an external
/// symbol that nonetheless can't be referenced from outside this translation
/// unit because its type has no linkage and it's not extern "C".
bool isExternalWithNoLinkageType(ValueDecl *VD);
/// Obtain a sorted list of functions that are undefined but ODR-used.
void getUndefinedButUsed(
SmallVectorImpl<std::pair<NamedDecl *, SourceLocation> > &Undefined);
/// Retrieves list of suspicious delete-expressions that will be checked at
/// the end of translation unit.
const llvm::MapVector<FieldDecl *, DeleteLocs> &
getMismatchingDeleteExpressions() const;
typedef std::pair<ObjCMethodList, ObjCMethodList> GlobalMethods;
typedef llvm::DenseMap<Selector, GlobalMethods> GlobalMethodPool;
/// Method Pool - allows efficient lookup when typechecking messages to "id".
/// We need to maintain a list, since selectors can have differing signatures
/// across classes. In Cocoa, this happens to be extremely uncommon (only 1%
/// of selectors are "overloaded").
/// At the head of the list it is recorded whether there were 0, 1, or >= 2
/// methods inside categories with a particular selector.
GlobalMethodPool MethodPool;
/// Method selectors used in a \@selector expression. Used for implementation
/// of -Wselector.
llvm::MapVector<Selector, SourceLocation> ReferencedSelectors;
/// List of SourceLocations where 'self' is implicitly retained inside a
/// block.
llvm::SmallVector<std::pair<SourceLocation, const BlockDecl *>, 1>
ImplicitlyRetainedSelfLocs;
/// Kinds of C++ special members.
enum CXXSpecialMember {
CXXDefaultConstructor,
CXXCopyConstructor,
CXXMoveConstructor,
CXXCopyAssignment,
CXXMoveAssignment,
CXXDestructor,
CXXInvalid
};
typedef llvm::PointerIntPair<CXXRecordDecl *, 3, CXXSpecialMember>
SpecialMemberDecl;
/// The C++ special members which we are currently in the process of
/// declaring. If this process recursively triggers the declaration of the
/// same special member, we should act as if it is not yet declared.
llvm::SmallPtrSet<SpecialMemberDecl, 4> SpecialMembersBeingDeclared;
/// The function definitions which were renamed as part of typo-correction
/// to match their respective declarations. We want to keep track of them
/// to ensure that we don't emit a "redefinition" error if we encounter a
/// correctly named definition after the renamed definition.
llvm::SmallPtrSet<const NamedDecl *, 4> TypoCorrectedFunctionDefinitions;
/// Stack of types that correspond to the parameter entities that are
/// currently being copy-initialized. Can be empty.
llvm::SmallVector<QualType, 4> CurrentParameterCopyTypes;
void ReadMethodPool(Selector Sel);
void updateOutOfDateSelector(Selector Sel);
/// Private Helper predicate to check for 'self'.
bool isSelfExpr(Expr *RExpr);
bool isSelfExpr(Expr *RExpr, const ObjCMethodDecl *Method);
/// Cause the active diagnostic on the DiagnosticsEngine to be
/// emitted. This is closely coupled to the SemaDiagnosticBuilder class and
/// should not be used elsewhere.
void EmitCurrentDiagnostic(unsigned DiagID);
/// Records and restores the FP_CONTRACT state on entry/exit of compound
/// statements.
class FPContractStateRAII {
public:
FPContractStateRAII(Sema &S) : S(S), OldFPFeaturesState(S.FPFeatures) {}
~FPContractStateRAII() { S.FPFeatures = OldFPFeaturesState; }
private:
Sema& S;
FPOptions OldFPFeaturesState;
};
void addImplicitTypedef(StringRef Name, QualType T);
public:
Sema(Preprocessor &pp, ASTContext &ctxt, ASTConsumer &consumer,
TranslationUnitKind TUKind = TU_Complete,
CodeCompleteConsumer *CompletionConsumer = nullptr);
~Sema();
/// Perform initialization that occurs after the parser has been
/// initialized but before it parses anything.
void Initialize();
const LangOptions &getLangOpts() const { return LangOpts; }
OpenCLOptions &getOpenCLOptions() { return OpenCLFeatures; }
FPOptions &getFPOptions() { return FPFeatures; }
DiagnosticsEngine &getDiagnostics() const { return Diags; }
SourceManager &getSourceManager() const { return SourceMgr; }
Preprocessor &getPreprocessor() const { return PP; }
ASTContext &getASTContext() const { return Context; }
ASTConsumer &getASTConsumer() const { return Consumer; }
ASTMutationListener *getASTMutationListener() const;
ExternalSemaSource* getExternalSource() const { return ExternalSource; }
/// Registers an external source. If an external source already exists,
/// creates a multiplex external source and appends to it.
///
/// \param[in] E - A non-null external sema source.
///
void addExternalSource(ExternalSemaSource *E);
void PrintStats() const;
/// Helper class that creates diagnostics with optional
/// template instantiation stacks.
///
/// This class provides a wrapper around the basic DiagnosticBuilder
/// class that emits diagnostics. SemaDiagnosticBuilder is
/// responsible for emitting the diagnostic (as DiagnosticBuilder
/// does) and, if the diagnostic comes from inside a template
/// instantiation, printing the template instantiation stack as
/// well.
class SemaDiagnosticBuilder : public DiagnosticBuilder {
Sema &SemaRef;
unsigned DiagID;
public:
SemaDiagnosticBuilder(DiagnosticBuilder &DB, Sema &SemaRef, unsigned DiagID)
: DiagnosticBuilder(DB), SemaRef(SemaRef), DiagID(DiagID) { }
// This is a cunning lie. DiagnosticBuilder actually performs move
// construction in its copy constructor (but due to varied uses, it's not
// possible to conveniently express this as actual move construction). So
// the default copy ctor here is fine, because the base class disables the
// source anyway, so the user-defined ~SemaDiagnosticBuilder is a safe no-op
// in that case anyway.
SemaDiagnosticBuilder(const SemaDiagnosticBuilder&) = default;
~SemaDiagnosticBuilder() {
// If we aren't active, there is nothing to do.
if (!isActive()) return;
// Otherwise, we need to emit the diagnostic. First flush the underlying
// DiagnosticBuilder data, and clear the diagnostic builder itself so it
// won't emit the diagnostic in its own destructor.
//
// This seems wasteful, in that as written the DiagnosticBuilder dtor will
// do its own needless checks to see if the diagnostic needs to be
// emitted. However, because we take care to ensure that the builder
// objects never escape, a sufficiently smart compiler will be able to
// eliminate that code.
FlushCounts();
Clear();
// Dispatch to Sema to emit the diagnostic.
SemaRef.EmitCurrentDiagnostic(DiagID);
}
/// Teach operator<< to produce an object of the correct type.
template<typename T>
friend const SemaDiagnosticBuilder &operator<<(
const SemaDiagnosticBuilder &Diag, const T &Value) {
const DiagnosticBuilder &BaseDiag = Diag;
BaseDiag << Value;
return Diag;
}
};
/// Emit a diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, unsigned DiagID) {
DiagnosticBuilder DB = Diags.Report(Loc, DiagID);
return SemaDiagnosticBuilder(DB, *this, DiagID);
}
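// Illustrative usage sketch (the diagnostic ID below is hypothetical):
// arguments are streamed into the returned builder, and the diagnostic is
// emitted when the temporary SemaDiagnosticBuilder is destroyed:
//
//   Diag(Loc, diag::err_invalid_conversion) << FromType << ToType << Range;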
/// Emit a partial diagnostic.
SemaDiagnosticBuilder Diag(SourceLocation Loc, const PartialDiagnostic& PD);
/// Build a partial diagnostic.
PartialDiagnostic PDiag(unsigned DiagID = 0); // in SemaInternal.h
bool findMacroSpelling(SourceLocation &loc, StringRef name);
/// Get a string to suggest for zero-initialization of a type.
std::string
getFixItZeroInitializerForType(QualType T, SourceLocation Loc) const;
std::string getFixItZeroLiteralForType(QualType T, SourceLocation Loc) const;
/// Calls \c Lexer::getLocForEndOfToken()
SourceLocation getLocForEndOfToken(SourceLocation Loc, unsigned Offset = 0);
/// Retrieve the module loader associated with the preprocessor.
ModuleLoader &getModuleLoader() const;
void emitAndClearUnusedLocalTypedefWarnings();
enum TUFragmentKind {
/// The global module fragment, between 'module;' and a module-declaration.
Global,
/// A normal translation unit fragment. For a non-module unit, this is the
/// entire translation unit. Otherwise, it runs from the module-declaration
/// to the private-module-fragment (if any) or the end of the TU (if not).
Normal,
/// The private module fragment, between 'module :private;' and the end of
/// the translation unit.
Private
};
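// Illustrative layout of a C++20 module unit showing the three fragments:
//
//   module;                 // Global: between 'module;' and the
//   #include <vector>       //   module-declaration
//   export module M;        // Normal: from the module-declaration onward
//   export int f();
//   module :private;        // Private: from here to the end of the TU
//   int f() { return 42; }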
void ActOnStartOfTranslationUnit();
void ActOnEndOfTranslationUnit();
void ActOnEndOfTranslationUnitFragment(TUFragmentKind Kind);
void CheckDelegatingCtorCycles();
Scope *getScopeForContext(DeclContext *Ctx);
void PushFunctionScope();
void PushBlockScope(Scope *BlockScope, BlockDecl *Block);
sema::LambdaScopeInfo *PushLambdaScope();
/// This is used to inform Sema what the current TemplateParameterDepth
/// is during parsing. Currently it is used to pass on the depth
/// when parsing generic lambda 'auto' parameters.
void RecordParsingTemplateParameterDepth(unsigned Depth);
void PushCapturedRegionScope(Scope *RegionScope, CapturedDecl *CD,
RecordDecl *RD,
CapturedRegionKind K);
/// Custom deleter to allow FunctionScopeInfos to be kept alive for a short
/// time after they've been popped.
class PoppedFunctionScopeDeleter {
Sema *Self;
public:
explicit PoppedFunctionScopeDeleter(Sema *Self) : Self(Self) {}
void operator()(sema::FunctionScopeInfo *Scope) const;
};
using PoppedFunctionScopePtr =
std::unique_ptr<sema::FunctionScopeInfo, PoppedFunctionScopeDeleter>;
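// Illustrative sketch: the alias pairs a popped FunctionScopeInfo with the
// custom deleter above, so the scope stays usable until the smart pointer
// dies:
//
//   {
//     PoppedFunctionScopePtr FSI = PopFunctionScopeInfo(WP, D, BlockType);
//     // ... emit any deferred diagnostics that still need *FSI ...
//   } // PoppedFunctionScopeDeleter::operator() reclaims the scope here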
PoppedFunctionScopePtr
PopFunctionScopeInfo(const sema::AnalysisBasedWarnings::Policy *WP = nullptr,
const Decl *D = nullptr,
QualType BlockType = QualType());
sema::FunctionScopeInfo *getCurFunction() const {
return FunctionScopes.empty() ? nullptr : FunctionScopes.back();
}
sema::FunctionScopeInfo *getEnclosingFunction() const;
void setFunctionHasBranchIntoScope();
void setFunctionHasBranchProtectedScope();
void setFunctionHasIndirectGoto();
void PushCompoundScope(bool IsStmtExpr);
void PopCompoundScope();
sema::CompoundScopeInfo &getCurCompoundScope() const;
bool hasAnyUnrecoverableErrorsInThisFunction() const;
/// Retrieve the current block, if any.
sema::BlockScopeInfo *getCurBlock();
/// Retrieve the current lambda scope info, if any.
/// \param IgnoreNonLambdaCapturingScope true if we should find the top-most
/// lambda scope info ignoring all inner capturing scopes that are not
/// lambda scopes.
sema::LambdaScopeInfo *
getCurLambda(bool IgnoreNonLambdaCapturingScope = false);
/// Retrieve the current generic lambda info, if any.
sema::LambdaScopeInfo *getCurGenericLambda();
/// Retrieve the current captured region, if any.
sema::CapturedRegionScopeInfo *getCurCapturedRegion();
/// WeakTopLevelDeclDecls - access to \#pragma weak-generated Decls
SmallVectorImpl<Decl *> &WeakTopLevelDecls() { return WeakTopLevelDecl; }
void ActOnComment(SourceRange Comment);
//===--------------------------------------------------------------------===//
// Type Analysis / Processing: SemaType.cpp.
//
QualType BuildQualifiedType(QualType T, SourceLocation Loc, Qualifiers Qs,
const DeclSpec *DS = nullptr);
QualType BuildQualifiedType(QualType T, SourceLocation Loc, unsigned CVRA,
const DeclSpec *DS = nullptr);
QualType BuildPointerType(QualType T, CheckedPointerKind kind,
SourceLocation Loc, DeclarationName Entity);
QualType BuildReferenceType(QualType T, bool LValueRef,
SourceLocation Loc, DeclarationName Entity);
QualType BuildArrayType(QualType T, ArrayType::ArraySizeModifier ASM,
Expr *ArraySize, unsigned Quals,
CheckedArrayKind Kind, SourceRange Brackets,
DeclarationName Entity);
QualType BuildVectorType(QualType T, Expr *VecSize, SourceLocation AttrLoc);
QualType BuildExtVectorType(QualType T, Expr *ArraySize,
SourceLocation AttrLoc);
QualType BuildAddressSpaceAttr(QualType &T, LangAS ASIdx, Expr *AddrSpace,
SourceLocation AttrLoc);
/// Same as above, but constructs the AddressSpace index if not provided.
QualType BuildAddressSpaceAttr(QualType &T, Expr *AddrSpace,
SourceLocation AttrLoc);
bool CheckFunctionReturnType(QualType T, SourceLocation Loc);
/// Build a function type.
///
/// This routine checks the function type according to C++ rules and
/// under the assumption that the result type and parameter types have
/// just been instantiated from a template. It therefore duplicates
/// some of the behavior of GetTypeForDeclarator, but in a much
/// simpler form that is only suitable for this narrow use case.
///
/// \param T The return type of the function.
///
/// \param ParamTypes The parameter types of the function. This array
/// will be modified to account for adjustments to the types of the
/// function parameters.
///
/// \param Loc The location of the entity whose type involves this
/// function type or, if there is no such entity, the location of the
/// type that will have function type.
///
/// \param Entity The name of the entity that involves the function
/// type, if known.
///
/// \param EPI Extra information about the function type. Usually this will
/// be taken from an existing function with the same prototype.
///
/// \returns A suitable function type, if there are no errors. The
/// unqualified type will always be a FunctionProtoType.
/// Otherwise, returns a NULL type.
QualType BuildFunctionType(QualType T,
MutableArrayRef<QualType> ParamTypes,
SourceLocation Loc, DeclarationName Entity,
const FunctionProtoType::ExtProtoInfo &EPI);
QualType BuildMemberPointerType(QualType T, QualType Class,
SourceLocation Loc,
DeclarationName Entity);
QualType BuildBlockPointerType(QualType T,
SourceLocation Loc, DeclarationName Entity);
QualType BuildParenType(QualType T);
QualType BuildAtomicType(QualType T, SourceLocation Loc);
QualType BuildReadPipeType(QualType T,
SourceLocation Loc);
QualType BuildWritePipeType(QualType T,
SourceLocation Loc);
TypeSourceInfo *GetTypeForDeclarator(Declarator &D, Scope *S);
TypeSourceInfo *GetTypeForDeclaratorCast(Declarator &D, QualType FromTy);
/// Package the given type and TSI into a ParsedType.
ParsedType CreateParsedType(QualType T, TypeSourceInfo *TInfo);
DeclarationNameInfo GetNameForDeclarator(Declarator &D);
DeclarationNameInfo GetNameFromUnqualifiedId(const UnqualifiedId &Name);
static QualType GetTypeFromParser(ParsedType Ty,
TypeSourceInfo **TInfo = nullptr);
CanThrowResult canThrow(const Expr *E);
const FunctionProtoType *ResolveExceptionSpec(SourceLocation Loc,
const FunctionProtoType *FPT);
void UpdateExceptionSpec(FunctionDecl *FD,
const FunctionProtoType::ExceptionSpecInfo &ESI);
bool CheckSpecifiedExceptionType(QualType &T, SourceRange Range);
bool CheckDistantExceptionSpec(QualType T);
bool CheckEquivalentExceptionSpec(FunctionDecl *Old, FunctionDecl *New);
bool CheckEquivalentExceptionSpec(
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool CheckEquivalentExceptionSpec(
const PartialDiagnostic &DiagID, const PartialDiagnostic & NoteID,
const FunctionProtoType *Old, SourceLocation OldLoc,
const FunctionProtoType *New, SourceLocation NewLoc);
bool handlerCanCatch(QualType HandlerType, QualType ExceptionType);
bool CheckExceptionSpecSubset(const PartialDiagnostic &DiagID,
const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const PartialDiagnostic &NoThrowDiagID,
const FunctionProtoType *Superset,
SourceLocation SuperLoc,
const FunctionProtoType *Subset,
SourceLocation SubLoc);
bool CheckParamExceptionSpec(const PartialDiagnostic &NestedDiagID,
const PartialDiagnostic &NoteID,
const FunctionProtoType *Target,
SourceLocation TargetLoc,
const FunctionProtoType *Source,
SourceLocation SourceLoc);
TypeResult ActOnTypeName(Scope *S, Declarator &D);
/// The parser has parsed the context-sensitive type 'instancetype'
/// in an Objective-C message declaration. Return the appropriate type.
ParsedType ActOnObjCInstanceType(SourceLocation Loc);
/// Abstract class used to diagnose incomplete types.
struct TypeDiagnoser {
TypeDiagnoser() {}
virtual void diagnose(Sema &S, SourceLocation Loc, QualType T) = 0;
virtual ~TypeDiagnoser() {}
};
static int getPrintable(int I) { return I; }
static unsigned getPrintable(unsigned I) { return I; }
static bool getPrintable(bool B) { return B; }
static const char * getPrintable(const char *S) { return S; }
static StringRef getPrintable(StringRef S) { return S; }
static const std::string &getPrintable(const std::string &S) { return S; }
static const IdentifierInfo *getPrintable(const IdentifierInfo *II) {
return II;
}
static DeclarationName getPrintable(DeclarationName N) { return N; }
static QualType getPrintable(QualType T) { return T; }
static SourceRange getPrintable(SourceRange R) { return R; }
static SourceRange getPrintable(SourceLocation L) { return L; }
static SourceRange getPrintable(const Expr *E) { return E->getSourceRange(); }
static SourceRange getPrintable(TypeLoc TL) { return TL.getSourceRange();}
template <typename... Ts> class BoundTypeDiagnoser : public TypeDiagnoser {
unsigned DiagID;
std::tuple<const Ts &...> Args;
template <std::size_t... Is>
void emit(const SemaDiagnosticBuilder &DB,
llvm::index_sequence<Is...>) const {
// Apply all tuple elements to the builder in order.
bool Dummy[] = {false, (DB << getPrintable(std::get<Is>(Args)))...};
(void)Dummy;
}
public:
BoundTypeDiagnoser(unsigned DiagID, const Ts &...Args)
: TypeDiagnoser(), DiagID(DiagID), Args(Args...) {
assert(DiagID != 0 && "no diagnostic for type diagnoser");
}
void diagnose(Sema &S, SourceLocation Loc, QualType T) override {
const SemaDiagnosticBuilder &DB = S.Diag(Loc, DiagID);
emit(DB, llvm::index_sequence_for<Ts...>());
DB << T;
}
};
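// Illustrative note on the emit() trick above: the braced array initializer
// forces left-to-right evaluation of the pack expansion, so a diagnoser such
// as BoundTypeDiagnoser<StringRef, int>(ID, Name, N) behaves as if diagnose()
// ran:
//
//   S.Diag(Loc, ID) << getPrintable(Name) << getPrintable(N) << T;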
private:
/// Methods for marking which expressions involve dereferencing a pointer
/// marked with the 'noderef' attribute. Expressions are checked bottom up as
/// they are parsed, meaning that a noderef pointer may not be accessed. For
/// example, in `&*p` where `p` is a noderef pointer, we will first parse the
/// `*p`, but need to check that `address of` is called on it. This requires
/// keeping a container of all pending expressions and checking whether the
/// address of each one is eventually taken.
void CheckSubscriptAccessOfNoDeref(const ArraySubscriptExpr *E);
void CheckAddressOfNoDeref(const Expr *E);
void CheckMemberAccessOfNoDeref(const MemberExpr *E);
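// Illustrative sketch of the deferral described above:
//
//   int __attribute__((noderef)) *P;
//   int *Q = &*P; // OK: the pending '*P' is cancelled by the enclosing '&'
//   int  X = *P;  // diagnosed: actually loads through a noderef pointer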
bool RequireCompleteTypeImpl(SourceLocation Loc, QualType T,
TypeDiagnoser *Diagnoser);
struct ModuleScope {
SourceLocation BeginLoc;
clang::Module *Module = nullptr;
bool ModuleInterface = false;
bool ImplicitGlobalModuleFragment = false;
VisibleModuleSet OuterVisibleModules;
};
/// The modules we're currently parsing.
llvm::SmallVector<ModuleScope, 16> ModuleScopes;
/// Namespace definitions that we will export when they finish.
llvm::SmallPtrSet<const NamespaceDecl*, 8> DeferredExportedNamespaces;
/// Get the module whose scope we are currently within.
Module *getCurrentModule() const {
return ModuleScopes.empty() ? nullptr : ModuleScopes.back().Module;
}
VisibleModuleSet VisibleModules;
public:
/// Get the module owning an entity.
Module *getOwningModule(Decl *Entity) { return Entity->getOwningModule(); }
/// Make a merged definition of an existing hidden definition \p ND
/// visible at the specified location.
void makeMergedDefinitionVisible(NamedDecl *ND);
bool isModuleVisible(const Module *M, bool ModulePrivate = false);
/// Determine whether a declaration is visible to name lookup.
bool isVisible(const NamedDecl *D) {
return !D->isHidden() || isVisibleSlow(D);
}
/// Determine whether any declaration of an entity is visible.
bool
hasVisibleDeclaration(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr) {
return isVisible(D) || hasVisibleDeclarationSlow(D, Modules);
}
bool hasVisibleDeclarationSlow(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules);
bool hasVisibleMergedDefinition(NamedDecl *Def);
bool hasMergedDefinitionInCurrentModule(NamedDecl *Def);
/// Determine if \p D and \p Suggested have a structurally compatible
/// layout as described in C11 6.2.7/1.
bool hasStructuralCompatLayout(Decl *D, Decl *Suggested);
/// Determine if \p D has a visible definition. If not, suggest a declaration
/// that should be made visible to expose the definition.
bool hasVisibleDefinition(NamedDecl *D, NamedDecl **Suggested,
bool OnlyNeedComplete = false);
bool hasVisibleDefinition(const NamedDecl *D) {
NamedDecl *Hidden;
return hasVisibleDefinition(const_cast<NamedDecl*>(D), &Hidden);
}
/// Determine if the template parameter \p D has a visible default argument.
bool
hasVisibleDefaultArgument(const NamedDecl *D,
llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is an explicit
/// specialization declaration for a specialization of a template. (For a
/// member specialization, use hasVisibleMemberSpecialization.)
bool hasVisibleExplicitSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if there is a visible declaration of \p D that is a member
/// specialization declaration (as opposed to an instantiated declaration).
bool hasVisibleMemberSpecialization(
const NamedDecl *D, llvm::SmallVectorImpl<Module *> *Modules = nullptr);
/// Determine if \p A and \p B are equivalent internal linkage declarations
/// from different modules, and thus an ambiguity error can be downgraded to
/// an extension warning.
bool isEquivalentInternalLinkageDeclaration(const NamedDecl *A,
const NamedDecl *B);
void diagnoseEquivalentInternalLinkageDeclarations(
SourceLocation Loc, const NamedDecl *D,
ArrayRef<const NamedDecl *> Equiv);
bool isUsualDeallocationFunction(const CXXMethodDecl *FD);
bool isCompleteType(SourceLocation Loc, QualType T) {
return !RequireCompleteTypeImpl(Loc, T, nullptr);
}
bool RequireCompleteType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireCompleteType(SourceLocation Loc, QualType T,
unsigned DiagID);
template <typename... Ts>
bool RequireCompleteType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteType(Loc, T, Diagnoser);
}
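// Illustrative usage of the variadic overload above (the diagnostic ID is
// hypothetical): the extra arguments are bound into a BoundTypeDiagnoser and
// streamed into the diagnostic only if T turns out to be incomplete:
//
//   if (RequireCompleteType(Loc, T, diag::err_incomplete_type_here, Range))
//     return ExprError();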
void completeExprArrayBound(Expr *E);
bool RequireCompleteExprType(Expr *E, TypeDiagnoser &Diagnoser);
bool RequireCompleteExprType(Expr *E, unsigned DiagID);
template <typename... Ts>
bool RequireCompleteExprType(Expr *E, unsigned DiagID, const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireCompleteExprType(E, Diagnoser);
}
bool RequireLiteralType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID);
template <typename... Ts>
bool RequireLiteralType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireLiteralType(Loc, T, Diagnoser);
}
QualType getElaboratedType(ElaboratedTypeKeyword Keyword,
const CXXScopeSpec &SS, QualType T,
TagDecl *OwnedTagDecl = nullptr);
QualType BuildTypeofExprType(Expr *E, SourceLocation Loc);
/// If AsUnevaluated is false, E is treated as though it were an evaluated
/// context, such as when building a type for decltype(auto).
QualType BuildDecltypeType(Expr *E, SourceLocation Loc,
bool AsUnevaluated = true);
QualType BuildUnaryTransformType(QualType BaseType,
UnaryTransformType::UTTKind UKind,
SourceLocation Loc);
//===--------------------------------------------------------------------===//
// Symbol table / Decl tracking callbacks: SemaDecl.cpp.
//
struct SkipBodyInfo {
SkipBodyInfo()
: ShouldSkip(false), CheckSameAsPrevious(false), Previous(nullptr),
New(nullptr) {}
bool ShouldSkip;
bool CheckSameAsPrevious;
NamedDecl *Previous;
NamedDecl *New;
};
DeclGroupPtrTy ConvertDeclToDeclGroup(Decl *Ptr, Decl *OwnedType = nullptr);
void DiagnoseUseOfUnimplementedSelectors();
bool isSimpleTypeSpecifier(tok::TokenKind Kind) const;
ParsedType getTypeName(const IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec *SS = nullptr,
bool isClassName = false, bool HasTrailingDot = false,
ParsedType ObjectType = nullptr,
bool IsCtorOrDtorName = false,
bool WantNontrivialTypeSourceInfo = false,
bool IsClassTemplateDeductionContext = true,
IdentifierInfo **CorrectedII = nullptr);
TypeSpecifierType isTagName(IdentifierInfo &II, Scope *S);
bool isMicrosoftMissingTypename(const CXXScopeSpec *SS, Scope *S);
void DiagnoseUnknownTypeName(IdentifierInfo *&II,
SourceLocation IILoc,
Scope *S,
CXXScopeSpec *SS,
ParsedType &SuggestedType,
bool IsTemplateName = false);
/// Attempt to behave like MSVC in situations where lookup of an unqualified
/// type name has failed in a dependent context. In these situations, we
/// automatically form a DependentTypeName that will retry lookup in a related
/// scope during instantiation.
ParsedType ActOnMSVCUnknownTypeName(const IdentifierInfo &II,
SourceLocation NameLoc,
bool IsTemplateTypeArg);
/// Describes the result of the name lookup and resolution performed
/// by \c ClassifyName().
enum NameClassificationKind {
NC_Unknown,
NC_Error,
NC_Keyword,
NC_Type,
NC_Expression,
NC_NestedNameSpecifier,
NC_TypeTemplate,
NC_VarTemplate,
NC_FunctionTemplate,
NC_UndeclaredTemplate,
};
class NameClassification {
NameClassificationKind Kind;
ExprResult Expr;
TemplateName Template;
ParsedType Type;
explicit NameClassification(NameClassificationKind Kind) : Kind(Kind) {}
public:
NameClassification(ExprResult Expr) : Kind(NC_Expression), Expr(Expr) {}
NameClassification(ParsedType Type) : Kind(NC_Type), Type(Type) {}
NameClassification(const IdentifierInfo *Keyword) : Kind(NC_Keyword) {}
static NameClassification Error() {
return NameClassification(NC_Error);
}
static NameClassification Unknown() {
return NameClassification(NC_Unknown);
}
static NameClassification NestedNameSpecifier() {
return NameClassification(NC_NestedNameSpecifier);
}
static NameClassification TypeTemplate(TemplateName Name) {
NameClassification Result(NC_TypeTemplate);
Result.Template = Name;
return Result;
}
static NameClassification VarTemplate(TemplateName Name) {
NameClassification Result(NC_VarTemplate);
Result.Template = Name;
return Result;
}
static NameClassification FunctionTemplate(TemplateName Name) {
NameClassification Result(NC_FunctionTemplate);
Result.Template = Name;
return Result;
}
static NameClassification UndeclaredTemplate(TemplateName Name) {
NameClassification Result(NC_UndeclaredTemplate);
Result.Template = Name;
return Result;
}
NameClassificationKind getKind() const { return Kind; }
ParsedType getType() const {
assert(Kind == NC_Type);
return Type;
}
ExprResult getExpression() const {
assert(Kind == NC_Expression);
return Expr;
}
TemplateName getTemplateName() const {
assert(Kind == NC_TypeTemplate || Kind == NC_FunctionTemplate ||
Kind == NC_VarTemplate || Kind == NC_UndeclaredTemplate);
return Template;
}
TemplateNameKind getTemplateNameKind() const {
switch (Kind) {
case NC_TypeTemplate:
return TNK_Type_template;
case NC_FunctionTemplate:
return TNK_Function_template;
case NC_VarTemplate:
return TNK_Var_template;
case NC_UndeclaredTemplate:
return TNK_Undeclared_template;
default:
llvm_unreachable("unsupported name classification.");
}
}
};
/// Perform name lookup on the given name, classifying it based on
/// the results of name lookup and the following token.
///
/// This routine is used by the parser to resolve identifiers and help direct
/// parsing. When the identifier cannot be found, this routine will attempt
/// to correct the typo and classify based on the resulting name.
///
/// \param S The scope in which we're performing name lookup.
///
/// \param SS The nested-name-specifier that precedes the name.
///
/// \param Name The identifier. If typo correction finds an alternative name,
/// this pointer parameter will be updated accordingly.
///
/// \param NameLoc The location of the identifier.
///
/// \param NextToken The token following the identifier. Used to help
/// disambiguate the name.
///
/// \param IsAddressOfOperand True if this name is the operand of a unary
/// address of ('&') expression, assuming it is classified as an
/// expression.
///
/// \param CCC The correction callback, if typo correction is desired.
NameClassification ClassifyName(Scope *S, CXXScopeSpec &SS,
IdentifierInfo *&Name, SourceLocation NameLoc,
const Token &NextToken,
bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr);
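// Illustrative sketch of how a caller consumes the classification:
//
//   NameClassification C =
//       ClassifyName(S, SS, Name, NameLoc, Next, /*IsAddressOfOperand=*/false);
//   switch (C.getKind()) {
//   case NC_Type:         /* use C.getType() as a type */ break;
//   case NC_Expression:   /* use C.getExpression() */     break;
//   case NC_TypeTemplate: /* use C.getTemplateName() */   break;
//   default: break;
//   }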
/// Describes the detailed kind of a template name. Used in diagnostics.
enum class TemplateNameKindForDiagnostics {
ClassTemplate,
FunctionTemplate,
VarTemplate,
AliasTemplate,
TemplateTemplateParam,
Concept,
DependentTemplate
};
TemplateNameKindForDiagnostics
getTemplateNameKindForDiagnostics(TemplateName Name);
/// Determine whether it's plausible that E was intended to be a
/// template-name.
bool mightBeIntendedToBeTemplateName(ExprResult E, bool &Dependent) {
if (!getLangOpts().CPlusPlus || E.isInvalid())
return false;
Dependent = false;
if (auto *DRE = dyn_cast<DeclRefExpr>(E.get()))
return !DRE->hasExplicitTemplateArgs();
if (auto *ME = dyn_cast<MemberExpr>(E.get()))
return !ME->hasExplicitTemplateArgs();
Dependent = true;
if (auto *DSDRE = dyn_cast<DependentScopeDeclRefExpr>(E.get()))
return !DSDRE->hasExplicitTemplateArgs();
if (auto *DSME = dyn_cast<CXXDependentScopeMemberExpr>(E.get()))
return !DSME->hasExplicitTemplateArgs();
// Any additional cases recognized here should also be handled by
// diagnoseExprIntendedAsTemplateName.
return false;
}
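// Illustrative case (hypothetical names): given
// 'template<typename T> void fn();', a statement such as
//
//   fn < 3;   // parsed as a '<' comparison on a plain reference to 'fn'
//
// makes the helper above return true, so the parser can suggest via
// diagnoseExprIntendedAsTemplateName() that 'fn<...>' was probably intended.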
void diagnoseExprIntendedAsTemplateName(Scope *S, ExprResult TemplateName,
SourceLocation Less,
SourceLocation Greater);
Decl *ActOnDeclarator(Scope *S, Declarator &D);
NamedDecl *HandleDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParameterLists);
void RegisterLocallyScopedExternCDecl(NamedDecl *ND, Scope *S);
bool DiagnoseClassNameShadow(DeclContext *DC, DeclarationNameInfo Info);
bool diagnoseQualifiedDeclaration(CXXScopeSpec &SS, DeclContext *DC,
DeclarationName Name, SourceLocation Loc,
bool IsTemplateId);
void
diagnoseIgnoredQualifiers(unsigned DiagID, unsigned Quals,
SourceLocation FallbackLoc,
SourceLocation ConstQualLoc = SourceLocation(),
SourceLocation VolatileQualLoc = SourceLocation(),
SourceLocation RestrictQualLoc = SourceLocation(),
SourceLocation AtomicQualLoc = SourceLocation(),
SourceLocation UnalignedQualLoc = SourceLocation());
static bool adjustContextForLocalExternDecl(DeclContext *&DC);
void DiagnoseFunctionSpecifiers(const DeclSpec &DS);
NamedDecl *getShadowedDeclaration(const TypedefNameDecl *D,
const LookupResult &R);
NamedDecl *getShadowedDeclaration(const VarDecl *D, const LookupResult &R);
void CheckShadow(NamedDecl *D, NamedDecl *ShadowedDecl,
const LookupResult &R);
void CheckShadow(Scope *S, VarDecl *D);
/// Warn if 'E', which is an expression that is about to be modified, refers
/// to a shadowing declaration.
void CheckShadowingDeclModification(Expr *E, SourceLocation Loc);
void DiagnoseShadowingLambdaDecls(const sema::LambdaScopeInfo *LSI);
private:
/// Map of current shadowing declarations to shadowed declarations. Warn if
/// it looks like the user is trying to modify the shadowing declaration.
llvm::DenseMap<const NamedDecl *, const NamedDecl *> ShadowingDecls;
public:
void CheckCastAlign(Expr *Op, QualType T, SourceRange TRange);
void handleTagNumbering(const TagDecl *Tag, Scope *TagScope);
void setTagNameForLinkagePurposes(TagDecl *TagFromDeclSpec,
TypedefNameDecl *NewTD);
void CheckTypedefForVariablyModifiedType(Scope *S, TypedefNameDecl *D);
NamedDecl* ActOnTypedefDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous);
NamedDecl* ActOnTypedefNameDecl(Scope* S, DeclContext* DC, TypedefNameDecl *D,
LookupResult &Previous, bool &Redeclaration);
NamedDecl *ActOnVariableDeclarator(Scope *S, Declarator &D, DeclContext *DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope,
ArrayRef<BindingDecl *> Bindings = None);
NamedDecl *
ActOnDecompositionDeclarator(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists);
// Returns true if the variable declaration is a redeclaration
bool CheckVariableDeclaration(VarDecl *NewVD, LookupResult &Previous);
void CheckVariableDeclarationType(VarDecl *NewVD);
bool DeduceVariableDeclarationType(VarDecl *VDecl, bool DirectInit,
Expr *Init);
void CheckCompleteVariableDeclaration(VarDecl *VD);
void CheckCompleteDecompositionDeclaration(DecompositionDecl *DD);
void MaybeSuggestAddingStaticToDecl(const FunctionDecl *D);
NamedDecl* ActOnFunctionDeclarator(Scope* S, Declarator& D, DeclContext* DC,
TypeSourceInfo *TInfo,
LookupResult &Previous,
MultiTemplateParamsArg TemplateParamLists,
bool &AddToScope);
bool AddOverriddenMethods(CXXRecordDecl *DC, CXXMethodDecl *MD);
bool CheckConstexprFunctionDecl(const FunctionDecl *FD);
bool CheckConstexprFunctionBody(const FunctionDecl *FD, Stmt *Body);
void DiagnoseHiddenVirtualMethods(CXXMethodDecl *MD);
void FindHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
void NoteHiddenVirtualMethods(CXXMethodDecl *MD,
SmallVectorImpl<CXXMethodDecl*> &OverloadedMethods);
// Returns true if the function declaration is a redeclaration
bool CheckFunctionDeclaration(Scope *S,
FunctionDecl *NewFD, LookupResult &Previous,
bool IsMemberSpecialization);
bool shouldLinkDependentDeclWithPrevious(Decl *D, Decl *OldDecl);
bool canFullyTypeCheckRedeclaration(ValueDecl *NewD, ValueDecl *OldD,
QualType NewT, QualType OldT);
void CheckMain(FunctionDecl *FD, const DeclSpec &D);
void CheckMSVCRTEntryPoint(FunctionDecl *FD);
Attr *getImplicitCodeSegOrSectionAttrForFunction(const FunctionDecl *FD,
bool IsDefinition);
void CheckFunctionOrTemplateParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *ActOnParamDeclarator(Scope *S, Declarator &D);
ParmVarDecl *BuildParmVarDeclForTypedef(DeclContext *DC,
SourceLocation Loc,
QualType T);
ParmVarDecl *CheckParameter(DeclContext *DC, SourceLocation StartLoc,
SourceLocation NameLoc, IdentifierInfo *Name,
QualType T, TypeSourceInfo *TSInfo,
StorageClass SC);
void ActOnParamDefaultArgument(Decl *param,
SourceLocation EqualLoc,
Expr *defarg);
void ActOnParamUnparsedDefaultArgument(Decl *param,
SourceLocation EqualLoc,
SourceLocation ArgLoc);
void ActOnParamDefaultArgumentError(Decl *param, SourceLocation EqualLoc);
bool SetParamDefaultArgument(ParmVarDecl *Param, Expr *DefaultArg,
SourceLocation EqualLoc);
// Contexts where using non-trivial C union types can be disallowed. This is
// passed to err_non_trivial_c_union_in_invalid_context.
enum NonTrivialCUnionContext {
// Function parameter.
NTCUC_FunctionParam,
// Function return.
NTCUC_FunctionReturn,
// Default-initialized object.
NTCUC_DefaultInitializedObject,
// Variable with automatic storage duration.
NTCUC_AutoVar,
// Initializer expression that might copy from another object.
NTCUC_CopyInit,
// Assignment.
NTCUC_Assignment,
// Compound literal.
NTCUC_CompoundLiteral,
// Block capture.
NTCUC_BlockCapture,
// lvalue-to-rvalue conversion of volatile type.
NTCUC_LValueToRValueVolatile,
};
/// Emit diagnostics if the initializer or any of its explicit or
/// implicitly-generated subexpressions require copying or
/// default-initializing a type that is or contains a C union type that is
/// non-trivial to copy or default-initialize.
void checkNonTrivialCUnionInInitializer(const Expr *Init, SourceLocation Loc);
// These flags are passed to checkNonTrivialCUnion.
enum NonTrivialCUnionKind {
NTCUK_Init = 0x1,
NTCUK_Destruct = 0x2,
NTCUK_Copy = 0x4,
};
/// Emit diagnostics if a non-trivial C union type or a struct that contains
/// a non-trivial C union is used in an invalid context.
void checkNonTrivialCUnion(QualType QT, SourceLocation Loc,
NonTrivialCUnionContext UseContext,
unsigned NonTrivialKind);
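// Illustrative sketch (Objective-C ARC): a union member of retainable
// pointer type makes the union non-trivial to initialize, copy, and destroy:
//
//   union U { id Obj; int I; };   // 'Obj' is __strong under ARC
//   void f(union U u);            // would be flagged as NTCUC_FunctionParam
//   union U g(void);              // would be flagged as NTCUC_FunctionReturn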
void AddInitializerToDecl(Decl *dcl, Expr *init, bool DirectInit,
SourceLocation EqualLoc = SourceLocation());
void ActOnUninitializedDecl(Decl *dcl);
void ActOnInitializerError(Decl *Dcl);
bool ValidateNTCheckedType(ASTContext &C, QualType VDeclType, Expr *Init);
void ActOnPureSpecifier(Decl *D, SourceLocation PureSpecLoc);
void ActOnCXXForRangeDecl(Decl *D);
StmtResult ActOnCXXForRangeIdentifier(Scope *S, SourceLocation IdentLoc,
IdentifierInfo *Ident,
ParsedAttributes &Attrs,
SourceLocation AttrEnd);
void SetDeclDeleted(Decl *dcl, SourceLocation DelLoc);
void SetDeclDefaulted(Decl *dcl, SourceLocation DefaultLoc);
void CheckStaticLocalForDllExport(VarDecl *VD);
void FinalizeDeclaration(Decl *D);
DeclGroupPtrTy FinalizeDeclaratorGroup(Scope *S, const DeclSpec &DS,
ArrayRef<Decl *> Group);
DeclGroupPtrTy BuildDeclaratorGroup(MutableArrayRef<Decl *> Group);
/// Should be called on all declarations that might have attached
/// documentation comments.
void ActOnDocumentableDecl(Decl *D);
void ActOnDocumentableDecls(ArrayRef<Decl *> Group);
void ActOnFinishKNRParamDeclarations(Scope *S, Declarator &D,
SourceLocation LocAfterDecls);
void CheckForFunctionRedefinition(
FunctionDecl *FD, const FunctionDecl *EffectiveDefinition = nullptr,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
Decl *ActOnStartOfFunctionDef(Scope *S, Decl *D,
SkipBodyInfo *SkipBody = nullptr);
void ActOnStartOfObjCMethodDef(Scope *S, Decl *D);
bool isObjCMethodDecl(Decl *D) {
return D && isa<ObjCMethodDecl>(D);
}
/// Determine whether we can delay parsing the body of a function or
/// function template until it is used, assuming we don't care about emitting
/// code for that function.
///
/// This will be \c false if we may need the body of the function in the
/// middle of parsing an expression (where it's impractical to switch to
/// parsing a different function), for instance, if it's constexpr in C++11
/// or has an 'auto' return type in C++14. These cases are essentially bugs.
bool canDelayFunctionBody(const Declarator &D);
/// Determine whether we can skip parsing the body of a function
/// definition, assuming we don't care about analyzing its body or emitting
/// code for that function.
///
/// This will be \c false only if we may need the body of the function in
/// order to parse the rest of the program (for instance, if it is
/// \c constexpr in C++11 or has an 'auto' return type in C++14).
bool canSkipFunctionBody(Decl *D);
void computeNRVO(Stmt *Body, sema::FunctionScopeInfo *Scope);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body);
Decl *ActOnFinishFunctionBody(Decl *Decl, Stmt *Body, bool IsInstantiation);
Decl *ActOnSkippedFunctionBody(Decl *Decl);
void ActOnFinishInlineFunctionDef(FunctionDecl *D);
/// ActOnFinishDelayedAttribute - Invoked when we have finished parsing an
/// attribute for which parsing is delayed.
void ActOnFinishDelayedAttribute(Scope *S, Decl *D, ParsedAttributes &Attrs);
/// Diagnose any unused parameters in the given sequence of
/// ParmVarDecl pointers.
void DiagnoseUnusedParameters(ArrayRef<ParmVarDecl *> Parameters);
/// Diagnose whether the size of parameters or return value of a
/// function or obj-c method definition is pass-by-value and larger than a
/// specified threshold.
void
DiagnoseSizeOfParametersAndReturnValue(ArrayRef<ParmVarDecl *> Parameters,
QualType ReturnTy, NamedDecl *D);
void DiagnoseInvalidJumps(Stmt *Body);
Decl *ActOnFileScopeAsmDecl(Expr *expr,
SourceLocation AsmLoc,
SourceLocation RParenLoc);
/// Handle a C++11 empty-declaration and attribute-declaration.
Decl *ActOnEmptyDeclaration(Scope *S, const ParsedAttributesView &AttrList,
SourceLocation SemiLoc);
enum class ModuleDeclKind {
Interface, ///< 'export module X;'
Implementation, ///< 'module X;'
};
/// The parser has processed a module-declaration that begins the definition
/// of a module interface or implementation.
DeclGroupPtrTy ActOnModuleDecl(SourceLocation StartLoc,
SourceLocation ModuleLoc, ModuleDeclKind MDK,
ModuleIdPath Path, bool IsFirstDecl);
/// The parser has processed a global-module-fragment declaration that begins
/// the definition of the global module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
DeclGroupPtrTy ActOnGlobalModuleFragmentDecl(SourceLocation ModuleLoc);
/// The parser has processed a private-module-fragment declaration that begins
/// the definition of the private module fragment of the current module unit.
/// \param ModuleLoc The location of the 'module' keyword.
/// \param PrivateLoc The location of the 'private' keyword.
DeclGroupPtrTy ActOnPrivateModuleFragmentDecl(SourceLocation ModuleLoc,
SourceLocation PrivateLoc);
/// The parser has processed a module import declaration.
///
/// \param StartLoc The location of the first token in the declaration. This
/// could be the location of an '@', 'export', or 'import'.
/// \param ExportLoc The location of the 'export' keyword, if any.
/// \param ImportLoc The location of the 'import' keyword.
/// \param Path The module access path.
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, ModuleIdPath Path);
DeclResult ActOnModuleImport(SourceLocation StartLoc,
SourceLocation ExportLoc,
SourceLocation ImportLoc, Module *M,
ModuleIdPath Path = {});
/// The parser has processed a module import translated from a
/// #include or similar preprocessing directive.
void ActOnModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
void BuildModuleInclude(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has entered a submodule.
void ActOnModuleBegin(SourceLocation DirectiveLoc, Module *Mod);
/// The parser has left a submodule.
void ActOnModuleEnd(SourceLocation DirectiveLoc, Module *Mod);
/// Create an implicit import of the given module at the given
/// source location, for error recovery, if possible.
///
/// This routine is typically used when an entity found by name lookup
/// is actually hidden within a module that we know about but the user
/// has forgotten to import.
void createImplicitModuleImportForErrorRecovery(SourceLocation Loc,
Module *Mod);
/// Kinds of missing import. Note, the values of these enumerators correspond
/// to %select values in diagnostics.
enum class MissingImportKind {
Declaration,
Definition,
DefaultArgument,
ExplicitSpecialization,
PartialSpecialization
};
/// Diagnose that the specified declaration needs to be visible but
/// isn't, and suggest a module import that would resolve the problem.
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
MissingImportKind MIK, bool Recover = true);
void diagnoseMissingImport(SourceLocation Loc, NamedDecl *Decl,
SourceLocation DeclLoc, ArrayRef<Module *> Modules,
MissingImportKind MIK, bool Recover);
Decl *ActOnStartExportDecl(Scope *S, SourceLocation ExportLoc,
SourceLocation LBraceLoc);
Decl *ActOnFinishExportDecl(Scope *S, Decl *ExportDecl,
SourceLocation RBraceLoc);
/// We've found a use of a templated declaration that would trigger an
/// implicit instantiation. Check that any relevant explicit specializations
/// and partial specializations are visible, and diagnose if not.
void checkSpecializationVisibility(SourceLocation Loc, NamedDecl *Spec);
/// We've found a use of a template specialization that would select a
/// partial specialization. Check that the partial specialization is visible,
/// and diagnose if not.
void checkPartialSpecializationVisibility(SourceLocation Loc,
NamedDecl *Spec);
/// Retrieve a suitable printing policy for diagnostics.
PrintingPolicy getPrintingPolicy() const {
return getPrintingPolicy(Context, PP);
}
/// Retrieve a suitable printing policy for diagnostics.
static PrintingPolicy getPrintingPolicy(const ASTContext &Ctx,
const Preprocessor &PP);
/// Scope actions.
void ActOnPopScope(SourceLocation Loc, Scope *S);
void ActOnTranslationUnitScope(Scope *S);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
RecordDecl *&AnonRecord);
Decl *ParsedFreeStandingDeclSpec(Scope *S, AccessSpecifier AS, DeclSpec &DS,
MultiTemplateParamsArg TemplateParams,
bool IsExplicitInstantiation,
RecordDecl *&AnonRecord);
Decl *BuildAnonymousStructOrUnion(Scope *S, DeclSpec &DS,
AccessSpecifier AS,
RecordDecl *Record,
const PrintingPolicy &Policy);
Decl *BuildMicrosoftCAnonymousStruct(Scope *S, DeclSpec &DS,
RecordDecl *Record);
/// Common ways to introduce type names without a tag for use in diagnostics.
/// Keep in sync with err_tag_reference_non_tag.
enum NonTagKind {
NTK_NonStruct,
NTK_NonClass,
NTK_NonUnion,
NTK_NonEnum,
NTK_Typedef,
NTK_TypeAlias,
NTK_Template,
NTK_TypeAliasTemplate,
NTK_TemplateTemplateArgument,
};
/// Given a non-tag type declaration, returns an enum useful for indicating
/// what kind of non-tag type this is.
NonTagKind getNonTagTypeDeclKind(const Decl *D, TagTypeKind TTK);
bool isAcceptableTagRedeclaration(const TagDecl *Previous,
TagTypeKind NewTag, bool isDefinition,
SourceLocation NewTagLoc,
const IdentifierInfo *Name);
enum TagUseKind {
TUK_Reference, // Reference to a tag: 'struct foo *X;'
TUK_Declaration, // Fwd decl of a tag: 'struct foo;'
TUK_Definition, // Definition of a tag: 'struct foo { int X; } Y;'
TUK_Friend // Friend declaration: 'friend struct foo;'
};
Decl *ActOnTag(Scope *S, unsigned TagSpec, TagUseKind TUK,
SourceLocation KWLoc, CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc, const ParsedAttributesView &Attr,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
MultiTemplateParamsArg TemplateParameterLists, bool &OwnedDecl,
bool &IsDependent, SourceLocation ScopedEnumKWLoc,
bool ScopedEnumUsesClassTag, TypeResult UnderlyingType,
bool IsTypeSpecifier, bool IsTemplateParamOrArg,
SkipBodyInfo *SkipBody = nullptr,
RecordDecl::Genericity GenericKind = RecordDecl::NonGeneric,
ArrayRef<TypedefDecl *> TypeParams = ArrayRef<TypedefDecl *>{nullptr, 0});
Decl *ActOnTemplatedFriendTag(Scope *S, SourceLocation FriendLoc,
unsigned TagSpec, SourceLocation TagLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TempParamLists);
TypeResult ActOnDependentTag(Scope *S,
unsigned TagSpec,
TagUseKind TUK,
const CXXScopeSpec &SS,
IdentifierInfo *Name,
SourceLocation TagLoc,
SourceLocation NameLoc);
void ActOnDefs(Scope *S, Decl *TagD, SourceLocation DeclStart,
IdentifierInfo *ClassName,
SmallVectorImpl<Decl *> &Decls);
FieldDecl *ActOnField(Scope *S, Decl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth);
FieldDecl *HandleField(Scope *S, RecordDecl *TagD, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS);
MSPropertyDecl *HandleMSProperty(Scope *S, RecordDecl *TagD,
SourceLocation DeclStart, Declarator &D,
Expr *BitfieldWidth,
InClassInitStyle InitStyle,
AccessSpecifier AS,
const ParsedAttr &MSPropertyAttr);
FieldDecl *CheckFieldDecl(DeclarationName Name, QualType T,
TypeSourceInfo *TInfo,
RecordDecl *Record, SourceLocation Loc,
bool Mutable, Expr *BitfieldWidth,
InClassInitStyle InitStyle,
SourceLocation TSSL,
AccessSpecifier AS, NamedDecl *PrevDecl,
Declarator *D = nullptr);
bool CheckNontrivialField(FieldDecl *FD);
void DiagnoseNontrivial(const CXXRecordDecl *Record, CXXSpecialMember CSM);
enum TrivialABIHandling {
/// The triviality of a method unaffected by "trivial_abi".
TAH_IgnoreTrivialABI,
/// The triviality of a method affected by "trivial_abi".
TAH_ConsiderTrivialABI
};
bool SpecialMemberIsTrivial(CXXMethodDecl *MD, CXXSpecialMember CSM,
TrivialABIHandling TAH = TAH_IgnoreTrivialABI,
bool Diagnose = false);
CXXSpecialMember getSpecialMember(const CXXMethodDecl *MD);
void ActOnLastBitfield(SourceLocation DeclStart,
SmallVectorImpl<Decl *> &AllIvarDecls);
Decl *ActOnIvar(Scope *S, SourceLocation DeclStart,
Declarator &D, Expr *BitfieldWidth,
tok::ObjCKeywordKind visibility);
// This is used for both record definitions and ObjC interface declarations.
void ActOnFields(Scope *S, SourceLocation RecLoc, Decl *TagDecl,
ArrayRef<Decl *> Fields, SourceLocation LBrac,
SourceLocation RBrac, const ParsedAttributesView &AttrList);
/// ActOnTagStartDefinition - Invoked when we have entered the
/// scope of a tag's definition (e.g., for an enumeration, class,
/// struct, or union).
void ActOnTagStartDefinition(Scope *S, Decl *TagDecl);
/// Perform ODR-like check for C/ObjC when merging tag types from modules.
/// Differently from C++, actually parse the body and reject / error out
/// in case of a structural mismatch.
bool ActOnDuplicateDefinition(DeclSpec &DS, Decl *Prev,
SkipBodyInfo &SkipBody);
typedef void *SkippedDefinitionContext;
/// Invoked when we enter a tag definition that we're skipping.
SkippedDefinitionContext ActOnTagStartSkippedDefinition(Scope *S, Decl *TD);
Decl *ActOnObjCContainerStartDefinition(Decl *IDecl);
/// ActOnStartCXXMemberDeclarations - Invoked when we have parsed a
/// C++ record definition's base-specifiers clause and are starting its
/// member declarations.
void ActOnStartCXXMemberDeclarations(Scope *S, Decl *TagDecl,
SourceLocation FinalLoc,
bool IsFinalSpelledSealed,
SourceLocation LBraceLoc);
/// ActOnTagFinishDefinition - Invoked once we have finished parsing
/// the definition of a tag (enumeration, class, struct, or union).
void ActOnTagFinishDefinition(Scope *S, Decl *TagDecl,
SourceRange BraceRange);
void ActOnTagFinishSkippedDefinition(SkippedDefinitionContext Context);
void ActOnObjCContainerFinishDefinition();
/// Invoked when we must temporarily exit the objective-c container
/// scope for parsing/looking-up C constructs.
///
/// Must be followed by a call to \see ActOnObjCReenterContainerContext
void ActOnObjCTemporaryExitContainerContext(DeclContext *DC);
void ActOnObjCReenterContainerContext(DeclContext *DC);
/// ActOnTagDefinitionError - Invoked when there was an unrecoverable
/// error parsing the definition of a tag.
void ActOnTagDefinitionError(Scope *S, Decl *TagDecl);
EnumConstantDecl *CheckEnumConstant(EnumDecl *Enum,
EnumConstantDecl *LastEnumConst,
SourceLocation IdLoc,
IdentifierInfo *Id,
Expr *val);
bool CheckEnumUnderlyingType(TypeSourceInfo *TI);
bool CheckEnumRedeclaration(SourceLocation EnumLoc, bool IsScoped,
QualType EnumUnderlyingTy, bool IsFixed,
const EnumDecl *Prev);
/// Determine whether the body of an anonymous enumeration should be skipped.
/// \param II The name of the first enumerator.
SkipBodyInfo shouldSkipAnonEnumBody(Scope *S, IdentifierInfo *II,
SourceLocation IILoc);
Decl *ActOnEnumConstant(Scope *S, Decl *EnumDecl, Decl *LastEnumConstant,
SourceLocation IdLoc, IdentifierInfo *Id,
const ParsedAttributesView &Attrs,
SourceLocation EqualLoc, Expr *Val);
void ActOnEnumBody(SourceLocation EnumLoc, SourceRange BraceRange,
Decl *EnumDecl, ArrayRef<Decl *> Elements, Scope *S,
const ParsedAttributesView &Attr);
DeclContext *getContainingDC(DeclContext *DC);
/// Set the current declaration context until it gets popped.
void PushDeclContext(Scope *S, DeclContext *DC);
void PopDeclContext();
/// EnterDeclaratorContext - Used when we must lookup names in the context
/// of a declarator's nested name specifier.
void EnterDeclaratorContext(Scope *S, DeclContext *DC);
void ExitDeclaratorContext(Scope *S);
/// Push the parameters of D, which must be a function, into scope.
void ActOnReenterFunctionContext(Scope* S, Decl* D);
void ActOnExitFunctionContext();
/// Push the parameters listed in Params into scope.
void ActOnSetupParametersAgain(Scope* S, ArrayRef<ParmVarDecl *> Params);
DeclContext *getFunctionLevelDeclContext();
/// getCurFunctionDecl - If inside of a function body, this returns a pointer
/// to the function decl for the function being parsed. If we're currently
/// in a 'block', this returns the containing context.
FunctionDecl *getCurFunctionDecl();
/// getCurMethodDecl - If inside of a method body, this returns a pointer to
/// the method decl for the method being parsed. If we're currently
/// in a 'block', this returns the containing context.
ObjCMethodDecl *getCurMethodDecl();
/// getCurFunctionOrMethodDecl - Return the Decl for the current ObjC method
/// or C function we're in, otherwise return null. If we're currently
/// in a 'block', this returns the containing context.
NamedDecl *getCurFunctionOrMethodDecl();
/// Add this decl to the scope shadowed decl chains.
void PushOnScopeChains(NamedDecl *D, Scope *S, bool AddToContext = true);
/// isDeclInScope - If 'Ctx' is a function/method, isDeclInScope returns true
/// if 'D' is in Scope 'S', otherwise 'S' is ignored and isDeclInScope returns
/// true if 'D' belongs to the given declaration context.
///
/// \param AllowInlineNamespace If \c true, allow the declaration to be in the
/// enclosing namespace set of the context, rather than contained
/// directly within it.
bool isDeclInScope(NamedDecl *D, DeclContext *Ctx, Scope *S = nullptr,
bool AllowInlineNamespace = false);
/// Finds the scope corresponding to the given decl context, if it
/// happens to be an enclosing scope. Otherwise return NULL.
static Scope *getScopeForDeclContext(Scope *S, DeclContext *DC);
/// Subroutines of ActOnDeclarator().
TypedefDecl *ParseTypedefDecl(Scope *S, Declarator &D, QualType T,
TypeSourceInfo *TInfo);
bool isIncompatibleTypedef(TypeDecl *Old, TypedefNameDecl *New);
/// Describes the kind of merge to perform for availability
/// attributes (including "deprecated", "unavailable", and "availability").
enum AvailabilityMergeKind {
/// Don't merge availability attributes at all.
AMK_None,
/// Merge availability attributes for a redeclaration, which requires
/// an exact match.
AMK_Redeclaration,
/// Merge availability attributes for an override, which requires
/// an exact match or a weakening of constraints.
AMK_Override,
/// Merge availability attributes for an implementation of
/// a protocol requirement.
AMK_ProtocolImplementation,
};
/// Describes the kind of priority given to an availability attribute.
///
/// The sum of priorities determines the final priority of the attribute.
/// The final priority determines how the attribute will be merged.
/// An attribute with a lower priority will always remove higher priority
/// attributes for the specified platform when it is being applied. An
/// attribute with a higher priority will not be applied if the declaration
/// already has an availability attribute with a lower priority for the
/// specified platform. The final priority values are not expected to match
/// the values in this enumeration, but instead should be treated as a plain
/// integer value. This enumeration just names the priority weights that are
/// used to calculate that final value.
enum AvailabilityPriority : int {
/// The availability attribute was specified explicitly next to the
/// declaration.
AP_Explicit = 0,
/// The availability attribute was applied using '#pragma clang attribute'.
AP_PragmaClangAttribute = 1,
/// The availability attribute for a specific platform was inferred from
/// an availability attribute for another platform.
AP_InferredFromOtherPlatform = 2
};
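// Illustrative arithmetic for the note above: an attribute applied through
// '#pragma clang attribute' and then inferred for another platform
// accumulates AP_PragmaClangAttribute + AP_InferredFromOtherPlatform
// = 1 + 2 = 3, so an explicitly written attribute (priority AP_Explicit = 0)
// takes precedence over it for that platform.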
/// Attribute merging methods. Return true if a new attribute was added.
AvailabilityAttr *mergeAvailabilityAttr(
NamedDecl *D, SourceRange Range, IdentifierInfo *Platform, bool Implicit,
VersionTuple Introduced, VersionTuple Deprecated, VersionTuple Obsoleted,
bool IsUnavailable, StringRef Message, bool IsStrict,
StringRef Replacement, AvailabilityMergeKind AMK, int Priority,
unsigned AttrSpellingListIndex);
TypeVisibilityAttr *mergeTypeVisibilityAttr(Decl *D, SourceRange Range,
TypeVisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
VisibilityAttr *mergeVisibilityAttr(Decl *D, SourceRange Range,
VisibilityAttr::VisibilityType Vis,
unsigned AttrSpellingListIndex);
UuidAttr *mergeUuidAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex, StringRef Uuid);
DLLImportAttr *mergeDLLImportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
DLLExportAttr *mergeDLLExportAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
MSInheritanceAttr *
mergeMSInheritanceAttr(Decl *D, SourceRange Range, bool BestCase,
unsigned AttrSpellingListIndex,
MSInheritanceAttr::Spelling SemanticSpelling);
FormatAttr *mergeFormatAttr(Decl *D, SourceRange Range,
IdentifierInfo *Format, int FormatIdx,
int FirstArg, unsigned AttrSpellingListIndex);
SectionAttr *mergeSectionAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
CodeSegAttr *mergeCodeSegAttr(Decl *D, SourceRange Range, StringRef Name,
unsigned AttrSpellingListIndex);
AlwaysInlineAttr *mergeAlwaysInlineAttr(Decl *D, SourceRange Range,
IdentifierInfo *Ident,
unsigned AttrSpellingListIndex);
MinSizeAttr *mergeMinSizeAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
NoSpeculativeLoadHardeningAttr *
mergeNoSpeculativeLoadHardeningAttr(Decl *D,
const NoSpeculativeLoadHardeningAttr &AL);
SpeculativeLoadHardeningAttr *
mergeSpeculativeLoadHardeningAttr(Decl *D,
const SpeculativeLoadHardeningAttr &AL);
OptimizeNoneAttr *mergeOptimizeNoneAttr(Decl *D, SourceRange Range,
unsigned AttrSpellingListIndex);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D, const ParsedAttr &AL);
InternalLinkageAttr *mergeInternalLinkageAttr(Decl *D,
const InternalLinkageAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const ParsedAttr &AL);
CommonAttr *mergeCommonAttr(Decl *D, const CommonAttr &AL);
void mergeDeclAttributes(NamedDecl *New, Decl *Old,
AvailabilityMergeKind AMK = AMK_Redeclaration);
void MergeTypedefNameDecl(Scope *S, TypedefNameDecl *New,
LookupResult &OldDecls);
bool MergeFunctionDecl(FunctionDecl *New, NamedDecl *&Old, Scope *S,
bool MergeTypeWithOld);
bool MergeCompatibleFunctionDecls(FunctionDecl *New, FunctionDecl *Old,
Scope *S, bool MergeTypeWithOld);
void mergeObjCMethodDecls(ObjCMethodDecl *New, ObjCMethodDecl *Old);
void MergeVarDecl(VarDecl *New, LookupResult &Previous);
void MergeVarDeclTypes(VarDecl *New, VarDecl *Old, bool MergeTypeWithOld);
void MergeVarDeclExceptionSpecs(VarDecl *New, VarDecl *Old);
bool checkVarDeclRedefinition(VarDecl *OldDefn, VarDecl *NewDefn);
void notePreviousDefinition(const NamedDecl *Old, SourceLocation New);
bool MergeCXXFunctionDecl(FunctionDecl *New, FunctionDecl *Old, Scope *S);
// Checked C specific methods for merging function declarations.
bool CheckedCFunctionDeclCompatibility(FunctionDecl *New, FunctionDecl *Old);
bool CheckedCMergeFunctionDecls(FunctionDecl *New, FunctionDecl *Old);
bool DiagnoseCheckedCFunctionCompatibility(FunctionDecl *New,
FunctionDecl *Old);
// Used for %select in diagnostics for errors involving checked types.
enum class CheckedTypeClassification {
CCT_Any,
CCT_Struct,
CCT_Union
};
// Used for %select in diagnostics for errors involving redeclarations
// with bounds.
enum class CheckedCBoundsError {
CCBE_Parameter,
CCBE_Return,
CCBE_Variable
};
// Used for %select in diagnostics for errors involving redeclarations
// with bounds annotations.
enum class BoundsAnnotationKind {
Bounds,
IType
};
CheckedTypeClassification classifyForCheckedTypeDiagnostic(QualType qt);
// AssignmentAction - This is used by all the assignment diagnostic functions
// to represent what is actually causing the operation.
enum AssignmentAction {
AA_Assigning,
AA_Passing,
AA_Returning,
AA_Converting,
AA_Initializing,
AA_Sending,
AA_Casting,
AA_Passing_CFAudited
};
/// C++ Overloading.
enum OverloadKind {
/// This is a legitimate overload: the existing declarations are
/// functions or function templates with different signatures.
Ovl_Overload,
/// This is not an overload because the signature exactly matches
/// an existing declaration.
Ovl_Match,
/// This is not an overload because the lookup results contain a
/// non-function.
Ovl_NonFunction
};
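// Illustrative examples for the kinds above (hypothetical declarations):
//
//   void f(int); void f(double);   // Ovl_Overload: distinct signatures
//   void f(int); void f(int);      // Ovl_Match: exact redeclaration
//   int  g;      void g(int);      // Ovl_NonFunction: 'g' names a variable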
OverloadKind CheckOverload(Scope *S,
FunctionDecl *New,
const LookupResult &OldDecls,
NamedDecl *&OldDecl,
bool IsForUsingDecl);
bool IsOverload(FunctionDecl *New, FunctionDecl *Old, bool IsForUsingDecl,
bool ConsiderCudaAttrs = true);
ImplicitConversionSequence
TryImplicitConversion(Expr *From, QualType ToType,
bool SuppressUserConversions,
bool AllowExplicit,
bool InOverloadResolution,
bool CStyle,
bool AllowObjCWritebackConversion);
bool IsIntegralPromotion(Expr *From, QualType FromType, QualType ToType);
bool IsFloatingPointPromotion(QualType FromType, QualType ToType);
bool IsComplexPromotion(QualType FromType, QualType ToType);
bool IsPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType, bool &IncompatibleObjC);
bool isObjCWritebackConversion(QualType FromType, QualType ToType,
QualType &ConvertedType);
bool IsBlockPointerConversion(QualType FromType, QualType ToType,
QualType& ConvertedType);
bool FunctionParamTypesAreEqual(const FunctionProtoType *OldType,
const FunctionProtoType *NewType,
unsigned *ArgPos = nullptr);
void HandleFunctionTypeMismatch(PartialDiagnostic &PDiag,
QualType FromType, QualType ToType);
void maybeExtendBlockObject(ExprResult &E);
CastKind PrepareCastToObjCObjectPointer(ExprResult &E);
bool CheckPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath& BasePath,
bool IgnoreBaseAccess,
bool Diagnose = true);
bool IsMemberPointerConversion(Expr *From, QualType FromType, QualType ToType,
bool InOverloadResolution,
QualType &ConvertedType);
bool CheckMemberPointerConversion(Expr *From, QualType ToType,
CastKind &Kind,
CXXCastPath &BasePath,
bool IgnoreBaseAccess);
bool IsQualificationConversion(QualType FromType, QualType ToType,
bool CStyle, bool &ObjCLifetimeConversion);
bool IsFunctionConversion(QualType FromType, QualType ToType,
QualType &ResultTy);
bool DiagnoseMultipleUserDefinedConversion(Expr *From, QualType ToType);
bool isSameOrCompatibleFunctionType(CanQualType Param, CanQualType Arg);
ExprResult PerformMoveOrCopyInitialization(const InitializedEntity &Entity,
const VarDecl *NRVOCandidate,
QualType ResultType,
Expr *Value,
bool AllowNRVO = true);
bool CanPerformCopyInitialization(const InitializedEntity &Entity,
ExprResult Init);
ExprResult PerformCopyInitialization(const InitializedEntity &Entity,
SourceLocation EqualLoc,
ExprResult Init,
bool TopLevelOfInitList = false,
bool AllowExplicit = false);
ExprResult PerformObjectArgumentInitialization(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
CXXMethodDecl *Method);
/// Check that the lifetime of the initializer (and its subobjects) is
/// sufficient for initializing the entity, and perform lifetime extension
/// (when permitted) if not.
void checkInitializerLifetime(const InitializedEntity &Entity, Expr *Init);
ExprResult PerformContextuallyConvertToBool(Expr *From);
ExprResult PerformContextuallyConvertToObjCPointer(Expr *From);
/// Contexts in which a converted constant expression is required.
enum CCEKind {
CCEK_CaseValue, ///< Expression in a case label.
CCEK_Enumerator, ///< Enumerator value with fixed underlying type.
CCEK_TemplateArg, ///< Value of a non-type template parameter.
CCEK_NewExpr, ///< Constant expression in a noptr-new-declarator.
CCEK_ConstexprIf, ///< Condition in a constexpr if statement.
CCEK_ExplicitBool ///< Condition in an explicit(bool) specifier.
};
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
llvm::APSInt &Value, CCEKind CCE);
ExprResult CheckConvertedConstantExpression(Expr *From, QualType T,
APValue &Value, CCEKind CCE);
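// A minimal usage sketch (illustrative only; 'S' is a Sema instance and
// 'CaseVal'/'CondType' stand in for a case-label expression and the switch
// condition type):
//   llvm::APSInt Value;
//   ExprResult Converted = S.CheckConvertedConstantExpression(
//       CaseVal, CondType, Value, Sema::CCEK_CaseValue);
//   if (!Converted.isInvalid())
//     ; // 'Value' now holds the converted constant.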
/// Abstract base class used to perform a contextual implicit
/// conversion from an expression to any type passing a filter.
class ContextualImplicitConverter {
public:
bool Suppress;
bool SuppressConversion;
ContextualImplicitConverter(bool Suppress = false,
bool SuppressConversion = false)
: Suppress(Suppress), SuppressConversion(SuppressConversion) {}
/// Determine whether the specified type is a valid destination type
/// for this conversion.
virtual bool match(QualType T) = 0;
/// Emits a diagnostic complaining that the expression does not have
/// a type accepted by this converter's filter.
virtual SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the expression has incomplete class type.
virtual SemaDiagnosticBuilder
diagnoseIncomplete(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a diagnostic when the only matching conversion function
/// is explicit.
virtual SemaDiagnosticBuilder diagnoseExplicitConv(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
/// Emits a note for the explicit conversion function.
virtual SemaDiagnosticBuilder
noteExplicitConv(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when there are multiple possible conversion
/// functions.
virtual SemaDiagnosticBuilder
diagnoseAmbiguous(Sema &S, SourceLocation Loc, QualType T) = 0;
/// Emits a note for one of the candidate conversions.
virtual SemaDiagnosticBuilder
noteAmbiguous(Sema &S, CXXConversionDecl *Conv, QualType ConvTy) = 0;
/// Emits a diagnostic when we picked a conversion function
/// (for cases when we are not allowed to pick a conversion function).
virtual SemaDiagnosticBuilder diagnoseConversion(
Sema &S, SourceLocation Loc, QualType T, QualType ConvTy) = 0;
virtual ~ContextualImplicitConverter() {}
};
class ICEConvertDiagnoser : public ContextualImplicitConverter {
bool AllowScopedEnumerations;
public:
ICEConvertDiagnoser(bool AllowScopedEnumerations,
bool Suppress, bool SuppressConversion)
: ContextualImplicitConverter(Suppress, SuppressConversion),
AllowScopedEnumerations(AllowScopedEnumerations) {}
/// Match an integral or (possibly scoped) enumeration type.
bool match(QualType T) override;
SemaDiagnosticBuilder
diagnoseNoMatch(Sema &S, SourceLocation Loc, QualType T) override {
return diagnoseNotInt(S, Loc, T);
}
/// Emits a diagnostic complaining that the expression does not have
/// integral or enumeration type.
virtual SemaDiagnosticBuilder
diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) = 0;
};
/// Perform a contextual implicit conversion.
ExprResult PerformContextualImplicitConversion(
SourceLocation Loc, Expr *FromE, ContextualImplicitConverter &Converter);
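// A condensed usage sketch (illustrative only; the diagnostic ID is
// hypothetical and the remaining pure-virtual overrides are elided):
//   struct SizeDiagnoser : Sema::ICEConvertDiagnoser {
//     SizeDiagnoser()
//         : ICEConvertDiagnoser(/*AllowScopedEnumerations=*/false,
//                               /*Suppress=*/false,
//                               /*SuppressConversion=*/false) {}
//     Sema::SemaDiagnosticBuilder
//     diagnoseNotInt(Sema &S, SourceLocation Loc, QualType T) override {
//       return S.Diag(Loc, diag::err_size_not_integral) << T;
//     }
//     // ... overrides of diagnoseIncomplete, diagnoseExplicitConv,
//     // noteExplicitConv, diagnoseAmbiguous, noteAmbiguous, and
//     // diagnoseConversion go here ...
//   } Diagnoser;
//   ExprResult Converted =
//       S.PerformContextualImplicitConversion(Loc, SizeExpr, Diagnoser);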
enum ObjCSubscriptKind {
OS_Array,
OS_Dictionary,
OS_Error
};
ObjCSubscriptKind CheckSubscriptingKind(Expr *FromE);
// Note that LK_String is intentionally after the other literals, as
// this is used for diagnostics logic.
enum ObjCLiteralKind {
LK_Array,
LK_Dictionary,
LK_Numeric,
LK_Boxed,
LK_String,
LK_Block,
LK_None
};
ObjCLiteralKind CheckLiteralKind(Expr *FromE);
ExprResult PerformObjectMemberConversion(Expr *From,
NestedNameSpecifier *Qualifier,
NamedDecl *FoundDecl,
NamedDecl *Member);
// Members have to be NamespaceDecl* or TranslationUnitDecl*.
// TODO: make this a typesafe union.
typedef llvm::SmallSetVector<DeclContext *, 16> AssociatedNamespaceSet;
typedef llvm::SmallSetVector<CXXRecordDecl *, 16> AssociatedClassSet;
using ADLCallKind = CallExpr::ADLCallKind;
void AddOverloadCandidate(FunctionDecl *Function, DeclAccessPair FoundDecl,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool AllowExplicit = true,
bool AllowExplicitConversion = false,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL,
ConversionSequenceList EarlyConversions = None);
void AddFunctionCandidates(const UnresolvedSetImpl &Functions,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
bool FirstArgumentIsBase = false);
void AddMethodCandidate(DeclAccessPair FoundDecl,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversion = false);
void AddMethodCandidate(CXXMethodDecl *Method,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false,
ConversionSequenceList EarlyConversions = None);
void AddMethodTemplateCandidate(FunctionTemplateDecl *MethodTmpl,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ObjectType,
Expr::Classification ObjectClassification,
ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool SuppressUserConversions = false,
bool PartialOverloading = false);
void AddTemplateOverloadCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet, bool SuppressUserConversions = false,
bool PartialOverloading = false, bool AllowExplicit = true,
ADLCallKind IsADLCandidate = ADLCallKind::NotADL);
bool CheckNonDependentConversions(FunctionTemplateDecl *FunctionTemplate,
ArrayRef<QualType> ParamTypes,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
ConversionSequenceList &Conversions,
bool SuppressUserConversions,
CXXRecordDecl *ActingContext = nullptr,
QualType ObjectType = QualType(),
Expr::Classification
ObjectClassification = {});
void AddConversionCandidate(
CXXConversionDecl *Conversion, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddTemplateConversionCandidate(
FunctionTemplateDecl *FunctionTemplate, DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext, Expr *From, QualType ToType,
OverloadCandidateSet &CandidateSet, bool AllowObjCConversionOnExplicit,
bool AllowExplicit, bool AllowResultConversion = true);
void AddSurrogateCandidate(CXXConversionDecl *Conversion,
DeclAccessPair FoundDecl,
CXXRecordDecl *ActingContext,
const FunctionProtoType *Proto,
Expr *Object, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddMemberOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
SourceRange OpRange = SourceRange());
void AddBuiltinCandidate(QualType *ParamTys, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet,
bool IsAssignmentOperator = false,
unsigned NumContextualBoolArguments = 0);
void AddBuiltinOperatorCandidates(OverloadedOperatorKind Op,
SourceLocation OpLoc, ArrayRef<Expr *> Args,
OverloadCandidateSet& CandidateSet);
void AddArgumentDependentLookupCandidates(DeclarationName Name,
SourceLocation Loc,
ArrayRef<Expr *> Args,
TemplateArgumentListInfo *ExplicitTemplateArgs,
OverloadCandidateSet& CandidateSet,
bool PartialOverloading = false);
// Emit as a 'note' the specific overload candidate
void NoteOverloadCandidate(NamedDecl *Found, FunctionDecl *Fn,
QualType DestType = QualType(),
bool TakingAddress = false);
// Emit as a series of 'note's all templates and non-templates identified by
// the expression Expr
void NoteAllOverloadCandidates(Expr *E, QualType DestType = QualType(),
bool TakingAddress = false);
/// Check the enable_if expressions on the given function. Returns the first
/// failing attribute, or NULL if they were all successful.
EnableIfAttr *CheckEnableIf(FunctionDecl *Function, ArrayRef<Expr *> Args,
bool MissingImplicitThis = false);
/// Find the failed Boolean condition within a given Boolean
/// constant expression, and describe it with a string.
std::pair<Expr *, std::string> findFailedBooleanCondition(Expr *Cond);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// non-ArgDependent DiagnoseIfAttrs.
///
/// Argument-dependent diagnose_if attributes should be checked each time a
/// function is used as a direct callee of a function call.
///
/// Returns true if any errors were emitted.
bool diagnoseArgDependentDiagnoseIfAttrs(const FunctionDecl *Function,
const Expr *ThisArg,
ArrayRef<const Expr *> Args,
SourceLocation Loc);
/// Emit diagnostics for the diagnose_if attributes on Function, ignoring any
/// ArgDependent DiagnoseIfAttrs.
///
/// Argument-independent diagnose_if attributes should be checked on every use
/// of a function.
///
/// Returns true if any errors were emitted.
bool diagnoseArgIndependentDiagnoseIfAttrs(const NamedDecl *ND,
SourceLocation Loc);
/// Returns whether the given function's address can be taken or not,
/// optionally emitting a diagnostic if the address can't be taken.
///
/// Returns false if taking the address of the function is illegal.
bool checkAddressOfFunctionIsAvailable(const FunctionDecl *Function,
bool Complain = false,
SourceLocation Loc = SourceLocation());
// [PossiblyAFunctionType] --> [Return]
// NonFunctionType --> NonFunctionType
// R (A) --> R(A)
// R (*)(A) --> R (A)
// R (&)(A) --> R (A)
// R (S::*)(A) --> R (A)
QualType ExtractUnqualifiedFunctionType(QualType PossiblyAFunctionType);
FunctionDecl *
ResolveAddressOfOverloadedFunction(Expr *AddressOfExpr,
QualType TargetType,
bool Complain,
DeclAccessPair &Found,
bool *pHadMultipleCandidates = nullptr);
FunctionDecl *
resolveAddressOfOnlyViableOverloadCandidate(Expr *E,
DeclAccessPair &FoundResult);
bool resolveAndFixAddressOfOnlyViableOverloadCandidate(
ExprResult &SrcExpr, bool DoFunctionPointerConversion = false);
FunctionDecl *
ResolveSingleFunctionTemplateSpecialization(OverloadExpr *ovl,
bool Complain = false,
DeclAccessPair *Found = nullptr);
bool ResolveAndFixSingleFunctionTemplateSpecialization(
ExprResult &SrcExpr,
bool DoFunctionPointerConversion = false,
bool Complain = false,
SourceRange OpRangeForComplaining = SourceRange(),
QualType DestTypeForComplaining = QualType(),
unsigned DiagIDForComplaining = 0);
Expr *FixOverloadedFunctionReference(Expr *E,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
ExprResult FixOverloadedFunctionReference(ExprResult,
DeclAccessPair FoundDecl,
FunctionDecl *Fn);
void AddOverloadedCallCandidates(UnresolvedLookupExpr *ULE,
ArrayRef<Expr *> Args,
OverloadCandidateSet &CandidateSet,
bool PartialOverloading = false);
// An enum used to represent the different possible results of building a
// range-based for loop.
enum ForRangeStatus {
FRS_Success,
FRS_NoViableFunction,
FRS_DiagnosticIssued
};
ForRangeStatus BuildForRangeBeginEndCall(SourceLocation Loc,
SourceLocation RangeLoc,
const DeclarationNameInfo &NameInfo,
LookupResult &MemberLookup,
OverloadCandidateSet *CandidateSet,
Expr *Range, ExprResult *CallExpr);
ExprResult BuildOverloadedCallExpr(Scope *S, Expr *Fn,
UnresolvedLookupExpr *ULE,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc,
Expr *ExecConfig,
bool AllowTypoCorrection=true,
bool CalleesAddressIsTaken=false);
bool buildOverloadedCallSet(Scope *S, Expr *Fn, UnresolvedLookupExpr *ULE,
MultiExprArg Args, SourceLocation RParenLoc,
OverloadCandidateSet *CandidateSet,
ExprResult *Result);
ExprResult CreateOverloadedUnaryOp(SourceLocation OpLoc,
UnaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *input, bool RequiresADL = true);
ExprResult CreateOverloadedBinOp(SourceLocation OpLoc,
BinaryOperatorKind Opc,
const UnresolvedSetImpl &Fns,
Expr *LHS, Expr *RHS,
bool RequiresADL = true);
ExprResult CreateOverloadedArraySubscriptExpr(SourceLocation LLoc,
SourceLocation RLoc,
Expr *Base,Expr *Idx);
ExprResult
BuildCallToMemberFunction(Scope *S, Expr *MemExpr,
SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult
BuildCallToObjectOfClassType(Scope *S, Expr *Object, SourceLocation LParenLoc,
MultiExprArg Args,
SourceLocation RParenLoc);
ExprResult BuildOverloadedArrowExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
bool *NoArrowOperatorFound = nullptr);
/// CheckCallReturnType - Checks that a call expression's return type is
/// complete. Returns true on failure. The location passed in is the location
/// that best represents the call.
bool CheckCallReturnType(QualType ReturnType, SourceLocation Loc,
CallExpr *CE, FunctionDecl *FD);
/// Helpers for dealing with blocks and functions.
bool CheckParmsForFunctionDef(ArrayRef<ParmVarDecl *> Parameters,
bool CheckParameterNames);
void CheckCXXDefaultArguments(FunctionDecl *FD);
void CheckExtraCXXDefaultArguments(Declarator &D);
Scope *getNonFieldDeclScope(Scope *S);
/// \name Name lookup
///
/// These routines provide name lookup that is used during semantic
/// analysis to resolve the various kinds of names (identifiers,
/// overloaded operator names, constructor names, etc.) into zero or
/// more declarations within a particular scope. The major entry
/// points are LookupName, which performs unqualified name lookup,
/// and LookupQualifiedName, which performs qualified name lookup.
///
/// All name lookup is performed based on some specific criteria,
/// which specify what names will be visible to name lookup and how
/// far name lookup should work. These criteria are important both
/// for capturing language semantics (certain lookups will ignore
/// certain names, for example) and for performance, since name
/// lookup is often a bottleneck in the compilation of C++. Name
/// lookup criteria are specified via the LookupCriteria enumeration.
///
/// The results of name lookup can vary based on the kind of name
/// lookup performed, the current language, and the translation
/// unit. In C, for example, name lookup will either return nothing
/// (no entity found) or a single declaration. In C++, name lookup
/// can additionally refer to a set of overloaded functions or
/// result in an ambiguity. All of the possible results of name
/// lookup are captured by the LookupResult class, which provides
/// the ability to distinguish among them.
//@{
/// Describes the kind of name lookup to perform.
enum LookupNameKind {
/// Ordinary name lookup, which finds ordinary names (functions,
/// variables, typedefs, etc.) in C and most kinds of names
/// (functions, variables, members, types, etc.) in C++.
LookupOrdinaryName = 0,
/// Tag name lookup, which finds the names of enums, classes,
/// structs, and unions.
LookupTagName,
/// Label name lookup.
LookupLabel,
/// Member name lookup, which finds the names of
/// class/struct/union members.
LookupMemberName,
/// Look up of an operator name (e.g., operator+) for use with
/// operator overloading. This lookup is similar to ordinary name
/// lookup, but will ignore any declarations that are class members.
LookupOperatorName,
/// Look up of a name that precedes the '::' scope resolution
/// operator in C++. This lookup completely ignores operator, object,
/// function, and enumerator names (C++ [basic.lookup.qual]p1).
LookupNestedNameSpecifierName,
/// Look up a namespace name within a C++ using directive or
/// namespace alias definition, ignoring non-namespace names (C++
/// [basic.lookup.udir]p1).
LookupNamespaceName,
/// Look up all declarations in a scope with the given name,
/// including resolved using declarations. This is appropriate
/// for checking redeclarations for a using declaration.
LookupUsingDeclName,
/// Look up an ordinary name that is going to be redeclared as a
/// name with linkage. This lookup ignores any declarations that
/// are outside of the current scope unless they have linkage. See
/// C99 6.2.2p4-5 and C++ [basic.link]p6.
LookupRedeclarationWithLinkage,
/// Look up a friend of a local class. This lookup does not look
/// outside the innermost non-class scope. See C++11 [class.friend]p11.
LookupLocalFriendName,
/// Look up the name of an Objective-C protocol.
LookupObjCProtocolName,
/// Look up implicit 'self' parameter of an objective-c method.
LookupObjCImplicitSelfParam,
/// Look up the name of an OpenMP user-defined reduction operation.
LookupOMPReductionName,
/// Look up the name of an OpenMP user-defined mapper.
LookupOMPMapperName,
/// Look up any declaration with any name.
LookupAnyName
};
/// Specifies whether (or how) name lookup is being performed for a
/// redeclaration (vs. a reference).
enum RedeclarationKind {
/// The lookup is a reference to this name that is not for the
/// purpose of redeclaring the name.
NotForRedeclaration = 0,
/// The lookup results will be used for redeclaration of a name,
/// if an entity by that name already exists and is visible.
ForVisibleRedeclaration,
/// The lookup results will be used for redeclaration of a name
/// with external linkage; non-visible lookup results with external linkage
/// may also be found.
ForExternalRedeclaration
};
RedeclarationKind forRedeclarationInCurContext() {
// A declaration with an owning module for linkage can never link against
// anything that is not visible. We don't need to check linkage here; if
// the context has internal linkage, redeclaration lookup won't find things
// from other TUs, and we can't safely compute linkage yet in general.
if (cast<Decl>(CurContext)
->getOwningModuleForLinkage(/*IgnoreLinkage*/true))
return ForVisibleRedeclaration;
return ForExternalRedeclaration;
}
/// The possible outcomes of name lookup for a literal operator.
enum LiteralOperatorLookupResult {
/// The lookup resulted in an error.
LOLR_Error,
/// The lookup found no match but no diagnostic was issued.
LOLR_ErrorNoDiagnostic,
/// The lookup found a single 'cooked' literal operator, which
/// expects a normal literal to be built and passed to it.
LOLR_Cooked,
/// The lookup found a single 'raw' literal operator, which expects
/// a string literal containing the spelling of the literal token.
LOLR_Raw,
/// The lookup found an overload set of literal operator templates,
/// which expect the characters of the spelling of the literal token to be
/// passed as a non-type template argument pack.
LOLR_Template,
/// The lookup found an overload set of literal operator templates,
/// which expect the character type and characters of the spelling of the
/// string literal token to be passed as template arguments.
LOLR_StringTemplate
};
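// For reference, the user-level declarations that produce the non-error
// results above look like this (illustrative only):
//   unsigned long long operator"" _u(unsigned long long);     // LOLR_Cooked
//   int operator"" _r(const char *);                          // LOLR_Raw
//   template <char...> int operator"" _t();                   // LOLR_Template
//   template <typename CharT, CharT...>
//   int operator"" _s();                        // LOLR_StringTemplate (ext.)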
SpecialMemberOverloadResult LookupSpecialMember(CXXRecordDecl *D,
CXXSpecialMember SM,
bool ConstArg,
bool VolatileArg,
bool RValueThis,
bool ConstThis,
bool VolatileThis);
typedef std::function<void(const TypoCorrection &)> TypoDiagnosticGenerator;
typedef std::function<ExprResult(Sema &, TypoExpr *, TypoCorrection)>
TypoRecoveryCallback;
private:
bool CppLookupName(LookupResult &R, Scope *S);
struct TypoExprState {
std::unique_ptr<TypoCorrectionConsumer> Consumer;
TypoDiagnosticGenerator DiagHandler;
TypoRecoveryCallback RecoveryHandler;
TypoExprState();
TypoExprState(TypoExprState &&other) noexcept;
TypoExprState &operator=(TypoExprState &&other) noexcept;
};
/// The set of unhandled TypoExprs and their associated state.
llvm::MapVector<TypoExpr *, TypoExprState> DelayedTypos;
/// Creates a new TypoExpr AST node.
TypoExpr *createDelayedTypo(std::unique_ptr<TypoCorrectionConsumer> TCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC);
// The set of known/encountered (unique, canonicalized) NamespaceDecls.
//
// The boolean value will be true to indicate that the namespace was loaded
// from an AST/PCH file, or false otherwise.
llvm::MapVector<NamespaceDecl*, bool> KnownNamespaces;
/// Whether we have already loaded known namespaces from an external
/// source.
bool LoadedExternalKnownNamespaces;
/// Helper for CorrectTypo and CorrectTypoDelayed used to create and
/// populate a new TypoCorrectionConsumer. Returns nullptr if typo correction
/// should be skipped entirely.
std::unique_ptr<TypoCorrectionConsumer>
makeTypoCorrectionConsumer(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
DeclContext *MemberContext, bool EnteringContext,
const ObjCObjectPointerType *OPT,
bool ErrorRecovery);
public:
const TypoExprState &getTypoExprState(TypoExpr *TE) const;
/// Clears the state of the given TypoExpr.
void clearDelayedTypo(TypoExpr *TE);
/// Look up a name, looking for a single declaration. Return
/// null if the results were absent, ambiguous, or overloaded.
///
/// It is preferable to use the elaborated form and explicitly handle
/// ambiguity and overloading.
NamedDecl *LookupSingleName(Scope *S, DeclarationName Name,
SourceLocation Loc,
LookupNameKind NameKind,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupName(LookupResult &R, Scope *S,
bool AllowBuiltinCreation = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
bool InUnqualifiedLookup = false);
bool LookupQualifiedName(LookupResult &R, DeclContext *LookupCtx,
CXXScopeSpec &SS);
bool LookupParsedName(LookupResult &R, Scope *S, CXXScopeSpec *SS,
bool AllowBuiltinCreation = false,
bool EnteringContext = false);
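// A minimal usage sketch of unqualified lookup (illustrative only):
//   LookupResult R(S, Name, Loc, Sema::LookupOrdinaryName);
//   if (S.LookupName(R, CurScope) && R.isSingleResult())
//     NamedDecl *D = R.getFoundDecl();
//   // Or, when ambiguity and overloading should simply yield null:
//   NamedDecl *Single =
//       S.LookupSingleName(CurScope, Name, Loc, Sema::LookupOrdinaryName);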
ObjCProtocolDecl *LookupProtocol(IdentifierInfo *II, SourceLocation IdLoc,
RedeclarationKind Redecl
= NotForRedeclaration);
bool LookupInSuper(LookupResult &R, CXXRecordDecl *Class);
void LookupOverloadedOperatorName(OverloadedOperatorKind Op, Scope *S,
QualType T1, QualType T2,
UnresolvedSetImpl &Functions);
LabelDecl *LookupOrCreateLabel(IdentifierInfo *II, SourceLocation IdentLoc,
SourceLocation GnuLabelLoc = SourceLocation());
DeclContextLookupResult LookupConstructors(CXXRecordDecl *Class);
CXXConstructorDecl *LookupDefaultConstructor(CXXRecordDecl *Class);
CXXConstructorDecl *LookupCopyingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupCopyingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXConstructorDecl *LookupMovingConstructor(CXXRecordDecl *Class,
unsigned Quals);
CXXMethodDecl *LookupMovingAssignment(CXXRecordDecl *Class, unsigned Quals,
bool RValueThis, unsigned ThisQuals);
CXXDestructorDecl *LookupDestructor(CXXRecordDecl *Class);
bool checkLiteralOperatorId(const CXXScopeSpec &SS, const UnqualifiedId &Id);
LiteralOperatorLookupResult LookupLiteralOperator(Scope *S, LookupResult &R,
ArrayRef<QualType> ArgTys,
bool AllowRaw,
bool AllowTemplate,
bool AllowStringTemplate,
bool DiagnoseMissing);
bool isKnownName(StringRef name);
void ArgumentDependentLookup(DeclarationName Name, SourceLocation Loc,
ArrayRef<Expr *> Args, ADLResult &Functions);
void LookupVisibleDecls(Scope *S, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool LoadExternal = true);
void LookupVisibleDecls(DeclContext *Ctx, LookupNameKind Kind,
VisibleDeclConsumer &Consumer,
bool IncludeGlobalScope = true,
bool IncludeDependentBases = false,
bool LoadExternal = true);
enum CorrectTypoKind {
CTK_NonError, // CorrectTypo used in a non-error-recovery situation.
CTK_ErrorRecovery // CorrectTypo used in normal error recovery.
};
TypoCorrection CorrectTypo(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind,
Scope *S, CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr,
bool RecordFailure = true);
TypoExpr *CorrectTypoDelayed(const DeclarationNameInfo &Typo,
Sema::LookupNameKind LookupKind, Scope *S,
CXXScopeSpec *SS,
CorrectionCandidateCallback &CCC,
TypoDiagnosticGenerator TDG,
TypoRecoveryCallback TRC, CorrectTypoKind Mode,
DeclContext *MemberContext = nullptr,
bool EnteringContext = false,
const ObjCObjectPointerType *OPT = nullptr);
/// Process any TypoExprs in the given Expr and its children,
/// generating diagnostics as appropriate and returning a new Expr if there
/// were typos that were all successfully corrected, or ExprError if one or
/// more typos could not be corrected.
///
/// \param E The Expr to check for TypoExprs.
///
/// \param InitDecl A VarDecl to avoid because the Expr being corrected is its
/// initializer.
///
/// \param Filter A function applied to a newly rebuilt Expr to determine if
/// it is an acceptable/usable result from a single combination of typo
/// corrections. As long as the filter returns ExprError, different
/// combinations of corrections will be tried until all are exhausted.
ExprResult
CorrectDelayedTyposInExpr(Expr *E, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; });
ExprResult
CorrectDelayedTyposInExpr(Expr *E,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(E, nullptr, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER, VarDecl *InitDecl = nullptr,
llvm::function_ref<ExprResult(Expr *)> Filter =
[](Expr *E) -> ExprResult { return E; }) {
return ER.isInvalid() ? ER : CorrectDelayedTyposInExpr(ER.get(), InitDecl, Filter);
}
ExprResult
CorrectDelayedTyposInExpr(ExprResult ER,
llvm::function_ref<ExprResult(Expr *)> Filter) {
return CorrectDelayedTyposInExpr(ER, nullptr, Filter);
}
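// A minimal usage sketch of the Filter parameter (illustrative only):
//   ExprResult Res = S.CorrectDelayedTyposInExpr(
//       E, /*InitDecl=*/nullptr, [](Expr *Rebuilt) -> ExprResult {
//         // Reject rebuilt expressions that are not of pointer type; other
//         // combinations of typo corrections will then be tried.
//         return Rebuilt->getType()->isPointerType() ? ExprResult(Rebuilt)
//                                                    : ExprError();
//       });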
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
bool ErrorRecovery = true);
void diagnoseTypo(const TypoCorrection &Correction,
const PartialDiagnostic &TypoDiag,
const PartialDiagnostic &PrevNote,
bool ErrorRecovery = true);
void MarkTypoCorrectedFunctionDefinition(const NamedDecl *F);
void FindAssociatedClassesAndNamespaces(SourceLocation InstantiationLoc,
ArrayRef<Expr *> Args,
AssociatedNamespaceSet &AssociatedNamespaces,
AssociatedClassSet &AssociatedClasses);
void FilterLookupForScope(LookupResult &R, DeclContext *Ctx, Scope *S,
bool ConsiderLinkage, bool AllowInlineNamespace);
bool CheckRedeclarationModuleOwnership(NamedDecl *New, NamedDecl *Old);
void DiagnoseAmbiguousLookup(LookupResult &Result);
//@}
ObjCInterfaceDecl *getObjCInterfaceDecl(IdentifierInfo *&Id,
SourceLocation IdLoc,
bool TypoCorrection = false);
NamedDecl *LazilyCreateBuiltin(IdentifierInfo *II, unsigned ID,
Scope *S, bool ForRedeclaration,
SourceLocation Loc);
NamedDecl *ImplicitlyDefineFunction(SourceLocation Loc, IdentifierInfo &II,
Scope *S);
void AddKnownFunctionAttributes(FunctionDecl *FD);
// More parsing and symbol table subroutines.
void ProcessPragmaWeak(Scope *S, Decl *D);
// Decl attributes - this routine is the top level dispatcher.
void ProcessDeclAttributes(Scope *S, Decl *D, const Declarator &PD);
// Helper for delayed processing of attributes.
void ProcessDeclAttributeDelayed(Decl *D,
const ParsedAttributesView &AttrList);
void ProcessDeclAttributeList(Scope *S, Decl *D, const ParsedAttributesView &AL,
bool IncludeCXX11Attributes = true);
bool ProcessAccessDeclAttributeList(AccessSpecDecl *ASDecl,
const ParsedAttributesView &AttrList);
void checkUnusedDeclAttributes(Declarator &D);
/// Determine if type T is a valid subject for a nonnull and similar
/// attributes. By default, we look through references (the behavior used by
/// nonnull), but if the second parameter is true, then we treat a reference
/// type as valid.
bool isValidPointerAttrType(QualType T, bool RefOkay = false);
bool CheckRegparmAttr(const ParsedAttr &attr, unsigned &value);
bool CheckCallingConvAttr(const ParsedAttr &attr, CallingConv &CC,
const FunctionDecl *FD = nullptr);
bool CheckAttrTarget(const ParsedAttr &CurrAttr);
bool CheckAttrNoArgs(const ParsedAttr &CurrAttr);
bool checkStringLiteralArgumentAttr(const ParsedAttr &Attr, unsigned ArgNum,
StringRef &Str,
SourceLocation *ArgLocation = nullptr);
bool checkSectionName(SourceLocation LiteralLoc, StringRef Str);
bool checkTargetAttr(SourceLocation LiteralLoc, StringRef Str);
bool checkMSInheritanceAttrOnDefinition(
CXXRecordDecl *RD, SourceRange Range, bool BestCase,
MSInheritanceAttr::Spelling SemanticSpelling);
void CheckAlignasUnderalignment(Decl *D);
/// Adjust the calling convention of a method to be the ABI default if it
/// wasn't specified explicitly. This handles method types formed from
/// function type typedefs and typename template arguments.
void adjustMemberFunctionCC(QualType &T, bool IsStatic, bool IsCtorOrDtor,
SourceLocation Loc);
// Check if there is an explicit attribute, but only look through parens.
// The intent is to look for an attribute on the current declarator, but not
// one that came from a typedef.
bool hasExplicitCallingConv(QualType T);
/// Get the outermost AttributedType node that sets a calling convention.
/// Valid types should not have multiple attributes with different CCs.
const AttributedType *getCallingConvAttributedType(QualType T) const;
/// Stmt attributes - this routine is the top level dispatcher.
StmtResult ProcessStmtAttributes(Stmt *Stmt,
const ParsedAttributesView &Attrs,
SourceRange Range);
void WarnConflictingTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
void CheckConflictingOverridingMethod(ObjCMethodDecl *Method,
ObjCMethodDecl *Overridden,
bool IsProtocolMethodDecl);
/// WarnExactTypedMethods - This routine issues a warning if a method
/// implementation declaration matches exactly that of its declaration.
void WarnExactTypedMethods(ObjCMethodDecl *Method,
ObjCMethodDecl *MethodDecl,
bool IsProtocolMethodDecl);
typedef llvm::SmallPtrSet<Selector, 8> SelectorSet;
/// CheckImplementationIvars - This routine checks if the instance variables
/// listed in the implementation match those listed in the interface.
void CheckImplementationIvars(ObjCImplementationDecl *ImpDecl,
ObjCIvarDecl **Fields, unsigned nIvars,
SourceLocation Loc);
/// ImplMethodsVsClassMethods - This is the main routine to warn if any method
/// remains unimplemented in the class or category \@implementation.
void ImplMethodsVsClassMethods(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool IncompleteImpl = false);
/// DiagnoseUnimplementedProperties - This routine warns on those properties
/// which must be implemented by this implementation.
void DiagnoseUnimplementedProperties(Scope *S, ObjCImplDecl* IMPDecl,
ObjCContainerDecl *CDecl,
bool SynthesizeProperties);
/// Diagnose any null-resettable synthesized setters.
void diagnoseNullResettableSynthesizedSetters(const ObjCImplDecl *impDecl);
/// DefaultSynthesizeProperties - This routine default synthesizes all
/// properties which must be synthesized in the class's \@implementation.
void DefaultSynthesizeProperties(Scope *S, ObjCImplDecl *IMPDecl,
ObjCInterfaceDecl *IDecl,
SourceLocation AtEnd);
void DefaultSynthesizeProperties(Scope *S, Decl *D, SourceLocation AtEnd);
/// IvarBacksCurrentMethodAccessor - This routine returns 'true' if 'IV' is
/// an ivar synthesized for 'Method' and 'Method' is a property accessor
/// declared in class 'IFace'.
bool IvarBacksCurrentMethodAccessor(ObjCInterfaceDecl *IFace,
ObjCMethodDecl *Method, ObjCIvarDecl *IV);
/// DiagnoseUnusedBackingIvarInAccessor - Issue an 'unused' warning if the
/// ivar which backs the property is not used in the property's accessor.
void DiagnoseUnusedBackingIvarInAccessor(Scope *S,
const ObjCImplementationDecl *ImplD);
/// GetIvarBackingPropertyAccessor - If method is a property setter/getter and
/// its property has a backing ivar, returns this ivar; otherwise, returns NULL.
/// It also returns the ivar's property on success.
ObjCIvarDecl *GetIvarBackingPropertyAccessor(const ObjCMethodDecl *Method,
const ObjCPropertyDecl *&PDecl) const;
/// Called by ActOnProperty to handle \@property declarations in
/// class extensions.
ObjCPropertyDecl *HandlePropertyInClassExtension(Scope *S,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
unsigned &Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind);
/// Called by ActOnProperty and HandlePropertyInClassExtension to
/// handle creating the ObjcPropertyDecl for a category or \@interface.
ObjCPropertyDecl *CreatePropertyDecl(Scope *S,
ObjCContainerDecl *CDecl,
SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD,
Selector GetterSel,
SourceLocation GetterNameLoc,
Selector SetterSel,
SourceLocation SetterNameLoc,
const bool isReadWrite,
const unsigned Attributes,
const unsigned AttributesAsWritten,
QualType T,
TypeSourceInfo *TSI,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
/// AtomicPropertySetterGetterRules - This routine enforces the rule (via
/// warning) when an atomic property has one user-declared setter or getter
/// but not the other.
void AtomicPropertySetterGetterRules(ObjCImplDecl* IMPDecl,
ObjCInterfaceDecl* IDecl);
void DiagnoseOwningPropertyGetterSynthesis(const ObjCImplementationDecl *D);
void DiagnoseMissingDesignatedInitOverrides(
const ObjCImplementationDecl *ImplD,
const ObjCInterfaceDecl *IFD);
void DiagnoseDuplicateIvars(ObjCInterfaceDecl *ID, ObjCInterfaceDecl *SID);
enum MethodMatchStrategy {
MMS_loose,
MMS_strict
};
/// MatchTwoMethodDeclarations - Checks whether two methods' types match and
/// returns true or false accordingly.
bool MatchTwoMethodDeclarations(const ObjCMethodDecl *Method,
const ObjCMethodDecl *PrevMethod,
MethodMatchStrategy strategy = MMS_strict);
/// MatchAllMethodDeclarations - Check methods declared in an interface or
/// protocol against those declared in their implementations.
void MatchAllMethodDeclarations(const SelectorSet &InsMap,
const SelectorSet &ClsMap,
SelectorSet &InsMapSeen,
SelectorSet &ClsMapSeen,
ObjCImplDecl* IMPDecl,
ObjCContainerDecl* IDecl,
bool &IncompleteImpl,
bool ImmediateClass,
bool WarnCategoryMethodImpl=false);
/// CheckCategoryVsClassMethodMatches - Checks that methods implemented in
/// a category match those implemented in its primary class and
/// warns each time an exact match is found.
void CheckCategoryVsClassMethodMatches(ObjCCategoryImplDecl *CatIMP);
/// Add the given method to the list of globally-known methods.
void addMethodToGlobalList(ObjCMethodList *List, ObjCMethodDecl *Method);
private:
/// AddMethodToGlobalPool - Add an instance or factory method to the global
/// pool. See description of AddInstanceMethodToGlobalPool.
void AddMethodToGlobalPool(ObjCMethodDecl *Method, bool impl, bool instance);
/// LookupMethodInGlobalPool - Returns the instance or factory method and
/// optionally warns if there are multiple signatures.
ObjCMethodDecl *LookupMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass,
bool instance);
public:
/// - Returns instance or factory methods in the global method pool for the
/// given selector. It checks the desired kind first; if none is found and the
/// parameter checkTheOther is set, it then checks the other kind. If no such
/// method or only one method is found, the function returns false; otherwise,
/// it returns true.
bool
CollectMultipleMethodsInGlobalPool(Selector Sel,
SmallVectorImpl<ObjCMethodDecl*>& Methods,
bool InstanceFirst, bool CheckTheOther,
const ObjCObjectType *TypeBound = nullptr);
bool
AreMultipleMethodsInGlobalPool(Selector Sel, ObjCMethodDecl *BestMethod,
SourceRange R, bool receiverIdOrClass,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
void
DiagnoseMultipleMethodInGlobalPool(SmallVectorImpl<ObjCMethodDecl*> &Methods,
Selector Sel, SourceRange R,
bool receiverIdOrClass);
private:
/// - Returns the method which best matches the given argument list, or
/// nullptr if none could be found.
ObjCMethodDecl *SelectBestMethod(Selector Sel, MultiExprArg Args,
bool IsInstance,
SmallVectorImpl<ObjCMethodDecl*>& Methods);
/// Record the typo correction failure and return an empty correction.
TypoCorrection FailedCorrection(IdentifierInfo *Typo, SourceLocation TypoLoc,
bool RecordFailure = true) {
if (RecordFailure)
TypoCorrectionFailures[Typo].insert(TypoLoc);
return TypoCorrection();
}
public:
/// AddInstanceMethodToGlobalPool - All instance methods in a translation
/// unit are added to a global pool. This allows us to efficiently associate
/// a selector with a method declaration for purposes of typechecking
/// messages sent to "id" (where the class of the object is unknown).
void AddInstanceMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/true);
}
/// AddFactoryMethodToGlobalPool - Same as above, but for factory methods.
void AddFactoryMethodToGlobalPool(ObjCMethodDecl *Method, bool impl=false) {
AddMethodToGlobalPool(Method, impl, /*instance*/false);
}
/// AddAnyMethodToGlobalPool - Add any method, instance or factory to global
/// pool.
void AddAnyMethodToGlobalPool(Decl *D);
/// LookupInstanceMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupInstanceMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/true);
}
/// LookupFactoryMethodInGlobalPool - Returns the method and warns if
/// there are multiple signatures.
ObjCMethodDecl *LookupFactoryMethodInGlobalPool(Selector Sel, SourceRange R,
bool receiverIdOrClass=false) {
return LookupMethodInGlobalPool(Sel, R, receiverIdOrClass,
/*instance*/false);
}
const ObjCMethodDecl *SelectorsForTypoCorrection(Selector Sel,
QualType ObjectType=QualType());
/// LookupImplementedMethodInGlobalPool - Returns the method which has an
/// implementation.
ObjCMethodDecl *LookupImplementedMethodInGlobalPool(Selector Sel);
/// CollectIvarsToConstructOrDestruct - Collect those ivars which require
/// initialization.
void CollectIvarsToConstructOrDestruct(ObjCInterfaceDecl *OI,
SmallVectorImpl<ObjCIvarDecl*> &Ivars);
//===--------------------------------------------------------------------===//
// Statement Parsing Callbacks: SemaStmt.cpp.
public:
class FullExprArg {
public:
FullExprArg() : E(nullptr) { }
FullExprArg(Sema &actions) : E(nullptr) { }
ExprResult release() {
return E;
}
Expr *get() const { return E; }
Expr *operator->() {
return E;
}
private:
// FIXME: No need to make the entire Sema class a friend when it's just
// Sema::MakeFullExpr that needs access to the constructor below.
friend class Sema;
explicit FullExprArg(Expr *expr) : E(expr) {}
Expr *E;
};
FullExprArg MakeFullExpr(Expr *Arg) {
return MakeFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation());
}
FullExprArg MakeFullExpr(Expr *Arg, SourceLocation CC) {
return FullExprArg(
ActOnFinishFullExpr(Arg, CC, /*DiscardedValue*/ false).get());
}
FullExprArg MakeFullDiscardedValueExpr(Expr *Arg) {
ExprResult FE =
ActOnFinishFullExpr(Arg, Arg ? Arg->getExprLoc() : SourceLocation(),
/*DiscardedValue*/ true);
return FullExprArg(FE.get());
}
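// A minimal usage sketch (illustrative only): wrap the third clause of a
// 'for' statement as a full (discarded-value) expression before building
// the statement.
//   Sema::FullExprArg Third = S.MakeFullDiscardedValueExpr(IncExpr);
//   StmtResult For = S.ActOnForStmt(ForLoc, LParenLoc, InitStmt, Cond,
//                                   Third, RParenLoc, Body);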
StmtResult ActOnExprStmt(ExprResult Arg, bool DiscardedValue = true);
StmtResult ActOnExprStmtError();
StmtResult ActOnNullStmt(SourceLocation SemiLoc,
bool HasLeadingEmptyMacro = false);
void ActOnStartOfCompoundStmt(bool IsStmtExpr);
void ActOnFinishOfCompoundStmt();
StmtResult ActOnCompoundStmt(SourceLocation L, SourceLocation R,
ArrayRef<Stmt *> Elts, bool isStmtExpr,
CheckedScopeSpecifier WrittenCSS = CSS_None,
SourceLocation CSSLoc = SourceLocation(),
SourceLocation CSMLoc = SourceLocation());
private:
CheckedScopeSpecifier CheckingKind;
// Keep a stack of saved checked scope information.
class SavedCheckedScope {
public:
SavedCheckedScope(CheckedScopeSpecifier S, SourceLocation L) :
Loc(L), Saved(S) {}
SourceLocation Loc;
CheckedScopeSpecifier Saved;
};
SmallVector<SavedCheckedScope, 8> CheckingKindStack; // can be empty
public:
CheckedScopeSpecifier GetCheckedScopeInfo() {
return CheckingKind;
}
void SetCheckedScopeInfo(CheckedScopeSpecifier CSS) {
CheckingKind = CSS;
}
void PushCheckedScopeInfo(SourceLocation Loc) {
CheckingKindStack.push_back(SavedCheckedScope(CheckingKind, Loc));
}
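// Returns true if the stack was empty and there was nothing to pop.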
bool PopCheckedScopeInfo() {
if (CheckingKindStack.size() > 0) {
CheckingKind = CheckingKindStack.back().Saved;
CheckingKindStack.pop_back();
return false;
}
else
return true;
}
void DiagnoseUnterminatedCheckedScope();
bool IsCheckedScope() {
return CheckingKind != CSS_Unchecked;
}
class CheckedScopeRAII {
Sema &SemaRef;
CheckedScopeSpecifier PrevCheckingKind;
public:
CheckedScopeRAII(Sema &SemaRef, CheckedScopeSpecifier CSS)
: SemaRef(SemaRef),
PrevCheckingKind(SemaRef.CheckingKind) {
if (CSS != CSS_None)
SemaRef.CheckingKind = CSS;
}
CheckedScopeRAII(Sema &S, DeclSpec &DS) :
CheckedScopeRAII(S, DS.getCheckedScopeSpecifier()) {
}
~CheckedScopeRAII() {
SemaRef.CheckingKind = PrevCheckingKind;
}
};
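// A minimal usage sketch (illustrative only): enter the checked scope
// written on a declaration's specifiers for the duration of a block,
// restoring the previous state afterwards.
//   {
//     Sema::CheckedScopeRAII CheckedScope(S, DS); // DS is a DeclSpec
//     // ... semantic checks run under DS's checked scope specifier ...
//   } // previous CheckingKind restored here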
/// A RAII object to enter scope of a compound statement.
class CompoundScopeRAII {
public:
CompoundScopeRAII(Sema &S, bool IsStmtExpr = false,
CheckedScopeSpecifier CSS = CSS_None):
S(S), CheckedProperties(S, CSS) {
S.ActOnStartOfCompoundStmt(IsStmtExpr);
}
~CompoundScopeRAII() {
S.ActOnFinishOfCompoundStmt();
}
private:
Sema &S;
CheckedScopeRAII CheckedProperties;
};
/// An RAII helper that pops a function scope on exit.
struct FunctionScopeRAII {
Sema &S;
bool Active;
FunctionScopeRAII(Sema &S) : S(S), Active(true) {}
~FunctionScopeRAII() {
if (Active)
S.PopFunctionScopeInfo();
}
void disable() { Active = false; }
};
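// A minimal usage sketch (illustrative only): guarantee that a pushed
// function scope is popped on every early-exit path, unless it is handed
// off for later cleanup.
//   Sema::FunctionScopeRAII FuncScope(S);
//   if (Invalid)
//     return StmtError(); // scope popped by FuncScope's destructor
//   FuncScope.disable();  // scope will be popped elsewhere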
StmtResult ActOnDeclStmt(DeclGroupPtrTy Decl,
SourceLocation StartLoc,
SourceLocation EndLoc);
void ActOnForEachDeclStmt(DeclGroupPtrTy Decl);
StmtResult ActOnForEachLValueExpr(Expr *E);
ExprResult ActOnCaseExpr(SourceLocation CaseLoc, ExprResult Val);
StmtResult ActOnCaseStmt(SourceLocation CaseLoc, ExprResult LHS,
SourceLocation DotDotDotLoc, ExprResult RHS,
SourceLocation ColonLoc);
void ActOnCaseStmtBody(Stmt *CaseStmt, Stmt *SubStmt);
StmtResult ActOnDefaultStmt(SourceLocation DefaultLoc,
SourceLocation ColonLoc,
Stmt *SubStmt, Scope *CurScope);
StmtResult ActOnLabelStmt(SourceLocation IdentLoc, LabelDecl *TheDecl,
SourceLocation ColonLoc, Stmt *SubStmt);
StmtResult ActOnAttributedStmt(SourceLocation AttrLoc,
ArrayRef<const Attr*> Attrs,
Stmt *SubStmt);
class ConditionResult;
StmtResult ActOnIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult BuildIfStmt(SourceLocation IfLoc, bool IsConstexpr,
Stmt *InitStmt,
ConditionResult Cond, Stmt *ThenVal,
SourceLocation ElseLoc, Stmt *ElseVal);
StmtResult ActOnStartOfSwitchStmt(SourceLocation SwitchLoc,
Stmt *InitStmt,
ConditionResult Cond);
StmtResult ActOnFinishSwitchStmt(SourceLocation SwitchLoc,
Stmt *Switch, Stmt *Body);
StmtResult ActOnWhileStmt(SourceLocation WhileLoc, ConditionResult Cond,
Stmt *Body);
StmtResult ActOnDoStmt(SourceLocation DoLoc, Stmt *Body,
SourceLocation WhileLoc, SourceLocation CondLParen,
Expr *Cond, SourceLocation CondRParen);
StmtResult ActOnForStmt(SourceLocation ForLoc,
SourceLocation LParenLoc,
Stmt *First,
ConditionResult Second,
FullExprArg Third,
SourceLocation RParenLoc,
Stmt *Body);
ExprResult CheckObjCForCollectionOperand(SourceLocation forLoc,
Expr *collection);
StmtResult ActOnObjCForCollectionStmt(SourceLocation ForColLoc,
Stmt *First, Expr *collection,
SourceLocation RParenLoc);
StmtResult FinishObjCForCollectionStmt(Stmt *ForCollection, Stmt *Body);
enum BuildForRangeKind {
/// Initial building of a for-range statement.
BFRK_Build,
/// Instantiation or recovery rebuild of a for-range statement. Don't
/// attempt any typo-correction.
BFRK_Rebuild,
/// Determining whether a for-range statement could be built. Avoid any
/// unnecessary or irreversible actions.
BFRK_Check
};
StmtResult ActOnCXXForRangeStmt(Scope *S, SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
Stmt *LoopVar,
SourceLocation ColonLoc, Expr *Collection,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult BuildCXXForRangeStmt(SourceLocation ForLoc,
SourceLocation CoawaitLoc,
Stmt *InitStmt,
SourceLocation ColonLoc,
Stmt *RangeDecl, Stmt *Begin, Stmt *End,
Expr *Cond, Expr *Inc,
Stmt *LoopVarDecl,
SourceLocation RParenLoc,
BuildForRangeKind Kind);
StmtResult FinishCXXForRangeStmt(Stmt *ForRange, Stmt *Body);
StmtResult ActOnGotoStmt(SourceLocation GotoLoc,
SourceLocation LabelLoc,
LabelDecl *TheDecl);
StmtResult ActOnIndirectGotoStmt(SourceLocation GotoLoc,
SourceLocation StarLoc,
Expr *DestExp);
StmtResult ActOnContinueStmt(SourceLocation ContinueLoc, Scope *CurScope);
StmtResult ActOnBreakStmt(SourceLocation BreakLoc, Scope *CurScope);
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind, unsigned NumParams);
typedef std::pair<StringRef, QualType> CapturedParamNameType;
void ActOnCapturedRegionStart(SourceLocation Loc, Scope *CurScope,
CapturedRegionKind Kind,
ArrayRef<CapturedParamNameType> Params);
StmtResult ActOnCapturedRegionEnd(Stmt *S);
void ActOnCapturedRegionError();
RecordDecl *CreateCapturedStmtRecordDecl(CapturedDecl *&CD,
SourceLocation Loc,
unsigned NumParams);
enum CopyElisionSemanticsKind {
CES_Strict = 0,
CES_AllowParameters = 1,
CES_AllowDifferentTypes = 2,
CES_AllowExceptionVariables = 4,
CES_FormerDefault = (CES_AllowParameters),
CES_Default = (CES_AllowParameters | CES_AllowDifferentTypes),
CES_AsIfByStdMove = (CES_AllowParameters | CES_AllowDifferentTypes |
CES_AllowExceptionVariables),
};
VarDecl *getCopyElisionCandidate(QualType ReturnType, Expr *E,
CopyElisionSemanticsKind CESK);
bool isCopyElisionCandidate(QualType ReturnType, const VarDecl *VD,
CopyElisionSemanticsKind CESK);
StmtResult ActOnReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp,
Scope *CurScope);
StmtResult BuildReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnCapScopeReturnStmt(SourceLocation ReturnLoc, Expr *RetValExp);
StmtResult ActOnGCCAsmStmt(SourceLocation AsmLoc, bool IsSimple,
bool IsVolatile, unsigned NumOutputs,
unsigned NumInputs, IdentifierInfo **Names,
MultiExprArg Constraints, MultiExprArg Exprs,
Expr *AsmString, MultiExprArg Clobbers,
unsigned NumLabels,
SourceLocation RParenLoc);
void FillInlineAsmIdentifierInfo(Expr *Res,
llvm::InlineAsmIdentifierInfo &Info);
ExprResult LookupInlineAsmIdentifier(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Id,
bool IsUnevaluatedContext);
bool LookupInlineAsmField(StringRef Base, StringRef Member,
unsigned &Offset, SourceLocation AsmLoc);
ExprResult LookupInlineAsmVarDeclField(Expr *RefExpr, StringRef Member,
SourceLocation AsmLoc);
StmtResult ActOnMSAsmStmt(SourceLocation AsmLoc, SourceLocation LBraceLoc,
ArrayRef<Token> AsmToks,
StringRef AsmString,
unsigned NumOutputs, unsigned NumInputs,
ArrayRef<StringRef> Constraints,
ArrayRef<StringRef> Clobbers,
ArrayRef<Expr*> Exprs,
SourceLocation EndLoc);
LabelDecl *GetOrCreateMSAsmLabel(StringRef ExternalLabelName,
SourceLocation Location,
bool AlwaysCreate);
VarDecl *BuildObjCExceptionDecl(TypeSourceInfo *TInfo, QualType ExceptionType,
SourceLocation StartLoc,
SourceLocation IdLoc, IdentifierInfo *Id,
bool Invalid = false);
Decl *ActOnObjCExceptionDecl(Scope *S, Declarator &D);
StmtResult ActOnObjCAtCatchStmt(SourceLocation AtLoc, SourceLocation RParen,
Decl *Parm, Stmt *Body);
StmtResult ActOnObjCAtFinallyStmt(SourceLocation AtLoc, Stmt *Body);
StmtResult ActOnObjCAtTryStmt(SourceLocation AtLoc, Stmt *Try,
MultiStmtArg Catch, Stmt *Finally);
StmtResult BuildObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw);
StmtResult ActOnObjCAtThrowStmt(SourceLocation AtLoc, Expr *Throw,
Scope *CurScope);
ExprResult ActOnObjCAtSynchronizedOperand(SourceLocation atLoc,
Expr *operand);
StmtResult ActOnObjCAtSynchronizedStmt(SourceLocation AtLoc,
Expr *SynchExpr,
Stmt *SynchBody);
StmtResult ActOnObjCAutoreleasePoolStmt(SourceLocation AtLoc, Stmt *Body);
VarDecl *BuildExceptionDeclaration(Scope *S, TypeSourceInfo *TInfo,
SourceLocation StartLoc,
SourceLocation IdLoc,
IdentifierInfo *Id);
Decl *ActOnExceptionDeclarator(Scope *S, Declarator &D);
StmtResult ActOnCXXCatchBlock(SourceLocation CatchLoc,
Decl *ExDecl, Stmt *HandlerBlock);
StmtResult ActOnCXXTryBlock(SourceLocation TryLoc, Stmt *TryBlock,
ArrayRef<Stmt *> Handlers);
StmtResult ActOnSEHTryBlock(bool IsCXXTry, // try (true) or __try (false) ?
SourceLocation TryLoc, Stmt *TryBlock,
Stmt *Handler);
StmtResult ActOnSEHExceptBlock(SourceLocation Loc,
Expr *FilterExpr,
Stmt *Block);
void ActOnStartSEHFinallyBlock();
void ActOnAbortSEHFinallyBlock();
StmtResult ActOnFinishSEHFinallyBlock(SourceLocation Loc, Stmt *Block);
StmtResult ActOnSEHLeaveStmt(SourceLocation Loc, Scope *CurScope);
void DiagnoseReturnInConstructorExceptionHandler(CXXTryStmt *TryBlock);
bool ShouldWarnIfUnusedFileScopedDecl(const DeclaratorDecl *D) const;
/// If it's a file scoped decl that must warn if not used, keep track
/// of it.
void MarkUnusedFileScopedDecl(const DeclaratorDecl *D);
/// DiagnoseUnusedExprResult - If the statement passed in is an expression
/// whose result is unused, warn.
void DiagnoseUnusedExprResult(const Stmt *S);
void DiagnoseUnusedNestedTypedefs(const RecordDecl *D);
void DiagnoseUnusedDecl(const NamedDecl *ND);
/// Emit \p DiagID if the statement located at \p StmtLoc has a suspicious
/// null statement as its \p Body, and it is located on the same line.
///
/// This helps prevent bugs due to typos, such as:
/// if (condition);
/// do_stuff();
void DiagnoseEmptyStmtBody(SourceLocation StmtLoc,
const Stmt *Body,
unsigned DiagID);
/// Warn if a for/while loop statement \p S, which is followed by
/// \p PossibleBody, has a suspicious null statement as a body.
void DiagnoseEmptyLoopBody(const Stmt *S,
const Stmt *PossibleBody);
/// Warn if a value is moved to itself.
void DiagnoseSelfMove(const Expr *LHSExpr, const Expr *RHSExpr,
SourceLocation OpLoc);
enum CheckedScopeTypeLocation {
CSTL_TopLevel,
CSTL_Nested,
CSTL_BoundsSafeInterface
};
/// Returns true if Ty is allowed in a checked scope:
/// - If Ty is a pointer or array type, it must be a checked pointer or
/// array type or an unchecked pointer or array type with a bounds-safe
/// interface.
/// - This rule applies recursively to any types nested within Ty.
/// - All other types are allowed in checked scopes.
/// Return false if Ty is not allowed.
bool AllowedInCheckedScope(QualType Ty,
const InteropTypeExpr *InteropType,
bool IsParam, CheckedScopeTypeLocation Loc,
CheckedScopeTypeLocation &ProblemLoc,
QualType &ProblemTy);
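// For example (Checked C syntax; illustrative only):
//   _Ptr<int> p;                // allowed: checked pointer type
//   int *q : itype(_Ptr<int>); // allowed: bounds-safe interface
//   int *r;                     // rejected inside a checked scope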
// Enum used in diagnostic messages to describe the kind of declaration
// being checked.
enum CheckedDeclKind {
CDK_Parameter,
CDK_FunctionReturn,
CDK_LocalVariable,
CDK_GlobalVariable,
CDK_Member
};
/// \param D - the target declaration.
/// \param UseLoc - defaults to an invalid location at the declaration;
/// it is valid only when the declaration is regarded as a use of the variable.
/// \returns true if the target declaration is a valid checked declaration.
bool DiagnoseCheckedDecl(const ValueDecl *D,
SourceLocation UseLoc = SourceLocation());
bool DiagnoseTypeInCheckedScope(QualType Ty, SourceLocation Start, SourceLocation End);
/// Warn if we're implicitly casting from a _Nullable pointer type to a
/// _Nonnull one.
void diagnoseNullableToNonnullConversion(QualType DstType, QualType SrcType,
SourceLocation Loc);
/// Warn when implicitly casting 0 to nullptr.
void diagnoseZeroToNullptrConversion(CastKind Kind, const Expr *E);
ParsingDeclState PushParsingDeclaration(sema::DelayedDiagnosticPool &pool) {
return DelayedDiagnostics.push(pool);
}
void PopParsingDeclaration(ParsingDeclState state, Decl *decl);
typedef ProcessingContextState ParsingClassState;
ParsingClassState PushParsingClass() {
return DelayedDiagnostics.pushUndelayed();
}
void PopParsingClass(ParsingClassState state) {
DelayedDiagnostics.popUndelayed(state);
}
void redelayDiagnostics(sema::DelayedDiagnosticPool &pool);
void DiagnoseAvailabilityOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass,
bool ObjCPropertyAccess,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
bool makeUnavailableInSystemHeader(SourceLocation loc,
UnavailableAttr::ImplicitReason reason);
/// Issue any -Wunguarded-availability warnings in \c FD
void DiagnoseUnguardedAvailabilityViolations(Decl *FD);
//===--------------------------------------------------------------------===//
// Expression Parsing Callbacks: SemaExpr.cpp.
bool CanUseDecl(NamedDecl *D, bool TreatUnavailableAsInvalid);
bool DiagnoseUseOfDecl(NamedDecl *D, ArrayRef<SourceLocation> Locs,
const ObjCInterfaceDecl *UnknownObjCClass = nullptr,
bool ObjCPropertyAccess = false,
bool AvoidPartialAvailabilityChecks = false,
ObjCInterfaceDecl *ClassReceiver = nullptr);
void NoteDeletedFunction(FunctionDecl *FD);
void NoteDeletedInheritingConstructor(CXXConstructorDecl *CD);
bool DiagnosePropertyAccessorMismatch(ObjCPropertyDecl *PD,
ObjCMethodDecl *Getter,
SourceLocation Loc);
void DiagnoseSentinelCalls(NamedDecl *D, SourceLocation Loc,
ArrayRef<Expr *> Args);
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, Decl *LambdaContextDecl = nullptr,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
enum ReuseLambdaContextDecl_t { ReuseLambdaContextDecl };
void PushExpressionEvaluationContext(
ExpressionEvaluationContext NewContext, ReuseLambdaContextDecl_t,
ExpressionEvaluationContextRecord::ExpressionKind Type =
ExpressionEvaluationContextRecord::EK_Other);
void PopExpressionEvaluationContext();
void DiscardCleanupsInEvaluationContext();
ExprResult TransformToPotentiallyEvaluated(Expr *E);
ExprResult HandleExprEvaluationContextForTypeof(Expr *E);
ExprResult ActOnConstantExpression(ExprResult Res);
// Functions for marking a declaration referenced. These functions also
// contain the relevant logic for marking if a reference to a function or
// variable is an odr-use (in the C++11 sense). There are separate variants
// for expressions referring to a decl; these exist because odr-use marking
// needs to be delayed for some constant variables when we build one of the
// named expressions.
//
// MightBeOdrUse indicates whether the use could possibly be an odr-use, and
// should usually be true. This only needs to be set to false if the lack of
// odr-use cannot be determined from the current context (for instance,
// because the name denotes a virtual function and was written without an
// explicit nested-name-specifier).
void MarkAnyDeclReferenced(SourceLocation Loc, Decl *D, bool MightBeOdrUse);
void MarkFunctionReferenced(SourceLocation Loc, FunctionDecl *Func,
bool MightBeOdrUse = true);
void MarkVariableReferenced(SourceLocation Loc, VarDecl *Var);
void MarkDeclRefReferenced(DeclRefExpr *E, const Expr *Base = nullptr);
void MarkMemberReferenced(MemberExpr *E);
void MarkFunctionParmPackReferenced(FunctionParmPackExpr *E);
void MarkCaptureUsedInEnclosingContext(VarDecl *Capture, SourceLocation Loc,
unsigned CapturingScopeIndex);
ExprResult CheckLValueToRValueConversionOperand(Expr *E);
void CleanupVarDeclMarking();
enum TryCaptureKind {
TryCapture_Implicit, TryCapture_ExplicitByVal, TryCapture_ExplicitByRef
};
/// Try to capture the given variable.
///
/// \param Var The variable to capture.
///
/// \param Loc The location at which the capture occurs.
///
/// \param Kind The kind of capture, which may be implicit (for either a
/// block or a lambda), or explicit by-value or by-reference (for a lambda).
///
/// \param EllipsisLoc The location of the ellipsis, if one is provided in
/// an explicit lambda capture.
///
/// \param BuildAndDiagnose Whether we are actually supposed to add the
/// captures or diagnose errors. If false, this routine merely checks whether
/// the capture can occur without performing the capture itself or complaining
/// if the variable cannot be captured.
///
/// \param CaptureType Will be set to the type of the field used to capture
/// this variable in the innermost block or lambda. Only valid when the
/// variable can be captured.
///
/// \param DeclRefType Will be set to the type of a reference to the capture
/// from within the current scope. Only valid when the variable can be
/// captured.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// variables that may or may not be used in certain specializations of
/// a nested generic lambda.
///
/// \returns true if an error occurred (i.e., the variable cannot be
/// captured) and false if the capture succeeded.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc, TryCaptureKind Kind,
SourceLocation EllipsisLoc, bool BuildAndDiagnose,
QualType &CaptureType,
QualType &DeclRefType,
const unsigned *const FunctionScopeIndexToStopAt);
/// Try to capture the given variable.
bool tryCaptureVariable(VarDecl *Var, SourceLocation Loc,
TryCaptureKind Kind = TryCapture_Implicit,
SourceLocation EllipsisLoc = SourceLocation());
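// A minimal "dry run" usage sketch (illustrative only): check whether a
// variable can be captured without adding captures or emitting diagnostics.
//   QualType CaptureType, DeclRefType;
//   bool CannotCapture = S.tryCaptureVariable(
//       Var, Loc, Sema::TryCapture_Implicit, SourceLocation(),
//       /*BuildAndDiagnose=*/false, CaptureType, DeclRefType,
//       /*FunctionScopeIndexToStopAt=*/nullptr);
//   // CaptureType and DeclRefType are meaningful only if CannotCapture is
//   // false.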
/// Checks if the variable must be captured.
bool NeedToCaptureVariable(VarDecl *Var, SourceLocation Loc);
/// Given a variable, determine the type that a reference to that
/// variable will have in the given scope.
QualType getCapturedDeclRefType(VarDecl *Var, SourceLocation Loc);
/// Mark all of the declarations referenced within a particular AST node as
/// referenced. Used when template instantiation instantiates a non-dependent
/// type -- entities referenced by the type are now referenced.
void MarkDeclarationsReferencedInType(SourceLocation Loc, QualType T);
void MarkDeclarationsReferencedInExpr(Expr *E,
bool SkipLocalVariables = false);
/// Try to recover by turning the given expression into a
/// call. Returns true if recovery was attempted or an error was
/// emitted; this may also leave the ExprResult invalid.
bool tryToRecoverWithCall(ExprResult &E, const PartialDiagnostic &PD,
bool ForceComplain = false,
bool (*IsPlausibleResult)(QualType) = nullptr);
/// Figure out if an expression could be turned into a call.
bool tryExprAsCall(Expr &E, QualType &ZeroArgCallReturnTy,
UnresolvedSetImpl &NonTemplateOverloads);
/// Conditionally issue a diagnostic based on the current
/// evaluation context.
///
/// \param Statement If Statement is non-null, delay reporting the
/// diagnostic until the function body is parsed, and then do a basic
/// reachability analysis to determine if the statement is reachable.
/// If it is unreachable, the diagnostic will not be emitted.
bool DiagRuntimeBehavior(SourceLocation Loc, const Stmt *Statement,
const PartialDiagnostic &PD);
/// Similar, but diagnostic is only produced if all the specified statements
/// are reachable.
bool DiagRuntimeBehavior(SourceLocation Loc, ArrayRef<const Stmt*> Stmts,
const PartialDiagnostic &PD);
// Primary Expressions.
SourceRange getExprRange(Expr *E) const;
ExprResult ActOnIdExpression(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
UnqualifiedId &Id, bool HasTrailingLParen, bool IsAddressOfOperand,
CorrectionCandidateCallback *CCC = nullptr,
bool IsInlineAsmIdentifier = false, Token *KeywordReplacement = nullptr);
void DecomposeUnqualifiedId(const UnqualifiedId &Id,
TemplateArgumentListInfo &Buffer,
DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *&TemplateArgs);
bool
DiagnoseEmptyLookup(Scope *S, CXXScopeSpec &SS, LookupResult &R,
CorrectionCandidateCallback &CCC,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr,
ArrayRef<Expr *> Args = None, TypoExpr **Out = nullptr);
ExprResult LookupInObjCMethod(LookupResult &LookUp, Scope *S,
IdentifierInfo *II,
bool AllowBuiltinCreation=false);
ExprResult ActOnDependentIdExpression(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
bool isAddressOfOperand,
const TemplateArgumentListInfo *TemplateArgs);
/// If \p D cannot be odr-used in the current expression evaluation context,
/// return a reason explaining why. Otherwise, return NOUR_None.
NonOdrUseReason getNonOdrUseReasonInCurrentContext(ValueDecl *D);
ExprResult BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
SourceLocation Loc,
const CXXScopeSpec *SS = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
const CXXScopeSpec *SS = nullptr,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildDeclRefExpr(ValueDecl *D, QualType Ty, ExprValueKind VK,
const DeclarationNameInfo &NameInfo,
NestedNameSpecifierLoc NNS,
NamedDecl *FoundD = nullptr,
SourceLocation TemplateKWLoc = SourceLocation(),
const TemplateArgumentListInfo *TemplateArgs = nullptr);
ExprResult
BuildAnonymousStructUnionMemberReference(
const CXXScopeSpec &SS,
SourceLocation nameLoc,
IndirectFieldDecl *indirectField,
DeclAccessPair FoundDecl = DeclAccessPair::make(nullptr, AS_none),
Expr *baseObjectExpr = nullptr,
SourceLocation opLoc = SourceLocation());
ExprResult BuildPossibleImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S);
ExprResult BuildImplicitMemberExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
bool IsDefiniteInstance,
const Scope *S);
bool UseArgumentDependentLookup(const CXXScopeSpec &SS,
const LookupResult &R,
bool HasTrailingLParen);
ExprResult
BuildQualifiedDeclarationNameExpr(CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
bool IsAddressOfOperand, const Scope *S,
TypeSourceInfo **RecoveryTSI = nullptr);
ExprResult BuildDependentDeclRefExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildDeclarationNameExpr(const CXXScopeSpec &SS,
LookupResult &R,
bool NeedsADL,
bool AcceptInvalidDecl = false);
ExprResult BuildDeclarationNameExpr(
const CXXScopeSpec &SS, const DeclarationNameInfo &NameInfo, NamedDecl *D,
NamedDecl *FoundD = nullptr,
const TemplateArgumentListInfo *TemplateArgs = nullptr,
bool AcceptInvalidDecl = false);
ExprResult BuildLiteralOperatorCall(LookupResult &R,
DeclarationNameInfo &SuffixInfo,
ArrayRef<Expr *> Args,
SourceLocation LitEndLoc,
TemplateArgumentListInfo *ExplicitTemplateArgs = nullptr);
ExprResult BuildPredefinedExpr(SourceLocation Loc,
PredefinedExpr::IdentKind IK);
ExprResult ActOnPredefinedExpr(SourceLocation Loc, tok::TokenKind Kind);
ExprResult ActOnIntegerConstant(SourceLocation Loc, uint64_t Val);
bool CheckLoopHintExpr(Expr *E, SourceLocation Loc);
ExprResult ActOnNumericConstant(const Token &Tok, Scope *UDLScope = nullptr);
ExprResult ActOnCharacterConstant(const Token &Tok,
Scope *UDLScope = nullptr);
ExprResult ActOnParenExpr(SourceLocation L, SourceLocation R, Expr *E);
ExprResult ActOnParenListExpr(SourceLocation L,
SourceLocation R,
MultiExprArg Val);
/// ActOnStringLiteral - The specified tokens were lexed as pasted string
/// fragments (e.g. "foo" "bar" L"baz").
ExprResult ActOnStringLiteral(ArrayRef<Token> StringToks,
Scope *UDLScope = nullptr);
ExprResult ActOnGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<ParsedType> ArgTypes,
ArrayRef<Expr *> ArgExprs);
ExprResult CreateGenericSelectionExpr(SourceLocation KeyLoc,
SourceLocation DefaultLoc,
SourceLocation RParenLoc,
Expr *ControllingExpr,
ArrayRef<TypeSourceInfo *> Types,
ArrayRef<Expr *> Exprs);
// Binary/Unary Operators. 'Tok' is the token for the operator.
ExprResult CreateBuiltinUnaryOp(SourceLocation OpLoc, UnaryOperatorKind Opc,
Expr *InputExpr);
ExprResult BuildUnaryOp(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opc, Expr *Input);
ExprResult ActOnUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Op, Expr *Input);
bool isQualifiedMemberAccess(Expr *E);
QualType CheckAddressOfOperand(ExprResult &Operand, SourceLocation OpLoc);
ExprResult CreateUnaryExprOrTypeTraitExpr(TypeSourceInfo *TInfo,
SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
SourceRange R);
ExprResult CreateUnaryExprOrTypeTraitExpr(Expr *E, SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind);
ExprResult
ActOnUnaryExprOrTypeTraitExpr(SourceLocation OpLoc,
UnaryExprOrTypeTrait ExprKind,
bool IsType, void *TyOrEx,
SourceRange ArgRange);
ExprResult CheckPlaceholderExpr(Expr *E);
bool CheckVecStepExpr(Expr *E);
bool CheckUnaryExprOrTypeTraitOperand(Expr *E, UnaryExprOrTypeTrait ExprKind);
bool CheckUnaryExprOrTypeTraitOperand(QualType ExprType, SourceLocation OpLoc,
SourceRange ExprRange,
UnaryExprOrTypeTrait ExprKind);
ExprResult ActOnSizeofParameterPackExpr(Scope *S,
SourceLocation OpLoc,
IdentifierInfo &Name,
SourceLocation NameLoc,
SourceLocation RParenLoc);
ExprResult ActOnPostfixUnaryOp(Scope *S, SourceLocation OpLoc,
tok::TokenKind Kind, Expr *Input);
ExprResult ActOnArraySubscriptExpr(Scope *S, Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult CreateBuiltinArraySubscriptExpr(Expr *Base, SourceLocation LLoc,
Expr *Idx, SourceLocation RLoc);
ExprResult ActOnOMPArraySectionExpr(Expr *Base, SourceLocation LBLoc,
Expr *LowerBound, SourceLocation ColonLoc,
Expr *Length, SourceLocation RBLoc);
// This struct is for use by ActOnMemberAccess to allow
// BuildMemberReferenceExpr to be able to reinvoke ActOnMemberAccess after
// changing the access operator from a '.' to a '->' (to see if that is the
// change needed to fix an error about an unknown member, e.g. when the class
// defines a custom operator->).
struct ActOnMemberAccessExtraArgs {
Scope *S;
UnqualifiedId &Id;
Decl *ObjCImpDecl;
};
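// Illustrative example (hypothetical code): for
//
//   struct Ptr { Obj *operator->(); };
//   Ptr p;
//   p.field;  // no member 'field' in Ptr; retried as p->field via operator->
//
// the extra arguments let the member access be reinvoked with '->' after the
// initial '.' access fails.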
ExprResult BuildMemberReferenceExpr(
Expr *Base, QualType BaseType, SourceLocation OpLoc, bool IsArrow,
CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult
BuildMemberReferenceExpr(Expr *Base, QualType BaseType, SourceLocation OpLoc,
bool IsArrow, const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope, LookupResult &R,
const TemplateArgumentListInfo *TemplateArgs,
const Scope *S,
bool SuppressQualifierCheck = false,
ActOnMemberAccessExtraArgs *ExtraArgs = nullptr);
ExprResult BuildFieldReferenceExpr(Expr *BaseExpr, bool IsArrow,
SourceLocation OpLoc,
const CXXScopeSpec &SS, FieldDecl *Field,
DeclAccessPair FoundDecl,
const DeclarationNameInfo &MemberNameInfo);
ExprResult PerformMemberExprBaseConversion(Expr *Base, bool IsArrow);
bool CheckQualifiedMemberReference(Expr *BaseExpr, QualType BaseType,
const CXXScopeSpec &SS,
const LookupResult &R);
ExprResult ActOnDependentMemberExpr(Expr *Base, QualType BaseType,
bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
NamedDecl *FirstQualifierInScope,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult ActOnMemberAccessExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
UnqualifiedId &Member,
Decl *ObjCImpDecl);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
const CXXScopeSpec *SS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
MemberExpr *
BuildMemberExpr(Expr *Base, bool IsArrow, SourceLocation OpLoc,
NestedNameSpecifierLoc NNS, SourceLocation TemplateKWLoc,
ValueDecl *Member, DeclAccessPair FoundDecl,
bool HadMultipleCandidates,
const DeclarationNameInfo &MemberNameInfo, QualType Ty,
ExprValueKind VK, ExprObjectKind OK,
const TemplateArgumentListInfo *TemplateArgs = nullptr);
void ActOnDefaultCtorInitializers(Decl *CDtorDecl);
bool ConvertArgumentsForCall(CallExpr *Call, Expr *Fn,
FunctionDecl *FDecl,
const FunctionProtoType *Proto,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
bool ExecConfig = false);
void CheckStaticArrayArgument(SourceLocation CallLoc,
ParmVarDecl *Param,
const Expr *ArgExpr);
/// ActOnCallExpr - Handle a call to Fn with the specified array of arguments.
/// This provides the location of the left/right parens and a list of comma
/// locations.
ExprResult ActOnCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr);
ExprResult BuildCallExpr(Scope *S, Expr *Fn, SourceLocation LParenLoc,
MultiExprArg ArgExprs, SourceLocation RParenLoc,
Expr *ExecConfig = nullptr,
bool IsExecConfig = false);
ExprResult
BuildResolvedCallExpr(Expr *Fn, NamedDecl *NDecl, SourceLocation LParenLoc,
ArrayRef<Expr *> Arg, SourceLocation RParenLoc,
Expr *Config = nullptr, bool IsExecConfig = false,
ADLCallKind UsesADL = ADLCallKind::NotADL);
ExprResult ActOnCUDAExecConfigExpr(Scope *S, SourceLocation LLLLoc,
MultiExprArg ExecConfig,
SourceLocation GGGLoc);
ExprResult ActOnCastExpr(Scope *S, SourceLocation LParenLoc,
Declarator &D, ParsedType &Ty,
SourceLocation RParenLoc, Expr *CastExpr);
ExprResult BuildCStyleCastExpr(SourceLocation LParenLoc,
TypeSourceInfo *Ty,
SourceLocation RParenLoc,
Expr *Op,
bool isCheckedScope = false);
CastKind PrepareScalarCast(ExprResult &src, QualType destType);
/// Build an altivec or OpenCL literal.
ExprResult BuildVectorLiteral(SourceLocation LParenLoc,
SourceLocation RParenLoc, Expr *E,
TypeSourceInfo *TInfo);
ExprResult MaybeConvertParenListExprToParenExpr(Scope *S, Expr *ME);
ExprResult ActOnCompoundLiteral(SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc,
Expr *InitExpr);
ExprResult BuildCompoundLiteralExpr(SourceLocation LParenLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc,
Expr *LiteralExpr);
ExprResult ActOnInitList(SourceLocation LBraceLoc,
MultiExprArg InitArgList,
SourceLocation RBraceLoc);
ExprResult ActOnDesignatedInitializer(Designation &Desig,
SourceLocation Loc,
bool GNUSyntax,
ExprResult Init);
private:
static BinaryOperatorKind ConvertTokenKindToBinaryOpcode(tok::TokenKind Kind);
public:
ExprResult ActOnBinOp(Scope *S, SourceLocation TokLoc,
tok::TokenKind Kind, Expr *LHSExpr, Expr *RHSExpr);
ExprResult BuildBinOp(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opc, Expr *LHSExpr, Expr *RHSExpr);
ExprResult CreateBuiltinBinOp(SourceLocation OpLoc, BinaryOperatorKind Opc,
Expr *LHSExpr, Expr *RHSExpr);
void DiagnoseCommaOperator(const Expr *LHS, SourceLocation Loc);
/// ActOnConditionalOp - Parse a ?: operation. Note that 'LHS' may be null
/// in the case of the GNU conditional expr extension.
ExprResult ActOnConditionalOp(SourceLocation QuestionLoc,
SourceLocation ColonLoc,
Expr *CondExpr, Expr *LHSExpr, Expr *RHSExpr);
/// ActOnAddrLabel - Parse the GNU address of label extension: "&&foo".
ExprResult ActOnAddrLabel(SourceLocation OpLoc, SourceLocation LabLoc,
LabelDecl *TheDecl);
void ActOnStartStmtExpr();
ExprResult ActOnStmtExpr(SourceLocation LPLoc, Stmt *SubStmt,
SourceLocation RPLoc); // "({..})"
// Handle the final expression in a statement expression.
ExprResult ActOnStmtExprResult(ExprResult E);
void ActOnStmtExprError();
// __builtin_offsetof(type, identifier(.identifier|[expr])*)
struct OffsetOfComponent {
SourceLocation LocStart, LocEnd;
bool isBrackets; // true if [expr], false if .ident
union {
IdentifierInfo *IdentInfo;
Expr *E;
} U;
};
/// __builtin_offsetof(type, a.b[123][456].c)
ExprResult BuildBuiltinOffsetOf(SourceLocation BuiltinLoc,
TypeSourceInfo *TInfo,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
ExprResult ActOnBuiltinOffsetOf(Scope *S,
SourceLocation BuiltinLoc,
SourceLocation TypeLoc,
ParsedType ParsedArgTy,
ArrayRef<OffsetOfComponent> Components,
SourceLocation RParenLoc);
// __builtin_choose_expr(constExpr, expr1, expr2)
ExprResult ActOnChooseExpr(SourceLocation BuiltinLoc,
Expr *CondExpr, Expr *LHSExpr,
Expr *RHSExpr, SourceLocation RPLoc);
// __builtin_va_arg(expr, type)
ExprResult ActOnVAArg(SourceLocation BuiltinLoc, Expr *E, ParsedType Ty,
SourceLocation RPLoc);
ExprResult BuildVAArgExpr(SourceLocation BuiltinLoc, Expr *E,
TypeSourceInfo *TInfo, SourceLocation RPLoc);
// __builtin_LINE(), __builtin_FUNCTION(), __builtin_FILE(),
// __builtin_COLUMN()
ExprResult ActOnSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc,
SourceLocation RPLoc);
// Build a potentially resolved SourceLocExpr.
ExprResult BuildSourceLocExpr(SourceLocExpr::IdentKind Kind,
SourceLocation BuiltinLoc, SourceLocation RPLoc,
DeclContext *ParentContext);
// __null
ExprResult ActOnGNUNullExpr(SourceLocation TokenLoc);
bool CheckCaseExpression(Expr *E);
/// Describes the result of an "if-exists" condition check.
enum IfExistsResult {
/// The symbol exists.
IER_Exists,
/// The symbol does not exist.
IER_DoesNotExist,
/// The name is a dependent name, so the results will differ
/// from one instantiation to the next.
IER_Dependent,
/// An error occurred.
IER_Error
};
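// Illustrative example of the Microsoft extension (hypothetical code):
//
//   __if_exists(N::f)     { /* compiled only if N::f exists */ }
//   __if_not_exists(N::f) { /* compiled only if N::f does not exist */ }
//
// Inside a template, N::f may be a dependent name, yielding IER_Dependent.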
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, CXXScopeSpec &SS,
const DeclarationNameInfo &TargetNameInfo);
IfExistsResult
CheckMicrosoftIfExistsSymbol(Scope *S, SourceLocation KeywordLoc,
bool IsIfExists, CXXScopeSpec &SS,
UnqualifiedId &Name);
StmtResult BuildMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
NestedNameSpecifierLoc QualifierLoc,
DeclarationNameInfo NameInfo,
Stmt *Nested);
StmtResult ActOnMSDependentExistsStmt(SourceLocation KeywordLoc,
bool IsIfExists,
CXXScopeSpec &SS, UnqualifiedId &Name,
Stmt *Nested);
//===------------------------- "Block" Extension ------------------------===//
/// ActOnBlockStart - This callback is invoked when a block literal is
/// started.
void ActOnBlockStart(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockArguments - This callback allows processing of block arguments.
/// If there are no arguments, this is still invoked.
void ActOnBlockArguments(SourceLocation CaretLoc, Declarator &ParamInfo,
Scope *CurScope);
/// ActOnBlockError - If there is an error parsing a block, this callback
/// is invoked to pop the information about the block from the action impl.
void ActOnBlockError(SourceLocation CaretLoc, Scope *CurScope);
/// ActOnBlockStmtExpr - This is called when the body of a block statement
/// literal was successfully completed. ^(int x){...}
ExprResult ActOnBlockStmtExpr(SourceLocation CaretLoc, Stmt *Body,
Scope *CurScope);
//===---------------------------- Checked C Extension ----------------------===//
private:
QualType ValidateBoundsExprArgument(Expr *Arg);
public:
ExprResult ActOnNullaryBoundsExpr(SourceLocation BoundKWLoc,
BoundsExpr::Kind Kind,
SourceLocation RParenLoc);
ExprResult ActOnCountBoundsExpr(SourceLocation BoundsKWLoc,
BoundsExpr::Kind Kind, Expr *CountExpr,
SourceLocation RParenLoc);
ExprResult ActOnRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
Expr *UpperBound, SourceLocation RParenLoc);
ExprResult CreateRangeBoundsExpr(SourceLocation BoundsKWLoc, Expr *LowerBound,
Expr *UpperBound,
RelativeBoundsClause *Relative,
SourceLocation RParenLoc);
ExprResult ActOnBoundsInteropType(SourceLocation TypeKWLoc, ParsedType Ty,
SourceLocation RParenLoc);
ExprResult CreateBoundsInteropTypeExpr(SourceLocation TypeKWLoc,
TypeSourceInfo *TInfo,
SourceLocation RParenLoc);
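// Illustrative Checked C declarations (hypothetical code) showing which
// action each bounds annotation reaches:
//
//   _Array_ptr<int> p : count(n);          // ActOnCountBoundsExpr
//   _Array_ptr<int> q : bounds(p, p + n);  // ActOnRangeBoundsExpr
//   int *r : itype(_Ptr<int>);             // ActOnBoundsInteropType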
ExprResult CreatePositionalParameterExpr(unsigned Index, QualType QT);
RelativeBoundsClause* ActOnRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
ParsedType Ty,
SourceLocation RParenLoc);
RelativeBoundsClause *
CreateRelativeTypeBoundsClause(SourceLocation BoundsKWLoc,
TypeSourceInfo *TyInfo,
SourceLocation RParenLoc);
RelativeBoundsClause* ActOnRelativeConstExprClause(Expr *ConstExpr,
SourceLocation BoundsKWLoc,
SourceLocation RParenLoc);
bool CheckBoundsCastBaseType(Expr *E1);
ExprResult
ActOnBoundsCastExprBounds(Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, ParsedType D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, SourceLocation RParenLoc,
Expr *E1, BoundsExpr *ParsedBounds);
ExprResult ActOnBoundsCastExprSingle(
Scope *S, SourceLocation OpLoc, tok::TokenKind Kind,
SourceLocation LAngleBracketLoc, ParsedType D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc, SourceLocation RParenLoc, Expr *E1);
ExprResult BuildBoundsCastExpr(SourceLocation OpLoc, tok::TokenKind Kind,
TypeSourceInfo *CastTypeInfo,
SourceRange AngleBrackets,
SourceRange Paren, Expr *E1,
BoundsExpr *bounds);
bool DiagnoseBoundsDeclType(QualType Ty, DeclaratorDecl *D,
BoundsAnnotations &BA, bool IsReturnAnnots);
/// \brief Update the information in ASTContext that tracks, for a member,
/// which bounds declarations depend upon it. FD is the member whose
/// bounds are given by Bounds.
void TrackMemberBoundsDependences(FieldDecl *FD, BoundsExpr *Bounds);
void ActOnBoundsDecl(DeclaratorDecl *D, BoundsAnnotations Annots,
bool MergeDeferredBounds = false);
void ActOnEmptyBoundsDecl(DeclaratorDecl *D);
void ActOnInvalidBoundsDecl(DeclaratorDecl *D);
/// \brief Add default bounds/interop type expressions to Annots, if appropriate.
void InferBoundsAnnots(QualType Ty, BoundsAnnotations &Annots, bool IsParam);
// \#pragma CHECKED_SCOPE.
enum PragmaCheckedScopeKind {
PCSK_On,
PCSK_Off,
PCSK_BoundsOnly,
PCSK_Push,
PCSK_Pop
};
void ActOnPragmaCheckedScope(PragmaCheckedScopeKind Kind, SourceLocation Loc);
void DiagnoseUnterminatedPragmaCheckedScopePush();
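// Illustrative usage (the pragma arguments map onto the
// PragmaCheckedScopeKind enumerators; spelling shown is a sketch):
//
//   #pragma CHECKED_SCOPE push
//   #pragma CHECKED_SCOPE on   // PCSK_On: code below is in a checked scope
//   // ... checked code ...
//   #pragma CHECKED_SCOPE pop  // PCSK_Pop: restore the previous setting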
BoundsExpr *CreateInvalidBoundsExpr();
/// \brief Synthesize the interop type expression implied by the presence
/// of a bounds expression. Ty is the original unchecked type. Returns null
/// if none exists.
InteropTypeExpr *SynthesizeInteropTypeExpr(QualType Ty, bool IsParam);
BoundsExpr *CreateCountForArrayType(QualType QT);
// _Return_value in Checked C bounds expressions.
ExprResult ActOnReturnValueExpr(SourceLocation Loc);
/// \brief When non-NULL, the type of the '_Return_value' expression.
QualType BoundsExprReturnValue;
/// \brief RAII object used to temporarily set the type of '_Return_value'.
class CheckedCReturnValueRAII {
Sema &S;
QualType OldReturnValue;
public:
CheckedCReturnValueRAII(Sema &S, QualType ReturnVal) : S(S) {
OldReturnValue = S.BoundsExprReturnValue;
S.BoundsExprReturnValue = ReturnVal;
}
~CheckedCReturnValueRAII() {
S.BoundsExprReturnValue = OldReturnValue;
}
};
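// Usage sketch (illustrative; FD stands for a hypothetical FunctionDecl
// being checked): while checking a function's return bounds,
//
//   CheckedCReturnValueRAII RV(*this, FD->getReturnType());
//   // ... '_Return_value' now has FD's return type ...
//
// and the destructor restores the previous type when RV goes out of scope.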
typedef bool
(*ParseDeferredBoundsCallBackFn)(void *P,
std::unique_ptr<CachedTokens> Toks,
ArrayRef<ParmVarDecl *> Params,
BoundsAnnotations &Result,
const Declarator &D);
void SetDeferredBoundsCallBack(void *OpaqueData, ParseDeferredBoundsCallBackFn p);
ParseDeferredBoundsCallBackFn DeferredBoundsParser;
void *DeferredBoundsParserData;
// Represents the context where an expression must be non-modifying.
enum NonModifyingContext {
NMC_Unknown,
NMC_Dynamic_Check,
NMC_Count, // Bounds count expression.
NMC_Byte_Count, // Bounds byte count expression.
NMC_Range, // Bounds range expression.
NMC_Function_Return, // Argument for parameter used in function
// return bounds.
NMC_Function_Parameter // Argument for parameter used in function
// parameter bounds.
};
/// \brief The kind of message to emit when an expression that is required
/// to be non-modifying is found to be modifying: no message, an error, or
/// a note.
enum NonModifyingMessage {
NMM_None,
NMM_Error,
NMM_Note
};
/// \brief Checks whether an expression is non-modifying
/// (see Checked C Spec, 3.6.1). Returns true if the expression is non-modifying,
/// false otherwise.
bool CheckIsNonModifying(Expr *E, NonModifyingContext Req =
NonModifyingContext::NMC_Unknown,
NonModifyingMessage = NMM_Error);
BoundsExpr *CheckNonModifyingBounds(BoundsExpr *Bounds, Expr *E);
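// Illustrative example (hypothetical declarations):
//
//   _Array_ptr<int> p : count(len++);  // rejected: 'len++' modifies len
//   _Array_ptr<int> q : count(len);    // accepted: 'len' is non-modifying
//
// The first declaration is diagnosed with the NMC_Count context.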
ExprResult ActOnFunctionTypeApplication(ExprResult TypeFunc, SourceLocation Loc, ArrayRef<TypeArgument> Args);
RecordDecl *ActOnRecordTypeApplication(RecordDecl *Base, ArrayRef<TypeArgument> TypeArgs);
const ExistentialType *ActOnExistentialType(ASTContext &Context, const Type *TypeVar, QualType InnerType);
/// Complete a delayed type application by populating the record's fields with the right types.
/// Should only be called once per delayed 'RecordDecl'.
void CompleteTypeAppFields(RecordDecl *Incomplete);
// Determine whether the given 'RecordDecl' is part of an 'expanding cycle'.
// Generic records that form part of an expanding cycle can't be instantiated because they
// produce an infinite number of type applications (because we construct the transitive closure
// of type applications eagerly).
//
// Consider the graph of type parameter dependencies as defined below. An expanding cycle
// is a cycle in the graph that contains at least one expanding edge.
//
// We show how the graph is built via an example. Suppose we have three generic structs A<T>, B<U>, C<V>:
//
// struct A _For_any(T) { struct A<T>* a; struct B<T> *b; }
// struct B _For_any(U) { struct C<struct C<U> > *c; }
// struct C _For_any(V) { struct A<V>* a; }
//
// The vertices of the graph are T, U, and V (the type parameters, alpha-renamed if needed).
// There is an edge between nodes N1 and N2 if N2 is used in a field anywhere in the position of N1.
// If N2 appears at the "top-level" replacing N1, then the resulting edge is "non-expanding".
// Otherwise, if N2 appears nested within the argument that replaces N1, then the edge is "expanding".
//
// In our example the edges are:
//
// non-expanding: T -> T, T -> U, V -> T, U -> V
// expanding: U => V
//
// T -> U, U => V, V -> T is an expanding cycle because it contains the expanding edge U => V
//
// The cycle will be detected when C is processed (because C is defined last). If we tried to instantiate C, we would
// end up performing the following type applications:
// A<V>, B<V>, C<C<V>>, A<C<V>>, B<C<V>>, C<C<C<V>>>, ...
//
// The definition of expanding cycle is adapted from the 'ECMA 335 Common Language Infrastructure (CLI) Partitions I to VI' standard.
// Specifically, Partition II, section II.9.2 'Generics and recursive inheritance graphs'.
bool DiagnoseExpandingCycles(RecordDecl *Base, SourceLocation Loc);
QualType SubstituteTypeArgs(QualType QT, ArrayRef<TypeArgument> TypeArgs);
std::vector<const TypedefNameDecl *> FindFreeVariableDecls(QualType T);
bool AbstractForFunctionType(BoundsAnnotations &BA,
ArrayRef<DeclaratorChunk::ParamInfo> Params);
/// \brief Take a bounds expression with positional parameters from a function
/// type and substitute DeclRefs to the corresponding parameters in Params.
BoundsExpr *ConcretizeFromFunctionType(BoundsExpr *Expr,
ArrayRef<ParmVarDecl *> Params);
/// \brief Take a member bounds expression with member references and
/// replace the member references with member access expressions using
/// MemberBase as the base. Returns nullptr if there is an error.
BoundsExpr *MakeMemberBoundsConcrete(Expr *MemberBase, bool IsArrow,
BoundsExpr *Bounds);
BoundsExpr *ConcretizeFromFunctionTypeWithArgs(BoundsExpr *Bounds, ArrayRef<Expr *> Args,
NonModifyingContext ErrorKind);
/// ConvertToFullyCheckedType: convert an expression E to a fully checked type. This
/// is used to retype declrefs and member exprs in checked scopes with bounds-safe
/// interfaces. The Checked C spec says that such uses in checked scopes shall be
/// treated as having "checked type".
ExprResult ConvertToFullyCheckedType(Expr *E, InteropTypeExpr *BA, bool IsParamUse,
ExprValueKind VK);
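// Illustrative example (hypothetical code):
//
//   int *p : itype(_Ptr<int>);
//   #pragma CHECKED_SCOPE on
//   // uses of 'p' here are retyped as _Ptr<int> by ConvertToFullyCheckedType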
/// GetArrayPtrDereference - determine if an lvalue expression is a
/// dereference of an _Array_ptr or _Nt_array_ptr (via '*' or an array
/// subscript operator). If it is, return the actual dereference expression
/// and set Result to the pointer type being dereferenced. Otherwise, return
/// null.
Expr *GetArrayPtrDereference(Expr *E, QualType &Result);
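// Illustrative example (hypothetical code): given
//
//   _Array_ptr<int> p : count(n);
//   *p = 0;    // returns the '*p' dereference; Result is set to p's type
//   p[i] = 0;  // returns the 'p[i]' subscript expression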
/// MakeAssignmentImplicitCastExplicit: E has had assignment conversion rules
/// applied to it. If an implicit cast has been introduced because of the
/// assignment conversion rules, replace it with an explicit cast.
/// This allows us to substitute E into other operator expressions without worrying
/// about the different implicit conversion rules between assignments and
/// other operators. Sema tree rewriting assumes that semantic
/// analysis will recreate implicit casts. That doesn't happen properly if
/// E is taken from an assignment expression and used in another operator expression.
Expr *MakeAssignmentImplicitCastExplicit(Expr *E);
enum BoundsDeclarationCheck {
BDC_Assignment,
BDC_Decrement,
BDC_Increment,
BDC_Initialization,
BDC_Statement,
};
/// \brief Check that an address-of operation is not taking the
/// address of members used in bounds.
void CheckAddressTakenMembers(UnaryOperator *AddrOf);
/// \brief Check whether E contains a return value expression.
bool ContainsReturnValueExpr(Expr *E);
/// \brief Wrap a call expression in a Checked C temporary binding
/// expression, if a temporary is needed to describe the bounds
/// of the result of the call expression.
ExprResult CreateTemporaryForCallIfNeeded(ExprResult R);
/// CheckFunctionBodyBoundsDecls - check bounds declarations within a function
/// body.
void CheckFunctionBodyBoundsDecls(FunctionDecl *FD, Stmt *Body);
/// CheckTopLevelBoundsDecls - check bounds declarations for variable declarations
/// not within a function body.
void CheckTopLevelBoundsDecls(VarDecl *VD);
// WarnDynamicCheckAlwaysFails - Adds a warning if an explicit dynamic check
// will always fail.
void WarnDynamicCheckAlwaysFails(const Expr *Condition);
// If the VarDecl D has a byte_count or count bounds expression,
// NormalizeBounds expands it to a range bounds expression. The expanded
// range bounds are attached to the VarDecl D to avoid recomputing the
// normalized bounds for D.
BoundsExpr *NormalizeBounds(const VarDecl *D);
// This is wrapper around CheckBoundsDeclaration::ExpandToRange. This
// provides an easy way to invoke this function from outside the class. Given
// a byte_count or count bounds expression for the VarDecl D, ExpandToRange
// will expand it to a range bounds expression.
BoundsExpr *ExpandBoundsToRange(const VarDecl *D, const BoundsExpr *B);
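// Illustrative example (hypothetical declaration): for
//
//   _Array_ptr<int> p : count(3);
//
// NormalizeBounds/ExpandBoundsToRange yield the equivalent range form
// bounds(p, p + 3).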
//
// Track variables that in-scope bounds declarations depend upon.
// TODO: generalize this to other lvalue expressions.
class BoundsDependencyTracker {
public:
typedef SmallVector<VarDecl *, 2> VarBoundsDecls;
typedef VarBoundsDecls::iterator VarBoundsIterator;
typedef llvm::iterator_range<VarBoundsIterator> VarBoundsIteratorRange;
// mapping from variables to bounds that depend upon the variables.
typedef std::map<VarDecl *, VarBoundsDecls> DependentMap;
private:
// Map variables to the bounds declarations that are
// in scope and depend upon them.
DependentMap Map;
// Track the bounds that are in scope so that we can remove them from the
// dependent map when the scope is exited.
std::vector<VarDecl *> BoundsInScope;
public:
BoundsDependencyTracker() {}
// Call these when entering/exiting scopes so that we can track when
// variables go out of scope. EnterScope returns an integer
// that should be passed to the corresponding ExitScope call.
unsigned EnterScope();
void ExitScope(unsigned scopeBegin);
// If D has a bounds declaration, add its dependencies to the existing
// scope.
void Add(VarDecl *D);
VarBoundsIteratorRange DependentBoundsDecls(VarDecl *D) {
auto Iter = Map.find(D);
if (Iter == Map.end())
return VarBoundsIteratorRange(nullptr, nullptr);
return VarBoundsIteratorRange(Iter->second.begin(), Iter->second.end());
}
void Dump(raw_ostream &OS);
};
BoundsDependencyTracker BoundsDependencies;
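// Usage sketch (illustrative; VD stands for a hypothetical VarDecl with a
// bounds declaration):
//
//   unsigned ScopeBegin = BoundsDependencies.EnterScope();
//   BoundsDependencies.Add(VD);  // record VD's bounds dependencies
//   // ... check uses while the scope is open ...
//   BoundsDependencies.ExitScope(ScopeBegin);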
// Map expressions that modify lvalues (assignments and pre/post
// increment/decrement operations) to bounds that may depend on the modified
// lvalues. We check the validity of bounds declarations after
// expression statements using data flow analysis. During the analysis,
// we need to know whether an expression modifies an lvalue involved in a
// bounds invariant. The AST traversal order for determining this is lexical
// and conflicts with preferred orderings for dataflow analysis, so we
// precompute this information before analyzing a function body.
class ModifiedBoundsDependencies {
public:
// A C lvalue expression with bounds on values stored in the lvalue.
// It is either a variable or a member expression.
struct LValueWithBounds {
LValueWithBounds(llvm::PointerUnion<VarDecl *, MemberExpr *> Target,
BoundsExpr *Bounds) : Target(Target), Bounds(Bounds) {}
llvm::PointerUnion<VarDecl *, MemberExpr *> Target;
BoundsExpr *Bounds; // Bounds for target.
};
typedef SmallVector<LValueWithBounds,2> LValuesWithBounds;
// Map assignments or pre/post increment/decrement expressions to bounds
// that depend upon the lvalue modified by the expressions.
typedef std::map<Expr *, LValuesWithBounds> DependentBounds;
void Add(Expr *E, llvm::PointerUnion<VarDecl *, MemberExpr *> LValue,
BoundsExpr *Bounds);
void Dump(raw_ostream &OS);
ModifiedBoundsDependencies() {}
DependentBounds Tracker;
};
/// \brief Compute a mapping from statements that modify lvalues to
/// in-scope bounds declarations that depend on those lvalues.
/// FD is the function being declared and Body is the body of the
/// function. They are passed in separately because Body hasn't
/// been attached to FD yet.
void ComputeBoundsDependencies(ModifiedBoundsDependencies &Tracker,
FunctionDecl *FD, Stmt *Body);
/// \brief RAII class used to indicate that we are substituting an expression
/// into another expression during bounds checking. We need to suppress
/// diagnostics emission during this. We are doing type-preserving
/// substitutions, so we don't expect semantic errors during substitution.
/// There could be warnings, which would confuse users. The warnings could
/// also be escalated to errors, which would cause compilation failures.
class ExprSubstitutionScope {
Sema &SemaRef;
bool PrevDisableSubstitionDiagnostics;
public:
explicit ExprSubstitutionScope(Sema &SemaRef,
bool DisableDiagnostics = true)
: SemaRef(SemaRef),
PrevDisableSubstitionDiagnostics(
SemaRef.DisableSubstitionDiagnostics) {
SemaRef.DisableSubstitionDiagnostics = DisableDiagnostics;
}
~ExprSubstitutionScope() {
SemaRef.DisableSubstitionDiagnostics =
PrevDisableSubstitionDiagnostics;
}
};
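// Usage sketch (illustrative):
//
//   {
//     ExprSubstitutionScope Guard(*this);  // suppress diagnostics
//     // ... substitute expressions during bounds checking ...
//   }  // destructor restores the previous diagnostics setting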
bool DisableSubstitionDiagnostics;
ExprResult ActOnPackExpression(Expr *PackedExpr,
QualType ExistType,
TypeArgument SubstArg,
SourceLocation StartLoc,
SourceLocation EndLoc);
//===---------------------------- Clang Extensions ----------------------===//
/// __builtin_convertvector(...)
ExprResult ActOnConvertVectorExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- OpenCL Features -----------------------===//
/// __builtin_astype(...)
ExprResult ActOnAsTypeExpr(Expr *E, ParsedType ParsedDestTy,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
//===---------------------------- C++ Features --------------------------===//
// Act on C++ namespaces
Decl *ActOnStartNamespaceDef(Scope *S, SourceLocation InlineLoc,
SourceLocation NamespaceLoc,
SourceLocation IdentLoc, IdentifierInfo *Ident,
SourceLocation LBrace,
const ParsedAttributesView &AttrList,
UsingDirectiveDecl *&UsingDecl);
void ActOnFinishNamespaceDef(Decl *Dcl, SourceLocation RBrace);
NamespaceDecl *getStdNamespace() const;
NamespaceDecl *getOrCreateStdNamespace();
NamespaceDecl *lookupStdExperimentalNamespace();
CXXRecordDecl *getStdBadAlloc() const;
EnumDecl *getStdAlignValT() const;
private:
// A cache representing whether we've fully checked the various comparison category
// types stored in ASTContext. The bit-index corresponds to the integer value
// of a ComparisonCategoryType enumerator.
llvm::SmallBitVector FullyCheckedComparisonCategories;
ValueDecl *tryLookupCtorInitMemberDecl(CXXRecordDecl *ClassDecl,
CXXScopeSpec &SS,
ParsedType TemplateTypeTy,
IdentifierInfo *MemberOrBase);
public:
/// Lookup the specified comparison category types in the standard
/// library, and check the VarDecls possibly returned by the operator<=>
/// builtins for that type.
///
/// \return The type of the comparison category type corresponding to the
/// specified Kind, or a null type if an error occurs.
QualType CheckComparisonCategoryType(ComparisonCategoryType Kind,
SourceLocation Loc);
/// Tests whether Ty is an instance of std::initializer_list and, if
/// it is and Element is not NULL, assigns the element type to Element.
bool isStdInitializerList(QualType Ty, QualType *Element);
/// Looks for the std::initializer_list template and instantiates it
/// with Element, or emits an error if it's not found.
///
/// \returns The instantiated template, or null on error.
QualType BuildStdInitializerList(QualType Element, SourceLocation Loc);
/// Determine whether Ctor is an initializer-list constructor, as
/// defined in [dcl.init.list]p2.
bool isInitListConstructor(const FunctionDecl *Ctor);
Decl *ActOnUsingDirective(Scope *CurScope, SourceLocation UsingLoc,
SourceLocation NamespcLoc, CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *NamespcName,
const ParsedAttributesView &AttrList);
void PushUsingDirective(Scope *S, UsingDirectiveDecl *UDir);
Decl *ActOnNamespaceAliasDef(Scope *CurScope,
SourceLocation NamespaceLoc,
SourceLocation AliasLoc,
IdentifierInfo *Alias,
CXXScopeSpec &SS,
SourceLocation IdentLoc,
IdentifierInfo *Ident);
void HideUsingShadowDecl(Scope *S, UsingShadowDecl *Shadow);
bool CheckUsingShadowDecl(UsingDecl *UD, NamedDecl *Target,
const LookupResult &PreviousDecls,
UsingShadowDecl *&PrevShadow);
UsingShadowDecl *BuildUsingShadowDecl(Scope *S, UsingDecl *UD,
NamedDecl *Target,
UsingShadowDecl *PrevDecl);
bool CheckUsingDeclRedeclaration(SourceLocation UsingLoc,
bool HasTypenameKeyword,
const CXXScopeSpec &SS,
SourceLocation NameLoc,
const LookupResult &Previous);
bool CheckUsingDeclQualifier(SourceLocation UsingLoc,
bool HasTypename,
const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
SourceLocation NameLoc);
NamedDecl *BuildUsingDeclaration(
Scope *S, AccessSpecifier AS, SourceLocation UsingLoc,
bool HasTypenameKeyword, SourceLocation TypenameLoc, CXXScopeSpec &SS,
DeclarationNameInfo NameInfo, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList, bool IsInstantiation);
NamedDecl *BuildUsingPackDecl(NamedDecl *InstantiatedFrom,
ArrayRef<NamedDecl *> Expansions);
bool CheckInheritingConstructorUsingDecl(UsingDecl *UD);
/// Given a derived-class using shadow declaration for a constructor and the
/// corresponding base class constructor, find or create the implicit
/// synthesized derived class constructor to use for this initialization.
CXXConstructorDecl *
findInheritingConstructor(SourceLocation Loc, CXXConstructorDecl *BaseCtor,
ConstructorUsingShadowDecl *DerivedShadow);
Decl *ActOnUsingDeclaration(Scope *CurScope, AccessSpecifier AS,
SourceLocation UsingLoc,
SourceLocation TypenameLoc, CXXScopeSpec &SS,
UnqualifiedId &Name, SourceLocation EllipsisLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnAliasDeclaration(Scope *CurScope, AccessSpecifier AS,
MultiTemplateParamsArg TemplateParams,
SourceLocation UsingLoc, UnqualifiedId &Name,
const ParsedAttributesView &AttrList,
TypeResult Type, Decl *DeclFromDeclSpec);
/// BuildCXXConstructExpr - Creates a complete call to a constructor,
/// including handling of its default argument expressions.
///
/// \param ConstructKind - a CXXConstructExpr::ConstructionKind
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
/// Build a CXXConstructExpr whose constructor has already been resolved if
/// it denotes an inherited constructor.
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs,
bool HadMultipleCandidates, bool IsListInitialization,
bool IsStdInitListInitialization,
bool RequiresZeroInit, unsigned ConstructKind,
SourceRange ParenRange);
// FIXME: Can we remove this and have the above BuildCXXConstructExpr check if
// the constructor can be elidable?
ExprResult
BuildCXXConstructExpr(SourceLocation ConstructLoc, QualType DeclInitType,
NamedDecl *FoundDecl,
CXXConstructorDecl *Constructor, bool Elidable,
MultiExprArg Exprs, bool HadMultipleCandidates,
bool IsListInitialization,
bool IsStdInitListInitialization, bool RequiresZeroInit,
unsigned ConstructKind, SourceRange ParenRange);
ExprResult BuildCXXDefaultInitExpr(SourceLocation Loc, FieldDecl *Field);
/// Instantiate or parse a C++ default argument expression as necessary.
/// Return true on error.
bool CheckCXXDefaultArgExpr(SourceLocation CallLoc, FunctionDecl *FD,
ParmVarDecl *Param);
/// BuildCXXDefaultArgExpr - Creates a CXXDefaultArgExpr, instantiating
/// the default expr if needed.
ExprResult BuildCXXDefaultArgExpr(SourceLocation CallLoc,
FunctionDecl *FD,
ParmVarDecl *Param);
/// FinalizeVarWithDestructor - Prepare for calling destructor on the
/// constructed variable.
void FinalizeVarWithDestructor(VarDecl *VD, const RecordType *DeclInitType);
/// Helper class that collects exception specifications for
/// implicitly-declared special member functions.
class ImplicitExceptionSpecification {
// Pointer to allow copying
Sema *Self;
// We order exception specifications thus:
// noexcept is the most restrictive, but is only used in C++11.
// throw() comes next.
// Then a throw(collected exceptions)
// Finally no specification, which is expressed as noexcept(false).
// throw(...) is used instead if any called function uses it.
ExceptionSpecificationType ComputedEST;
llvm::SmallPtrSet<CanQualType, 4> ExceptionsSeen;
SmallVector<QualType, 4> Exceptions;
void ClearExceptions() {
ExceptionsSeen.clear();
Exceptions.clear();
}
public:
explicit ImplicitExceptionSpecification(Sema &Self)
: Self(&Self), ComputedEST(EST_BasicNoexcept) {
if (!Self.getLangOpts().CPlusPlus11)
ComputedEST = EST_DynamicNone;
}
/// Get the computed exception specification type.
ExceptionSpecificationType getExceptionSpecType() const {
assert(!isComputedNoexcept(ComputedEST) &&
"noexcept(expr) should not be a possible result");
return ComputedEST;
}
/// The number of exceptions in the exception specification.
unsigned size() const { return Exceptions.size(); }
/// The set of exceptions in the exception specification.
const QualType *data() const { return Exceptions.data(); }
/// Integrate another called method into the collected data.
void CalledDecl(SourceLocation CallLoc, const CXXMethodDecl *Method);
/// Integrate an invoked expression into the collected data.
void CalledExpr(Expr *E);
/// Overwrite an EPI's exception specification with this
/// computed exception specification.
FunctionProtoType::ExceptionSpecInfo getExceptionSpec() const {
FunctionProtoType::ExceptionSpecInfo ESI;
ESI.Type = getExceptionSpecType();
if (ESI.Type == EST_Dynamic) {
ESI.Exceptions = Exceptions;
} else if (ESI.Type == EST_None) {
/// C++11 [except.spec]p14:
/// The exception-specification is noexcept(false) if the set of
/// potential exceptions of the special member function contains "any"
ESI.Type = EST_NoexceptFalse;
ESI.NoexceptExpr = Self->ActOnCXXBoolLiteral(SourceLocation(),
tok::kw_false).get();
}
return ESI;
}
};
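// Usage sketch (illustrative; SubobjectMethod stands for each base/member
// special member invoked) when computing the specification for an
// implicitly-declared special member:
//
//   ImplicitExceptionSpecification Spec(*this);
//   Spec.CalledDecl(Loc, SubobjectMethod);  // merge each subobject operation
//   auto ESI = Spec.getExceptionSpec();     // the resulting specification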
/// Determine what sort of exception specification a defaulted
/// default constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDefaultCtorExceptionSpec(SourceLocation Loc,
CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy constructor of a class will have, and whether the parameter
/// will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// copy assignment operator of a class will have, and whether the
/// parameter will be const.
ImplicitExceptionSpecification
ComputeDefaultedCopyAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveCtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted move
/// assignment operator of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedMoveAssignmentExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification a defaulted
/// destructor of a class will have.
ImplicitExceptionSpecification
ComputeDefaultedDtorExceptionSpec(CXXMethodDecl *MD);
/// Determine what sort of exception specification an inheriting
/// constructor of a class will have.
ImplicitExceptionSpecification
ComputeInheritingCtorExceptionSpec(SourceLocation Loc,
CXXConstructorDecl *CD);
/// Evaluate the implicit exception specification for a defaulted
/// special member function.
void EvaluateImplicitExceptionSpec(SourceLocation Loc, CXXMethodDecl *MD);
/// Check the given noexcept-specifier, convert its expression, and compute
/// the appropriate ExceptionSpecificationType.
ExprResult ActOnNoexceptSpec(SourceLocation NoexceptLoc, Expr *NoexceptExpr,
ExceptionSpecificationType &EST);
/// Check the given exception-specification and update the
/// exception specification information with the results.
void checkExceptionSpecification(bool IsTopLevel,
ExceptionSpecificationType EST,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr,
SmallVectorImpl<QualType> &Exceptions,
FunctionProtoType::ExceptionSpecInfo &ESI);
/// Determine if we're in a case where we need to (incorrectly) eagerly
/// parse an exception specification to work around a libstdc++ bug.
bool isLibstdcxxEagerExceptionSpecHack(const Declarator &D);
/// Add an exception-specification to the given member function
/// (or member function template). The exception-specification was parsed
/// after the method itself was declared.
void actOnDelayedExceptionSpecification(Decl *Method,
ExceptionSpecificationType EST,
SourceRange SpecificationRange,
ArrayRef<ParsedType> DynamicExceptions,
ArrayRef<SourceRange> DynamicExceptionRanges,
Expr *NoexceptExpr);
class InheritedConstructorInfo;
/// Determine if a special member function should have a deleted
/// definition when it is defaulted.
bool ShouldDeleteSpecialMember(CXXMethodDecl *MD, CXXSpecialMember CSM,
InheritedConstructorInfo *ICI = nullptr,
bool Diagnose = false);
/// Declare the implicit default constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// default constructor will be added.
///
/// \returns The implicitly-declared default constructor.
CXXConstructorDecl *DeclareImplicitDefaultConstructor(
CXXRecordDecl *ClassDecl);
/// DefineImplicitDefaultConstructor - Checks for feasibility of
/// defining this constructor as the default constructor.
void DefineImplicitDefaultConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit destructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// destructor will be added.
///
/// \returns The implicitly-declared destructor.
CXXDestructorDecl *DeclareImplicitDestructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitDestructor - Checks for feasibility of
/// defining this destructor as the default destructor.
void DefineImplicitDestructor(SourceLocation CurrentLocation,
CXXDestructorDecl *Destructor);
/// Build an exception spec for destructors that don't have one.
///
/// C++11 says that user-defined destructors with no exception spec get one
/// that looks as if the destructor was implicitly declared.
void AdjustDestructorExceptionSpec(CXXDestructorDecl *Destructor);
/// Define the specified inheriting constructor.
void DefineInheritingConstructor(SourceLocation UseLoc,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy constructor for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy constructor will be added.
///
/// \returns The implicitly-declared copy constructor.
CXXConstructorDecl *DeclareImplicitCopyConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitCopyConstructor - Checks for feasibility of
/// defining this constructor as the copy constructor.
void DefineImplicitCopyConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit move constructor for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move constructor will be added.
///
/// \returns The implicitly-declared move constructor, or NULL if it wasn't
/// declared.
CXXConstructorDecl *DeclareImplicitMoveConstructor(CXXRecordDecl *ClassDecl);
/// DefineImplicitMoveConstructor - Checks for feasibility of
/// defining this constructor as the move constructor.
void DefineImplicitMoveConstructor(SourceLocation CurrentLocation,
CXXConstructorDecl *Constructor);
/// Declare the implicit copy assignment operator for the given class.
///
/// \param ClassDecl The class declaration into which the implicit
/// copy assignment operator will be added.
///
/// \returns The implicitly-declared copy assignment operator.
CXXMethodDecl *DeclareImplicitCopyAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared copy assignment operator.
void DefineImplicitCopyAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Declare the implicit move assignment operator for the given class.
///
/// \param ClassDecl The Class declaration into which the implicit
/// move assignment operator will be added.
///
/// \returns The implicitly-declared move assignment operator, or NULL if it
/// wasn't declared.
CXXMethodDecl *DeclareImplicitMoveAssignment(CXXRecordDecl *ClassDecl);
/// Defines an implicitly-declared move assignment operator.
void DefineImplicitMoveAssignment(SourceLocation CurrentLocation,
CXXMethodDecl *MethodDecl);
/// Force the declaration of any implicitly-declared members of this
/// class.
void ForceDeclarationOfImplicitMembers(CXXRecordDecl *Class);
/// Check a completed declaration of an implicit special member.
void CheckImplicitSpecialMemberDeclaration(Scope *S, FunctionDecl *FD);
/// Determine whether the given function is an implicitly-deleted
/// special member function.
bool isImplicitlyDeleted(FunctionDecl *FD);
/// Check whether 'this' shows up in the type of a static member
/// function after the (naturally empty) cv-qualifier-seq would be.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionType(CXXMethodDecl *Method);
/// Whether 'this' shows up in the exception specification of a static
/// member function.
bool checkThisInStaticMemberFunctionExceptionSpec(CXXMethodDecl *Method);
/// Check whether 'this' shows up in the attributes of the given
/// static member function.
///
/// \returns true if an error occurred.
bool checkThisInStaticMemberFunctionAttributes(CXXMethodDecl *Method);
/// MaybeBindToTemporary - If the passed in expression has a record type with
/// a non-trivial destructor, this will return a CXXBindTemporaryExpr. Otherwise
/// it simply returns the passed in expression.
ExprResult MaybeBindToTemporary(Expr *E);
bool CompleteConstructorCall(CXXConstructorDecl *Constructor,
MultiExprArg ArgsPtr,
SourceLocation Loc,
SmallVectorImpl<Expr*> &ConvertedArgs,
bool AllowExplicit = false,
bool IsListInitialization = false);
ParsedType getInheritingConstructorName(CXXScopeSpec &SS,
SourceLocation NameLoc,
IdentifierInfo &Name);
ParsedType getConstructorName(IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
bool EnteringContext);
ParsedType getDestructorName(SourceLocation TildeLoc,
IdentifierInfo &II, SourceLocation NameLoc,
Scope *S, CXXScopeSpec &SS,
ParsedType ObjectType,
bool EnteringContext);
ParsedType getDestructorTypeForDecltype(const DeclSpec &DS,
ParsedType ObjectType);
// Checks that reinterpret casts don't have undefined behavior.
void CheckCompatibleReinterpretCast(QualType SrcType, QualType DestType,
bool IsDereference, SourceRange Range);
/// ActOnCXXNamedCast - Parse {dynamic,static,reinterpret,const}_cast's.
ExprResult ActOnCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
SourceLocation LAngleBracketLoc,
Declarator &D,
SourceLocation RAngleBracketLoc,
SourceLocation LParenLoc,
Expr *E,
SourceLocation RParenLoc);
ExprResult BuildCXXNamedCast(SourceLocation OpLoc,
tok::TokenKind Kind,
TypeSourceInfo *Ty,
Expr *E,
SourceRange AngleBrackets,
SourceRange Parens);
ExprResult ActOnBuiltinBitCastExpr(SourceLocation KWLoc, Declarator &Dcl,
ExprResult Operand,
SourceLocation RParenLoc);
ExprResult BuildBuiltinBitCastExpr(SourceLocation KWLoc, TypeSourceInfo *TSI,
Expr *Operand, SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXTypeId(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXTypeid - Parse typeid( something ).
ExprResult ActOnCXXTypeid(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
TypeSourceInfo *Operand,
SourceLocation RParenLoc);
ExprResult BuildCXXUuidof(QualType TypeInfoType,
SourceLocation TypeidLoc,
Expr *Operand,
SourceLocation RParenLoc);
/// ActOnCXXUuidof - Parse __uuidof( something ).
ExprResult ActOnCXXUuidof(SourceLocation OpLoc,
SourceLocation LParenLoc, bool isType,
void *TyOrExpr,
SourceLocation RParenLoc);
/// Handle a C++1z fold-expression: ( expr op ... op expr ).
ExprResult ActOnCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
tok::TokenKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc);
ExprResult BuildCXXFoldExpr(SourceLocation LParenLoc, Expr *LHS,
BinaryOperatorKind Operator,
SourceLocation EllipsisLoc, Expr *RHS,
SourceLocation RParenLoc,
Optional<unsigned> NumExpansions);
ExprResult BuildEmptyCXXFoldExpr(SourceLocation EllipsisLoc,
BinaryOperatorKind Operator);
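// Illustrative example (hypothetical code):
//
//   template <typename... Ts> auto sum(Ts... ts) { return (ts + ...); }
//   sum(1, 2, 3);  // unary right fold: 1 + (2 + 3)
//   sum();         // empty pack reaches BuildEmptyCXXFoldExpr; ill-formed
//                  // for '+', which has no default value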
/// ActOnCXXThis - Parse 'this' pointer.
ExprResult ActOnCXXThis(SourceLocation loc);
/// Build a CXXThisExpr and mark it referenced in the current context.
Expr *BuildCXXThisExpr(SourceLocation Loc, QualType Type, bool IsImplicit);
void MarkThisReferenced(CXXThisExpr *This);
/// Try to retrieve the type of the 'this' pointer.
///
/// \returns The type of 'this', if possible. Otherwise, returns a NULL type.
QualType getCurrentThisType();
/// When non-NULL, the C++ 'this' expression is allowed despite the
/// current context not being a non-static member function. In such cases,
/// this provides the type used for 'this'.
QualType CXXThisTypeOverride;
/// RAII object used to temporarily allow the C++ 'this' expression
/// to be used, with the given qualifiers on the current class type.
class CXXThisScopeRAII {
Sema &S;
QualType OldCXXThisTypeOverride;
bool Enabled;
public:
/// Introduce a new scope where 'this' may be allowed (when enabled),
/// using the given declaration (which is either a class template or a
/// class) along with the qualifiers placed on '*this'.
CXXThisScopeRAII(Sema &S, Decl *ContextDecl, Qualifiers CXXThisTypeQuals,
bool Enabled = true);
~CXXThisScopeRAII();
};
/// Make sure the value of 'this' is actually available in the current
/// context, if it is a potentially evaluated context.
///
/// \param Loc The location at which the capture of 'this' occurs.
///
/// \param Explicit Whether 'this' is explicitly captured in a lambda
/// capture list.
///
/// \param FunctionScopeIndexToStopAt If non-null, it points to the index
/// of the FunctionScopeInfo stack beyond which we do not attempt to capture.
/// This is useful when enclosing lambdas must speculatively capture
/// 'this' that may or may not be used in certain specializations of
/// a nested generic lambda (depending on whether the name resolves to
/// a non-static member function or a static function).
/// \returns true if the capture failed, false otherwise.
bool CheckCXXThisCapture(SourceLocation Loc, bool Explicit = false,
bool BuildAndDiagnose = true,
const unsigned *const FunctionScopeIndexToStopAt = nullptr,
bool ByCopy = false);
/// Determine whether the given type is the type of *this that is used
/// outside of the body of a member function for a type that is currently
/// being defined.
bool isThisOutsideMemberFunctionBody(QualType BaseType);
/// ActOnCXXBoolLiteral - Parse {true,false} literals.
ExprResult ActOnCXXBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
/// ActOnObjCBoolLiteral - Parse {__objc_yes,__objc_no} literals.
ExprResult ActOnObjCBoolLiteral(SourceLocation OpLoc, tok::TokenKind Kind);
ExprResult
ActOnObjCAvailabilityCheckExpr(llvm::ArrayRef<AvailabilitySpec> AvailSpecs,
SourceLocation AtLoc, SourceLocation RParen);
/// ActOnCXXNullPtrLiteral - Parse 'nullptr'.
ExprResult ActOnCXXNullPtrLiteral(SourceLocation Loc);
/// ActOnCXXThrow - Parse throw expressions.
ExprResult ActOnCXXThrow(Scope *S, SourceLocation OpLoc, Expr *expr);
ExprResult BuildCXXThrow(SourceLocation OpLoc, Expr *Ex,
bool IsThrownVarInScope);
bool CheckCXXThrowOperand(SourceLocation ThrowLoc, QualType ThrowTy, Expr *E);
/// ActOnCXXTypeConstructExpr - Parse construction of a specified type.
/// Can be interpreted either as function-style casting ("int(x)")
/// or class type construction ("ClassType(x,y,z)")
/// or creation of a value-initialized type ("int()").
ExprResult ActOnCXXTypeConstructExpr(ParsedType TypeRep,
SourceLocation LParenOrBraceLoc,
MultiExprArg Exprs,
SourceLocation RParenOrBraceLoc,
bool ListInitialization);
ExprResult BuildCXXTypeConstructExpr(TypeSourceInfo *Type,
SourceLocation LParenLoc,
MultiExprArg Exprs,
SourceLocation RParenLoc,
bool ListInitialization);
/// ActOnCXXNew - Parsed a C++ 'new' expression.
ExprResult ActOnCXXNew(SourceLocation StartLoc, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens, Declarator &D,
Expr *Initializer);
ExprResult BuildCXXNew(SourceRange Range, bool UseGlobal,
SourceLocation PlacementLParen,
MultiExprArg PlacementArgs,
SourceLocation PlacementRParen,
SourceRange TypeIdParens,
QualType AllocType,
TypeSourceInfo *AllocTypeInfo,
Optional<Expr *> ArraySize,
SourceRange DirectInitRange,
Expr *Initializer);
/// Determine whether \p FD is an aligned allocation or deallocation
/// function that is unavailable.
bool isUnavailableAlignedAllocationFunction(const FunctionDecl &FD) const;
/// Produce diagnostics if \p FD is an aligned allocation or deallocation
/// function that is unavailable.
void diagnoseUnavailableAlignedAllocation(const FunctionDecl &FD,
SourceLocation Loc);
bool CheckAllocatedType(QualType AllocType, SourceLocation Loc,
SourceRange R);
/// The scope in which to find allocation functions.
enum AllocationFunctionScope {
/// Only look for allocation functions in the global scope.
AFS_Global,
/// Only look for allocation functions in the scope of the
/// allocated class.
AFS_Class,
/// Look for allocation functions in both the global scope
/// and in the scope of the allocated class.
AFS_Both
};
/// Finds the overloads of operator new and delete that are appropriate
/// for the allocation.
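///
/// A hedged call sketch for a non-array, non-placement 'new' ('StartLoc',
/// 'Range' and 'AllocType' are assumed from the caller):
/// \code
///   FunctionDecl *OperatorNew = nullptr, *OperatorDelete = nullptr;
///   bool PassAlignment = false;
///   if (S.FindAllocationFunctions(StartLoc, Range, Sema::AFS_Both,
///                                 Sema::AFS_Both, AllocType,
///                                 /*IsArray=*/false, PassAlignment,
///                                 MultiExprArg(), OperatorNew,
///                                 OperatorDelete))
///     return ExprError();
/// \endcode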
bool FindAllocationFunctions(SourceLocation StartLoc, SourceRange Range,
AllocationFunctionScope NewScope,
AllocationFunctionScope DeleteScope,
QualType AllocType, bool IsArray,
bool &PassAlignment, MultiExprArg PlaceArgs,
FunctionDecl *&OperatorNew,
FunctionDecl *&OperatorDelete,
bool Diagnose = true);
void DeclareGlobalNewDelete();
void DeclareGlobalAllocationFunction(DeclarationName Name, QualType Return,
ArrayRef<QualType> Params);
bool FindDeallocationFunction(SourceLocation StartLoc, CXXRecordDecl *RD,
DeclarationName Name, FunctionDecl* &Operator,
bool Diagnose = true);
FunctionDecl *FindUsualDeallocationFunction(SourceLocation StartLoc,
bool CanProvideSize,
bool Overaligned,
DeclarationName Name);
FunctionDecl *FindDeallocationFunctionForDestructor(SourceLocation StartLoc,
CXXRecordDecl *RD);
/// ActOnCXXDelete - Parsed a C++ 'delete' expression.
ExprResult ActOnCXXDelete(SourceLocation StartLoc,
bool UseGlobal, bool ArrayForm,
Expr *Operand);
void CheckVirtualDtorCall(CXXDestructorDecl *dtor, SourceLocation Loc,
bool IsDelete, bool CallCanBeVirtual,
bool WarnOnNonAbstractTypes,
SourceLocation DtorLoc);
ExprResult ActOnNoexceptExpr(SourceLocation KeyLoc, SourceLocation LParen,
Expr *Operand, SourceLocation RParen);
ExprResult BuildCXXNoexceptExpr(SourceLocation KeyLoc, Expr *Operand,
SourceLocation RParen);
/// Parsed one of the type trait support pseudo-functions.
ExprResult ActOnTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<ParsedType> Args,
SourceLocation RParenLoc);
ExprResult BuildTypeTrait(TypeTrait Kind, SourceLocation KWLoc,
ArrayRef<TypeSourceInfo *> Args,
SourceLocation RParenLoc);
/// ActOnArrayTypeTrait - Parsed one of the binary type trait support
/// pseudo-functions.
ExprResult ActOnArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
ParsedType LhsTy,
Expr *DimExpr,
SourceLocation RParen);
ExprResult BuildArrayTypeTrait(ArrayTypeTrait ATT,
SourceLocation KWLoc,
TypeSourceInfo *TSInfo,
Expr *DimExpr,
SourceLocation RParen);
/// ActOnExpressionTrait - Parsed one of the unary type trait support
/// pseudo-functions.
ExprResult ActOnExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult BuildExpressionTrait(ExpressionTrait OET,
SourceLocation KWLoc,
Expr *Queried,
SourceLocation RParen);
ExprResult ActOnStartCXXMemberReference(Scope *S,
Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
ParsedType &ObjectType,
bool &MayBePseudoDestructor);
ExprResult BuildPseudoDestructorExpr(Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
const CXXScopeSpec &SS,
TypeSourceInfo *ScopeType,
SourceLocation CCLoc,
SourceLocation TildeLoc,
PseudoDestructorTypeStorage DestroyedType);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
CXXScopeSpec &SS,
UnqualifiedId &FirstTypeName,
SourceLocation CCLoc,
SourceLocation TildeLoc,
UnqualifiedId &SecondTypeName);
ExprResult ActOnPseudoDestructorExpr(Scope *S, Expr *Base,
SourceLocation OpLoc,
tok::TokenKind OpKind,
SourceLocation TildeLoc,
const DeclSpec& DS);
/// MaybeCreateExprWithCleanups - If the current full-expression
/// requires any cleanups, surround it with an ExprWithCleanups node.
/// Otherwise, just returns the passed-in expression.
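///
/// A minimal sketch (assuming 'Init' is the just-completed full-expression):
/// \code
///   Init = S.MaybeCreateExprWithCleanups(Init);
/// \endcode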
Expr *MaybeCreateExprWithCleanups(Expr *SubExpr);
Stmt *MaybeCreateStmtWithCleanups(Stmt *SubStmt);
ExprResult MaybeCreateExprWithCleanups(ExprResult SubExpr);
MaterializeTemporaryExpr *
CreateMaterializeTemporaryExpr(QualType T, Expr *Temporary,
bool BoundToLvalueReference);
ExprResult ActOnFinishFullExpr(Expr *Expr, bool DiscardedValue) {
return ActOnFinishFullExpr(
Expr, Expr ? Expr->getExprLoc() : SourceLocation(), DiscardedValue);
}
ExprResult ActOnFinishFullExpr(Expr *Expr, SourceLocation CC,
bool DiscardedValue, bool IsConstexpr = false);
StmtResult ActOnFinishFullStmt(Stmt *Stmt);
/// Marks SS invalid if it represents an incomplete type.
bool RequireCompleteDeclContext(CXXScopeSpec &SS, DeclContext *DC);
DeclContext *computeDeclContext(QualType T);
DeclContext *computeDeclContext(const CXXScopeSpec &SS,
bool EnteringContext = false);
bool isDependentScopeSpecifier(const CXXScopeSpec &SS);
CXXRecordDecl *getCurrentInstantiationOf(NestedNameSpecifier *NNS);
/// The parser has parsed a global nested-name-specifier '::'.
///
/// \param CCLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
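///
/// A minimal sketch for a leading '::' (assuming 'CCLoc' was supplied by
/// the parser):
/// \code
///   CXXScopeSpec SS;
///   if (S.ActOnCXXGlobalScopeSpecifier(CCLoc, SS))
///     return true;  // error already diagnosed
/// \endcode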
bool ActOnCXXGlobalScopeSpecifier(SourceLocation CCLoc, CXXScopeSpec &SS);
/// The parser has parsed a '__super' nested-name-specifier.
///
/// \param SuperLoc The location of the '__super' keyword.
///
/// \param ColonColonLoc The location of the '::'.
///
/// \param SS The nested-name-specifier, which will be updated in-place
/// to reflect the parsed nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnSuperScopeSpecifier(SourceLocation SuperLoc,
SourceLocation ColonColonLoc, CXXScopeSpec &SS);
bool isAcceptableNestedNameSpecifier(const NamedDecl *SD,
bool *CanCorrect = nullptr);
NamedDecl *FindFirstQualifierInScope(Scope *S, NestedNameSpecifier *NNS);
/// Keeps information about an identifier in a nested-name-spec.
///
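/// A minimal construction sketch (hedged; 'II', 'IdLoc' and 'CCLoc' are
/// assumed to come from the parser for an "identifier::" token sequence):
/// \code
///   Sema::NestedNameSpecInfo IdInfo(II, IdLoc, CCLoc);
/// \endcode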
struct NestedNameSpecInfo {
/// The type of the object, if we're parsing nested-name-specifier in
/// a member access expression.
ParsedType ObjectType;
/// The identifier preceding the '::'.
IdentifierInfo *Identifier;
/// The location of the identifier.
SourceLocation IdentifierLoc;
/// The location of the '::'.
SourceLocation CCLoc;
/// Creates info object for the most typical case.
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, ParsedType ObjectType = ParsedType())
: ObjectType(ObjectType), Identifier(II), IdentifierLoc(IdLoc),
CCLoc(ColonColonLoc) {
}
NestedNameSpecInfo(IdentifierInfo *II, SourceLocation IdLoc,
SourceLocation ColonColonLoc, QualType ObjectType)
: ObjectType(ParsedType::make(ObjectType)), Identifier(II),
IdentifierLoc(IdLoc), CCLoc(ColonColonLoc) {
}
};
bool isNonTypeNestedNameSpecifier(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo);
bool BuildCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
NamedDecl *ScopeLookupResult,
bool ErrorRecoveryLookup,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
/// The parser has parsed a nested-name-specifier 'identifier::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param IdInfo Parser information about an identifier in the
/// nested-name-spec.
///
/// \param EnteringContext Whether we're entering the context nominated by
/// this nested-name-specifier.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param ErrorRecoveryLookup If true, then this method is called to improve
/// error recovery; in this case, no error messages are emitted.
///
/// \param IsCorrectedToColon If not null, suggestions to replace '::' -> ':'
/// are allowed. The bool value pointed to by this parameter is set to 'true'
/// if the identifier is treated as if it were followed by ':', not '::'.
///
/// \param OnlyNamespace If true, only considers namespaces in lookup.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
NestedNameSpecInfo &IdInfo,
bool EnteringContext,
CXXScopeSpec &SS,
bool ErrorRecoveryLookup = false,
bool *IsCorrectedToColon = nullptr,
bool OnlyNamespace = false);
ExprResult ActOnDecltypeExpression(Expr *E);
bool ActOnCXXNestedNameSpecifierDecltype(CXXScopeSpec &SS,
const DeclSpec &DS,
SourceLocation ColonColonLoc);
bool IsInvalidUnlessNestedName(Scope *S, CXXScopeSpec &SS,
NestedNameSpecInfo &IdInfo,
bool EnteringContext);
/// The parser has parsed a nested-name-specifier
/// 'template[opt] template-name < template-args >::'.
///
/// \param S The scope in which this nested-name-specifier occurs.
///
/// \param SS The nested-name-specifier, which is both an input
/// parameter (the nested-name-specifier before this type) and an
/// output parameter (containing the full nested-name-specifier,
/// including this new type).
///
/// \param TemplateKWLoc the location of the 'template' keyword, if any.
/// \param TemplateName the template name.
/// \param TemplateNameLoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
/// \param CCLoc The location of the '::'.
///
/// \param EnteringContext Whether we're entering the context of the
/// nested-name-specifier.
///
/// \returns true if an error occurred, false otherwise.
bool ActOnCXXNestedNameSpecifier(Scope *S,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateName,
SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc,
SourceLocation CCLoc,
bool EnteringContext);
/// Given a C++ nested-name-specifier, produce an annotation value
/// that the parser can use later to reconstruct the given
/// nested-name-specifier.
///
/// \param SS A nested-name-specifier.
///
/// \returns A pointer containing all of the information in the
/// nested-name-specifier \p SS.
void *SaveNestedNameSpecifierAnnotation(CXXScopeSpec &SS);
/// Given an annotation pointer for a nested-name-specifier, restore
/// the nested-name-specifier structure.
///
/// \param Annotation The annotation pointer, produced by
/// \c SaveNestedNameSpecifierAnnotation().
///
/// \param AnnotationRange The source range corresponding to the annotation.
///
/// \param SS The nested-name-specifier that will be updated with the contents
/// of the annotation pointer.
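///
/// A hedged round-trip sketch (assuming 'SS' holds a parsed
/// nested-name-specifier and CXXScopeSpec::getRange() yields its range):
/// \code
///   void *Annotation = S.SaveNestedNameSpecifierAnnotation(SS);
///   CXXScopeSpec Restored;
///   S.RestoreNestedNameSpecifierAnnotation(Annotation, SS.getRange(),
///                                          Restored);
/// \endcode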
void RestoreNestedNameSpecifierAnnotation(void *Annotation,
SourceRange AnnotationRange,
CXXScopeSpec &SS);
bool ShouldEnterDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclaratorScope - Called when a C++ scope specifier (global
/// scope or nested-name-specifier) is parsed, part of a declarator-id.
/// After this method is called, according to [C++ 3.4.3p3], names should be
/// looked up in the declarator-id's scope, until the declarator is parsed and
/// ActOnCXXExitDeclaratorScope is called.
/// The 'SS' should be a non-empty valid CXXScopeSpec.
bool ActOnCXXEnterDeclaratorScope(Scope *S, CXXScopeSpec &SS);
/// ActOnCXXExitDeclaratorScope - Called when a declarator that previously
/// invoked ActOnCXXEnterDeclaratorScope() is finished. 'SS' is the same
/// CXXScopeSpec that was passed to ActOnCXXEnterDeclaratorScope as well.
/// Used to indicate that names should revert to being looked up in the
/// defining scope.
void ActOnCXXExitDeclaratorScope(Scope *S, const CXXScopeSpec &SS);
/// ActOnCXXEnterDeclInitializer - Invoked when we are about to parse an
/// initializer for the declaration 'Dcl'.
/// After this method is called, according to [C++ 3.4.1p13], if 'Dcl' is a
/// static data member of class X, names should be looked up in the scope of
/// class X.
void ActOnCXXEnterDeclInitializer(Scope *S, Decl *Dcl);
/// ActOnCXXExitDeclInitializer - Invoked after we are finished parsing an
/// initializer for the declaration 'Dcl'.
void ActOnCXXExitDeclInitializer(Scope *S, Decl *Dcl);
/// Create a new lambda closure type.
CXXRecordDecl *createLambdaClosureType(SourceRange IntroducerRange,
TypeSourceInfo *Info,
bool KnownDependent,
LambdaCaptureDefault CaptureDefault);
/// Start the definition of a lambda expression.
CXXMethodDecl *
startLambdaDefinition(CXXRecordDecl *Class, SourceRange IntroducerRange,
TypeSourceInfo *MethodType, SourceLocation EndLoc,
ArrayRef<ParmVarDecl *> Params,
ConstexprSpecKind ConstexprKind,
Optional<std::pair<unsigned, Decl *>> Mangling = None);
/// Endow the lambda scope info with the relevant properties.
void buildLambdaScope(sema::LambdaScopeInfo *LSI,
CXXMethodDecl *CallOperator,
SourceRange IntroducerRange,
LambdaCaptureDefault CaptureDefault,
SourceLocation CaptureDefaultLoc,
bool ExplicitParams,
bool ExplicitResultType,
bool Mutable);
/// Perform initialization analysis of the init-capture and perform
/// any implicit conversions, such as an lvalue-to-rvalue conversion,
/// when the capture is not used to initialize a reference.
ParsedType actOnLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
IdentifierInfo *Id, LambdaCaptureInitKind InitKind, Expr *&Init) {
return ParsedType::make(buildLambdaInitCaptureInitialization(
Loc, ByRef, EllipsisLoc, None, Id,
InitKind != LambdaCaptureInitKind::CopyInit, Init));
}
QualType buildLambdaInitCaptureInitialization(
SourceLocation Loc, bool ByRef, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions, IdentifierInfo *Id, bool DirectInit,
Expr *&Init);
/// Create a dummy variable within the DeclContext of the lambda's
/// call operator, for name-lookup purposes for a lambda init-capture.
///
/// CodeGen handles emission of lambda captures, ignoring these dummy
/// variables appropriately.
VarDecl *createLambdaInitCaptureVarDecl(SourceLocation Loc,
QualType InitCaptureType,
SourceLocation EllipsisLoc,
IdentifierInfo *Id,
unsigned InitStyle, Expr *Init);
/// Add an init-capture to a lambda scope.
void addInitCapture(sema::LambdaScopeInfo *LSI, VarDecl *Var);
/// Note that we have finished the explicit captures for the
/// given lambda.
void finishLambdaExplicitCaptures(sema::LambdaScopeInfo *LSI);
/// \brief This is called after parsing the explicit template parameter list
/// on a lambda (if it exists) in C++2a.
void ActOnLambdaExplicitTemplateParameterList(SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> TParams,
SourceLocation RAngleLoc);
/// Introduce the lambda parameters into scope.
void addLambdaParameters(
ArrayRef<LambdaIntroducer::LambdaCapture> Captures,
CXXMethodDecl *CallOperator, Scope *CurScope);
/// Deduce a block or lambda's return type based on the return
/// statements present in the body.
void deduceClosureReturnType(sema::CapturingScopeInfo &CSI);
/// ActOnStartOfLambdaDefinition - This is called just before we start
/// parsing the body of a lambda; it analyzes the explicit captures and
/// arguments, and sets up various data-structures for the body of the
/// lambda.
void ActOnStartOfLambdaDefinition(LambdaIntroducer &Intro,
Declarator &ParamInfo, Scope *CurScope);
/// ActOnLambdaError - If there is an error parsing a lambda, this callback
/// is invoked to pop the information about the lambda.
void ActOnLambdaError(SourceLocation StartLoc, Scope *CurScope,
bool IsInstantiation = false);
/// ActOnLambdaExpr - This is called when the body of a lambda expression
/// was successfully completed.
ExprResult ActOnLambdaExpr(SourceLocation StartLoc, Stmt *Body,
Scope *CurScope);
/// Does copying/destroying the captured variable have side effects?
bool CaptureHasSideEffects(const sema::Capture &From);
/// Diagnose if an explicit lambda capture is unused. Returns true if a
/// diagnostic is emitted.
bool DiagnoseUnusedLambdaCapture(SourceRange CaptureRange,
const sema::Capture &From);
/// Build a FieldDecl suitable to hold the given capture.
FieldDecl *BuildCaptureField(RecordDecl *RD, const sema::Capture &Capture);
/// Initialize the given capture with a suitable expression.
ExprResult BuildCaptureInit(const sema::Capture &Capture,
SourceLocation ImplicitCaptureLoc,
bool IsOpenMPMapping = false);
/// Complete a lambda-expression having processed and attached the
/// lambda body.
ExprResult BuildLambdaExpr(SourceLocation StartLoc, SourceLocation EndLoc,
sema::LambdaScopeInfo *LSI);
/// Get the return type to use for a lambda's conversion function(s) to
/// function pointer type, given the type of the call operator.
QualType
getLambdaConversionFunctionResultType(const FunctionProtoType *CallOpType);
/// Define the "body" of the conversion from a lambda object to a
/// function pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToFunctionPointerConversion(
SourceLocation CurrentLoc, CXXConversionDecl *Conv);
/// Define the "body" of the conversion from a lambda object to a
/// block pointer.
///
/// This routine doesn't actually define a sensible body; rather, it fills
/// in the initialization expression needed to copy the lambda object into
/// the block, and IR generation actually generates the real body of the
/// block pointer conversion.
void DefineImplicitLambdaToBlockPointerConversion(SourceLocation CurrentLoc,
CXXConversionDecl *Conv);
ExprResult BuildBlockForLambdaConversion(SourceLocation CurrentLocation,
SourceLocation ConvLocation,
CXXConversionDecl *Conv,
Expr *Src);
/// ParseObjCStringLiteral - Parse Objective-C string literals.
ExprResult ParseObjCStringLiteral(SourceLocation *AtLocs,
ArrayRef<Expr *> Strings);
ExprResult BuildObjCStringLiteral(SourceLocation AtLoc, StringLiteral *S);
/// BuildObjCNumericLiteral - builds an ObjCBoxedExpr AST node for the
/// numeric literal expression. Type of the expression will be "NSNumber *"
/// or "id" if NSNumber is unavailable.
ExprResult BuildObjCNumericLiteral(SourceLocation AtLoc, Expr *Number);
ExprResult ActOnObjCBoolLiteral(SourceLocation AtLoc, SourceLocation ValueLoc,
bool Value);
ExprResult BuildObjCArrayLiteral(SourceRange SR, MultiExprArg Elements);
/// BuildObjCBoxedExpr - builds an ObjCBoxedExpr AST node for the
/// '@' prefixed parenthesized expression. The type of the expression will
/// either be "NSNumber *", "NSString *" or "NSValue *" depending on the type
/// of ValueType, which is allowed to be a built-in numeric type, "char *",
/// "const char *", or a C structure with the attribute 'objc_boxable'.
ExprResult BuildObjCBoxedExpr(SourceRange SR, Expr *ValueExpr);
ExprResult BuildObjCSubscriptExpression(SourceLocation RB, Expr *BaseExpr,
Expr *IndexExpr,
ObjCMethodDecl *getterMethod,
ObjCMethodDecl *setterMethod);
ExprResult BuildObjCDictionaryLiteral(SourceRange SR,
MutableArrayRef<ObjCDictionaryElement> Elements);
ExprResult BuildObjCEncodeExpression(SourceLocation AtLoc,
TypeSourceInfo *EncodedTypeInfo,
SourceLocation RParenLoc);
ExprResult BuildCXXMemberCallExpr(Expr *Exp, NamedDecl *FoundDecl,
CXXConversionDecl *Method,
bool HadMultipleCandidates);
ExprResult ParseObjCEncodeExpression(SourceLocation AtLoc,
SourceLocation EncodeLoc,
SourceLocation LParenLoc,
ParsedType Ty,
SourceLocation RParenLoc);
/// ParseObjCSelectorExpression - Build selector expression for \@selector
ExprResult ParseObjCSelectorExpression(Selector Sel,
SourceLocation AtLoc,
SourceLocation SelLoc,
SourceLocation LParenLoc,
SourceLocation RParenLoc,
bool WarnMultipleSelectors);
/// ParseObjCProtocolExpression - Build protocol expression for \@protocol
ExprResult ParseObjCProtocolExpression(IdentifierInfo * ProtocolName,
SourceLocation AtLoc,
SourceLocation ProtoLoc,
SourceLocation LParenLoc,
SourceLocation ProtoIdLoc,
SourceLocation RParenLoc);
//===--------------------------------------------------------------------===//
// C++ Declarations
//
Decl *ActOnStartLinkageSpecification(Scope *S,
SourceLocation ExternLoc,
Expr *LangStr,
SourceLocation LBraceLoc);
Decl *ActOnFinishLinkageSpecification(Scope *S,
Decl *LinkageSpec,
SourceLocation RBraceLoc);
//===--------------------------------------------------------------------===//
// C++ Classes
//
CXXRecordDecl *getCurrentClass(Scope *S, const CXXScopeSpec *SS);
bool isCurrentClassName(const IdentifierInfo &II, Scope *S,
const CXXScopeSpec *SS = nullptr);
bool isCurrentClassNameTypo(IdentifierInfo *&II, const CXXScopeSpec *SS);
bool ActOnAccessSpecifier(AccessSpecifier Access, SourceLocation ASLoc,
SourceLocation ColonLoc,
const ParsedAttributesView &Attrs);
NamedDecl *ActOnCXXMemberDeclarator(Scope *S, AccessSpecifier AS,
Declarator &D,
MultiTemplateParamsArg TemplateParameterLists,
Expr *BitfieldWidth, const VirtSpecifiers &VS,
InClassInitStyle InitStyle);
void ActOnStartCXXInClassMemberInitializer();
void ActOnFinishCXXInClassMemberInitializer(Decl *VarDecl,
SourceLocation EqualLoc,
Expr *Init);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
SourceLocation LParenLoc,
ArrayRef<Expr *> Args,
SourceLocation RParenLoc,
SourceLocation EllipsisLoc);
MemInitResult ActOnMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *InitList,
SourceLocation EllipsisLoc);
MemInitResult BuildMemInitializer(Decl *ConstructorD,
Scope *S,
CXXScopeSpec &SS,
IdentifierInfo *MemberOrBase,
ParsedType TemplateTypeTy,
const DeclSpec &DS,
SourceLocation IdLoc,
Expr *Init,
SourceLocation EllipsisLoc);
MemInitResult BuildMemberInitializer(ValueDecl *Member,
Expr *Init,
SourceLocation IdLoc);
MemInitResult BuildBaseInitializer(QualType BaseType,
TypeSourceInfo *BaseTInfo,
Expr *Init,
CXXRecordDecl *ClassDecl,
SourceLocation EllipsisLoc);
MemInitResult BuildDelegatingInitializer(TypeSourceInfo *TInfo,
Expr *Init,
CXXRecordDecl *ClassDecl);
bool SetDelegatingInitializer(CXXConstructorDecl *Constructor,
CXXCtorInitializer *Initializer);
bool SetCtorInitializers(CXXConstructorDecl *Constructor, bool AnyErrors,
ArrayRef<CXXCtorInitializer *> Initializers = None);
void SetIvarInitializers(ObjCImplementationDecl *ObjCImplementation);
/// MarkBaseAndMemberDestructorsReferenced - Given a record decl,
/// mark all the non-trivial destructors of its members and bases as
/// referenced.
void MarkBaseAndMemberDestructorsReferenced(SourceLocation Loc,
CXXRecordDecl *Record);
/// The list of classes whose vtables have been used within
/// this translation unit, and the source locations at which the
/// first use occurred.
typedef std::pair<CXXRecordDecl*, SourceLocation> VTableUse;
/// The list of vtables that are required but have not yet been
/// materialized.
SmallVector<VTableUse, 16> VTableUses;
/// The set of classes whose vtables have been used within
/// this translation unit, and a bit that will be true if the vtable is
/// required to be emitted (otherwise, it should be emitted only if needed
/// by code generation).
llvm::DenseMap<CXXRecordDecl *, bool> VTablesUsed;
/// Load any externally-stored vtable uses.
void LoadExternalVTableUses();
/// Note that the vtable for the given class was used at the
/// given location.
void MarkVTableUsed(SourceLocation Loc, CXXRecordDecl *Class,
bool DefinitionRequired = false);
/// Mark the exception specifications of all virtual member functions
/// in the given class as needed.
void MarkVirtualMemberExceptionSpecsNeeded(SourceLocation Loc,
const CXXRecordDecl *RD);
/// MarkVirtualMembersReferenced - Will mark all members of the given
/// CXXRecordDecl referenced.
void MarkVirtualMembersReferenced(SourceLocation Loc, const CXXRecordDecl *RD,
bool ConstexprOnly = false);
/// Define all of the vtables that have been used in this
/// translation unit and reference any virtual members used by those
/// vtables.
///
/// \returns true if any work was done, false otherwise.
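///
/// A hedged end-of-translation-unit sketch: defining vtables can reference
/// further virtual members, so one plausible idiom is to iterate until no
/// new work appears.
/// \code
///   while (S.DefineUsedVTables()) {
///     // Each pass may register additional vtable uses.
///   }
/// \endcode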
bool DefineUsedVTables();
void AddImplicitlyDeclaredMembersToClass(CXXRecordDecl *ClassDecl);
void ActOnMemInitializers(Decl *ConstructorDecl,
SourceLocation ColonLoc,
ArrayRef<CXXCtorInitializer*> MemInits,
bool AnyErrors);
/// Check class-level dllimport/dllexport attribute. The caller must
/// ensure that referenceDLLExportedClassMethods is called at some point later
/// when all outer classes of Class are complete.
void checkClassLevelDLLAttribute(CXXRecordDecl *Class);
void checkClassLevelCodeSegAttribute(CXXRecordDecl *Class);
void referenceDLLExportedClassMethods();
void propagateDLLAttrToBaseClassTemplate(
CXXRecordDecl *Class, Attr *ClassAttr,
ClassTemplateSpecializationDecl *BaseTemplateSpec,
SourceLocation BaseLoc);
void CheckCompletedCXXClass(CXXRecordDecl *Record);
/// Check that the C++ class annotated with "trivial_abi" satisfies all the
/// conditions that are needed for the attribute to have an effect.
void checkIllFormedTrivialABIStruct(CXXRecordDecl &RD);
void ActOnFinishCXXMemberSpecification(Scope *S, SourceLocation RLoc,
Decl *TagDecl, SourceLocation LBrac,
SourceLocation RBrac,
const ParsedAttributesView &AttrList);
void ActOnFinishCXXMemberDecls();
void ActOnFinishCXXNonNestedClass(Decl *D);
void ActOnReenterCXXMethodParameter(Scope *S, ParmVarDecl *Param);
unsigned ActOnReenterTemplateScope(Scope *S, Decl *Template);
void ActOnStartDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnStartDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnDelayedCXXMethodParameter(Scope *S, Decl *Param);
void ActOnFinishDelayedMemberDeclarations(Scope *S, Decl *Record);
void ActOnFinishDelayedCXXMethodDeclaration(Scope *S, Decl *Method);
void ActOnFinishDelayedMemberInitializers(Decl *Record);
void MarkAsLateParsedTemplate(FunctionDecl *FD, Decl *FnD,
CachedTokens &Toks);
void UnmarkAsLateParsedTemplate(FunctionDecl *FD);
bool IsInsideALocalClassWithinATemplateFunction();
Decl *ActOnStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
Expr *AssertMessageExpr,
SourceLocation RParenLoc);
Decl *BuildStaticAssertDeclaration(SourceLocation StaticAssertLoc,
Expr *AssertExpr,
StringLiteral *AssertMessageExpr,
SourceLocation RParenLoc,
bool Failed);
FriendDecl *CheckFriendTypeDecl(SourceLocation LocStart,
SourceLocation FriendLoc,
TypeSourceInfo *TSInfo);
Decl *ActOnFriendTypeDecl(Scope *S, const DeclSpec &DS,
MultiTemplateParamsArg TemplateParams);
NamedDecl *ActOnFriendFunctionDecl(Scope *S, Declarator &D,
MultiTemplateParamsArg TemplateParams);
QualType CheckConstructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
void CheckConstructor(CXXConstructorDecl *Constructor);
QualType CheckDestructorDeclarator(Declarator &D, QualType R,
StorageClass& SC);
bool CheckDestructor(CXXDestructorDecl *Destructor);
void CheckConversionDeclarator(Declarator &D, QualType &R,
StorageClass& SC);
Decl *ActOnConversionDeclarator(CXXConversionDecl *Conversion);
void CheckDeductionGuideDeclarator(Declarator &D, QualType &R,
StorageClass &SC);
void CheckDeductionGuideTemplate(FunctionTemplateDecl *TD);
void CheckExplicitlyDefaultedSpecialMember(CXXMethodDecl *MD);
void CheckDelayedMemberExceptionSpecs();
//===--------------------------------------------------------------------===//
// C++ Derived Classes
//
/// ActOnBaseSpecifier - Parsed a base specifier.
CXXBaseSpecifier *CheckBaseSpecifier(CXXRecordDecl *Class,
SourceRange SpecifierRange,
bool Virtual, AccessSpecifier Access,
TypeSourceInfo *TInfo,
SourceLocation EllipsisLoc);
BaseResult ActOnBaseSpecifier(Decl *classdecl,
SourceRange SpecifierRange,
ParsedAttributes &Attrs,
bool Virtual, AccessSpecifier Access,
ParsedType basetype,
SourceLocation BaseLoc,
SourceLocation EllipsisLoc);
bool AttachBaseSpecifiers(CXXRecordDecl *Class,
MutableArrayRef<CXXBaseSpecifier *> Bases);
void ActOnBaseSpecifiers(Decl *ClassDecl,
MutableArrayRef<CXXBaseSpecifier *> Bases);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base);
bool IsDerivedFrom(SourceLocation Loc, QualType Derived, QualType Base,
CXXBasePaths &Paths);
// FIXME: I don't like this name.
void BuildBasePathArray(const CXXBasePaths &Paths, CXXCastPath &BasePath);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
SourceLocation Loc, SourceRange Range,
CXXCastPath *BasePath = nullptr,
bool IgnoreAccess = false);
bool CheckDerivedToBaseConversion(QualType Derived, QualType Base,
unsigned InaccessibleBaseID,
unsigned AmbiguousBaseConvID,
SourceLocation Loc, SourceRange Range,
DeclarationName Name,
CXXCastPath *BasePath,
bool IgnoreAccess = false);
std::string getAmbiguousPathsDisplayString(CXXBasePaths &Paths);
bool CheckOverridingFunctionAttributes(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionReturnType - Checks whether the return types are
/// covariant, according to C++ [class.virtual]p5.
bool CheckOverridingFunctionReturnType(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
/// CheckOverridingFunctionExceptionSpec - Checks whether the overriding
/// function's exception specification is a subset of the base function's.
bool CheckOverridingFunctionExceptionSpec(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
bool CheckPureMethod(CXXMethodDecl *Method, SourceRange InitRange);
/// CheckOverrideControl - Check C++11 override control semantics.
void CheckOverrideControl(NamedDecl *D);
/// DiagnoseAbsenceOfOverrideControl - Diagnose if the 'override' keyword was
/// not used in the declaration of an overriding method.
void DiagnoseAbsenceOfOverrideControl(NamedDecl *D);
/// CheckIfOverriddenFunctionIsMarkedFinal - Checks whether a virtual member
/// function overrides a virtual member function marked 'final', according to
/// C++11 [class.virtual]p4.
bool CheckIfOverriddenFunctionIsMarkedFinal(const CXXMethodDecl *New,
const CXXMethodDecl *Old);
//===--------------------------------------------------------------------===//
// C++ Access Control
//
enum AccessResult {
AR_accessible,
AR_inaccessible,
AR_dependent,
AR_delayed
};
bool SetMemberAccessSpecifier(NamedDecl *MemberDecl,
NamedDecl *PrevMemberDecl,
AccessSpecifier LexicalAS);
AccessResult CheckUnresolvedMemberAccess(UnresolvedMemberExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckUnresolvedLookupAccess(UnresolvedLookupExpr *E,
DeclAccessPair FoundDecl);
AccessResult CheckAllocationAccess(SourceLocation OperatorLoc,
SourceRange PlacementRange,
CXXRecordDecl *NamingClass,
DeclAccessPair FoundDecl,
bool Diagnose = true);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
bool IsCopyBindingRefToTemp = false);
AccessResult CheckConstructorAccess(SourceLocation Loc,
CXXConstructorDecl *D,
DeclAccessPair FoundDecl,
const InitializedEntity &Entity,
const PartialDiagnostic &PDiag);
AccessResult CheckDestructorAccess(SourceLocation Loc,
CXXDestructorDecl *Dtor,
const PartialDiagnostic &PDiag,
QualType objectType = QualType());
AccessResult CheckFriendAccess(NamedDecl *D);
AccessResult CheckMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *NamingClass,
DeclAccessPair Found);
AccessResult
CheckStructuredBindingMemberAccess(SourceLocation UseLoc,
CXXRecordDecl *DecomposedClass,
DeclAccessPair Field);
AccessResult CheckMemberOperatorAccess(SourceLocation Loc,
Expr *ObjectExpr,
Expr *ArgExpr,
DeclAccessPair FoundDecl);
AccessResult CheckAddressOfMemberAccess(Expr *OvlExpr,
DeclAccessPair FoundDecl);
AccessResult CheckBaseClassAccess(SourceLocation AccessLoc,
QualType Base, QualType Derived,
const CXXBasePath &Path,
unsigned DiagID,
bool ForceCheck = false,
bool ForceUnprivileged = false);
void CheckLookupAccess(const LookupResult &R);
bool IsSimplyAccessible(NamedDecl *Decl, CXXRecordDecl *NamingClass,
QualType BaseType);
bool isSpecialMemberAccessibleForDeletion(CXXMethodDecl *decl,
AccessSpecifier access,
QualType objectType);
void HandleDependentAccessCheck(const DependentDiagnostic &DD,
const MultiLevelTemplateArgumentList &TemplateArgs);
void PerformDependentDiagnostics(const DeclContext *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
void HandleDelayedAccessCheck(sema::DelayedDiagnostic &DD, Decl *Ctx);
/// When true, access checking violations are treated as SFINAE
/// failures rather than hard errors.
bool AccessCheckingSFINAE;
enum AbstractDiagSelID {
AbstractNone = -1,
AbstractReturnType,
AbstractParamType,
AbstractVariableType,
AbstractFieldType,
AbstractIvarType,
AbstractSynthesizedIvarType,
AbstractArrayType
};
bool isAbstractType(SourceLocation Loc, QualType T);
bool RequireNonAbstractType(SourceLocation Loc, QualType T,
TypeDiagnoser &Diagnoser);
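/// Convenience overload that builds a BoundTypeDiagnoser from a diagnostic
/// ID and its arguments. A hedged usage sketch (the diagnostic ID below is
/// illustrative only):
/// \code
///   if (S.RequireNonAbstractType(Loc, AllocType,
///                                diag::err_allocation_of_abstract_type))
///     return true;
/// \endcode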
template <typename... Ts>
bool RequireNonAbstractType(SourceLocation Loc, QualType T, unsigned DiagID,
const Ts &...Args) {
BoundTypeDiagnoser<Ts...> Diagnoser(DiagID, Args...);
return RequireNonAbstractType(Loc, T, Diagnoser);
}
void DiagnoseAbstractType(const CXXRecordDecl *RD);
//===--------------------------------------------------------------------===//
// C++ Overloaded Operators [C++ 13.5]
//
bool CheckOverloadedOperatorDeclaration(FunctionDecl *FnDecl);
bool CheckLiteralOperatorDeclaration(FunctionDecl *FnDecl);
//===--------------------------------------------------------------------===//
// C++ Templates [C++ 14]
//
void FilterAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
bool hasAnyAcceptableTemplateNames(LookupResult &R,
bool AllowFunctionTemplates = true,
bool AllowDependent = true,
bool AllowNonTemplateFunctions = false);
/// Try to interpret the lookup result D as a template-name.
///
/// \param D A declaration found by name lookup.
/// \param AllowFunctionTemplates Whether function templates should be
/// considered valid results.
/// \param AllowDependent Whether unresolved using declarations (that might
/// name templates) should be considered valid results.
NamedDecl *getAsTemplateNameDecl(NamedDecl *D,
bool AllowFunctionTemplates = true,
bool AllowDependent = true);
enum class AssumedTemplateKind {
/// This is not assumed to be a template name.
None,
/// This is assumed to be a template name because lookup found nothing.
FoundNothing,
/// This is assumed to be a template name because lookup found one or more
/// functions (but no function templates).
FoundFunctions,
};
bool LookupTemplateName(LookupResult &R, Scope *S, CXXScopeSpec &SS,
QualType ObjectType, bool EnteringContext,
bool &MemberOfUnknownSpecialization,
SourceLocation TemplateKWLoc = SourceLocation(),
AssumedTemplateKind *ATK = nullptr);
TemplateNameKind isTemplateName(Scope *S,
CXXScopeSpec &SS,
bool hasTemplateKeyword,
const UnqualifiedId &Name,
ParsedType ObjectType,
bool EnteringContext,
TemplateTy &Template,
bool &MemberOfUnknownSpecialization);
/// Try to resolve an undeclared template name as a type template.
///
/// Sets II to the identifier corresponding to the template name, and updates
/// Name to a corresponding (typo-corrected) type template name and TNK to
/// the corresponding kind, if possible.
void ActOnUndeclaredTypeTemplateName(Scope *S, TemplateTy &Name,
TemplateNameKind &TNK,
SourceLocation NameLoc,
IdentifierInfo *&II);
bool resolveAssumedTemplateNameAsType(Scope *S, TemplateName &Name,
SourceLocation NameLoc,
bool Diagnose = true);
/// Determine whether a particular identifier might be the name in a C++1z
/// deduction-guide declaration.
bool isDeductionGuideName(Scope *S, const IdentifierInfo &Name,
SourceLocation NameLoc,
ParsedTemplateTy *Template = nullptr);
bool DiagnoseUnknownTemplateName(const IdentifierInfo &II,
SourceLocation IILoc,
Scope *S,
const CXXScopeSpec *SS,
TemplateTy &SuggestedTemplate,
TemplateNameKind &SuggestedKind);
bool DiagnoseUninstantiableTemplate(SourceLocation PointOfInstantiation,
NamedDecl *Instantiation,
bool InstantiatedFromMember,
const NamedDecl *Pattern,
const NamedDecl *PatternDef,
TemplateSpecializationKind TSK,
bool Complain = true);
void DiagnoseTemplateParameterShadow(SourceLocation Loc, Decl *PrevDecl);
TemplateDecl *AdjustDeclIfTemplate(Decl *&Decl);
NamedDecl *ActOnTypeParameter(Scope *S, bool Typename,
SourceLocation EllipsisLoc,
SourceLocation KeyLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth, unsigned Position,
SourceLocation EqualLoc,
ParsedType DefaultArg);
QualType CheckNonTypeTemplateParameterType(TypeSourceInfo *&TSI,
SourceLocation Loc);
QualType CheckNonTypeTemplateParameterType(QualType T, SourceLocation Loc);
NamedDecl *ActOnNonTypeTemplateParameter(Scope *S, Declarator &D,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
Expr *DefaultArg);
NamedDecl *ActOnTemplateTemplateParameter(Scope *S,
SourceLocation TmpLoc,
TemplateParameterList *Params,
SourceLocation EllipsisLoc,
IdentifierInfo *ParamName,
SourceLocation ParamNameLoc,
unsigned Depth,
unsigned Position,
SourceLocation EqualLoc,
ParsedTemplateArgument DefaultArg);
TemplateParameterList *
ActOnTemplateParameterList(unsigned Depth,
SourceLocation ExportLoc,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ArrayRef<NamedDecl *> Params,
SourceLocation RAngleLoc,
Expr *RequiresClause);
/// The context in which we are checking a template parameter list.
enum TemplateParamListContext {
TPC_ClassTemplate,
TPC_VarTemplate,
TPC_FunctionTemplate,
TPC_ClassTemplateMember,
TPC_FriendClassTemplate,
TPC_FriendFunctionTemplate,
TPC_FriendFunctionTemplateDefinition,
TPC_TypeAliasTemplate
};
bool CheckTemplateParameterList(TemplateParameterList *NewParams,
TemplateParameterList *OldParams,
TemplateParamListContext TPC,
SkipBodyInfo *SkipBody = nullptr);
TemplateParameterList *MatchTemplateParametersToScopeSpecifier(
SourceLocation DeclStartLoc, SourceLocation DeclLoc,
const CXXScopeSpec &SS, TemplateIdAnnotation *TemplateId,
ArrayRef<TemplateParameterList *> ParamLists,
bool IsFriend, bool &IsMemberSpecialization, bool &Invalid);
DeclResult CheckClassTemplate(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name, SourceLocation NameLoc,
const ParsedAttributesView &Attr, TemplateParameterList *TemplateParams,
AccessSpecifier AS, SourceLocation ModulePrivateLoc,
SourceLocation FriendLoc, unsigned NumOuterTemplateParamLists,
TemplateParameterList **OuterTemplateParamLists,
SkipBodyInfo *SkipBody = nullptr);
TemplateArgumentLoc getTrivialTemplateArgumentLoc(const TemplateArgument &Arg,
QualType NTTPType,
SourceLocation Loc);
void translateTemplateArguments(const ASTTemplateArgsPtr &In,
TemplateArgumentListInfo &Out);
ParsedTemplateArgument ActOnTemplateTypeArgument(TypeResult ParsedType);
void NoteAllFoundTemplates(TemplateName Name);
QualType CheckTemplateIdType(TemplateName Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs);
TypeResult
ActOnTemplateIdType(Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
TemplateTy Template, IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc, SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs, SourceLocation RAngleLoc,
bool IsCtorOrDtorName = false, bool IsClassName = false);
/// Parsed an elaborated-type-specifier that refers to a template-id,
/// such as \c class T::template apply<U>.
TypeResult ActOnTagTemplateIdType(TagUseKind TUK,
TypeSpecifierType TagSpec,
SourceLocation TagLoc,
CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
TemplateTy TemplateD,
SourceLocation TemplateLoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgsIn,
SourceLocation RAngleLoc);
DeclResult ActOnVarTemplateSpecialization(
Scope *S, Declarator &D, TypeSourceInfo *DI,
SourceLocation TemplateKWLoc, TemplateParameterList *TemplateParams,
StorageClass SC, bool IsPartialSpecialization);
DeclResult CheckVarTemplateId(VarTemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation TemplateNameLoc,
const TemplateArgumentListInfo &TemplateArgs);
ExprResult CheckVarTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
VarTemplateDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult
CheckConceptTemplateId(const CXXScopeSpec &SS,
const DeclarationNameInfo &NameInfo,
ConceptDecl *Template,
SourceLocation TemplateLoc,
const TemplateArgumentListInfo *TemplateArgs);
void diagnoseMissingTemplateArguments(TemplateName Name, SourceLocation Loc);
ExprResult BuildTemplateIdExpr(const CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
LookupResult &R,
bool RequiresADL,
const TemplateArgumentListInfo *TemplateArgs);
ExprResult BuildQualifiedTemplateIdExpr(CXXScopeSpec &SS,
SourceLocation TemplateKWLoc,
const DeclarationNameInfo &NameInfo,
const TemplateArgumentListInfo *TemplateArgs);
TemplateNameKind ActOnDependentTemplateName(
Scope *S, CXXScopeSpec &SS, SourceLocation TemplateKWLoc,
const UnqualifiedId &Name, ParsedType ObjectType, bool EnteringContext,
TemplateTy &Template, bool AllowInjectedClassName = false);
DeclResult ActOnClassTemplateSpecialization(
Scope *S, unsigned TagSpec, TagUseKind TUK, SourceLocation KWLoc,
SourceLocation ModulePrivateLoc, TemplateIdAnnotation &TemplateId,
const ParsedAttributesView &Attr,
MultiTemplateParamsArg TemplateParameterLists,
SkipBodyInfo *SkipBody = nullptr);
bool CheckTemplatePartialSpecializationArgs(SourceLocation Loc,
TemplateDecl *PrimaryTemplate,
unsigned NumExplicitArgs,
ArrayRef<TemplateArgument> Args);
void CheckTemplatePartialSpecialization(
ClassTemplatePartialSpecializationDecl *Partial);
void CheckTemplatePartialSpecialization(
VarTemplatePartialSpecializationDecl *Partial);
Decl *ActOnTemplateDeclarator(Scope *S,
MultiTemplateParamsArg TemplateParameterLists,
Declarator &D);
bool
CheckSpecializationInstantiationRedecl(SourceLocation NewLoc,
TemplateSpecializationKind NewTSK,
NamedDecl *PrevDecl,
TemplateSpecializationKind PrevTSK,
SourceLocation PrevPtOfInstantiation,
bool &SuppressNew);
bool CheckDependentFunctionTemplateSpecialization(FunctionDecl *FD,
const TemplateArgumentListInfo &ExplicitTemplateArgs,
LookupResult &Previous);
bool CheckFunctionTemplateSpecialization(
FunctionDecl *FD, TemplateArgumentListInfo *ExplicitTemplateArgs,
LookupResult &Previous, bool QualifiedFriend = false);
bool CheckMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
void CompleteMemberSpecialization(NamedDecl *Member, LookupResult &Previous);
DeclResult ActOnExplicitInstantiation(
Scope *S, SourceLocation ExternLoc, SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc, const CXXScopeSpec &SS,
TemplateTy Template, SourceLocation TemplateNameLoc,
SourceLocation LAngleLoc, ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc, const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S, SourceLocation ExternLoc,
SourceLocation TemplateLoc,
unsigned TagSpec, SourceLocation KWLoc,
CXXScopeSpec &SS, IdentifierInfo *Name,
SourceLocation NameLoc,
const ParsedAttributesView &Attr);
DeclResult ActOnExplicitInstantiation(Scope *S,
SourceLocation ExternLoc,
SourceLocation TemplateLoc,
Declarator &D);
TemplateArgumentLoc
SubstDefaultTemplateArgumentIfAvailable(TemplateDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
Decl *Param,
SmallVectorImpl<TemplateArgument>
&Converted,
bool &HasDefaultArg);
/// Specifies the context in which a particular template
/// argument is being checked.
enum CheckTemplateArgumentKind {
/// The template argument was specified in the code or was
/// instantiated with some deduced template arguments.
CTAK_Specified,
/// The template argument was deduced via template argument
/// deduction.
CTAK_Deduced,
/// The template argument was deduced from an array bound
/// via template argument deduction.
CTAK_DeducedFromArrayBound
};
bool CheckTemplateArgument(NamedDecl *Param,
TemplateArgumentLoc &Arg,
NamedDecl *Template,
SourceLocation TemplateLoc,
SourceLocation RAngleLoc,
unsigned ArgumentPackIndex,
SmallVectorImpl<TemplateArgument> &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
/// Check that the given template arguments can be provided to
/// the given template, converting the arguments along the way.
///
/// \param Template The template to which the template arguments are being
/// provided.
///
/// \param TemplateLoc The location of the template name in the source.
///
/// \param TemplateArgs The list of template arguments. If the template is
/// a template template parameter, this function may extend the set of
/// template arguments to also include substituted, defaulted template
/// arguments.
///
/// \param PartialTemplateArgs True if the list of template arguments is
/// intentionally partial, e.g., because we're checking just the initial
/// set of template arguments.
///
/// \param Converted Will receive the converted, canonicalized template
/// arguments.
///
/// \param UpdateArgsWithConversions If \c true, update \p TemplateArgs to
/// contain the converted forms of the template arguments as written.
/// Otherwise, \p TemplateArgs will not be modified.
///
/// \returns true if an error occurred, false otherwise.
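///
/// A minimal call sketch (hedged; 'Template', 'TemplateLoc' and
/// 'TemplateArgs' are assumed from the caller):
/// \code
///   SmallVector<TemplateArgument, 4> Converted;
///   if (S.CheckTemplateArgumentList(Template, TemplateLoc, TemplateArgs,
///                                   /*PartialTemplateArgs=*/false,
///                                   Converted))
///     return QualType();  // invalid; diagnostics already emitted
/// \endcode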
bool CheckTemplateArgumentList(TemplateDecl *Template,
SourceLocation TemplateLoc,
TemplateArgumentListInfo &TemplateArgs,
bool PartialTemplateArgs,
SmallVectorImpl<TemplateArgument> &Converted,
bool UpdateArgsWithConversions = true);
bool CheckTemplateTypeArgument(TemplateTypeParmDecl *Param,
TemplateArgumentLoc &Arg,
SmallVectorImpl<TemplateArgument> &Converted);
bool CheckTemplateArgument(TemplateTypeParmDecl *Param,
TypeSourceInfo *Arg);
ExprResult CheckTemplateArgument(NonTypeTemplateParmDecl *Param,
QualType InstantiatedParamType, Expr *Arg,
TemplateArgument &Converted,
CheckTemplateArgumentKind CTAK = CTAK_Specified);
bool CheckTemplateTemplateArgument(TemplateParameterList *Params,
TemplateArgumentLoc &Arg);
ExprResult
BuildExpressionFromDeclTemplateArgument(const TemplateArgument &Arg,
QualType ParamType,
SourceLocation Loc);
ExprResult
BuildExpressionFromIntegralTemplateArgument(const TemplateArgument &Arg,
SourceLocation Loc);
/// Enumeration describing how template parameter lists are compared
/// for equality.
enum TemplateParameterListEqualKind {
/// We are matching the template parameter lists of two templates
/// that might be redeclarations.
///
/// \code
/// template<typename T> struct X;
/// template<typename T> struct X;
/// \endcode
TPL_TemplateMatch,
/// We are matching the template parameter lists of two template
/// template parameters as part of matching the template parameter lists
/// of two templates that might be redeclarations.
///
/// \code
/// template<template<int I> class TT> struct X;
/// template<template<int Value> class Other> struct X;
/// \endcode
TPL_TemplateTemplateParmMatch,
/// We are matching the template parameter lists of a template
/// template argument against the template parameter lists of a template
/// template parameter.
///
/// \code
/// template<template<int Value> class Metafun> struct X;
/// template<int Value> struct integer_c;
/// X<integer_c> xic;
/// \endcode
TPL_TemplateTemplateArgumentMatch
};
bool TemplateParameterListsAreEqual(TemplateParameterList *New,
TemplateParameterList *Old,
bool Complain,
TemplateParameterListEqualKind Kind,
SourceLocation TemplateArgLoc
= SourceLocation());
bool CheckTemplateDeclScope(Scope *S, TemplateParameterList *TemplateParams);
/// Called when the parser has parsed a C++ typename
/// specifier, e.g., "typename T::type".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param II the identifier we're retrieving (e.g., 'type' in the example).
/// \param IdLoc the location of the identifier.
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS, const IdentifierInfo &II,
SourceLocation IdLoc);
/// Called when the parser has parsed a C++ typename
/// specifier that ends in a template-id, e.g.,
/// "typename MetaFun::template apply<T1, T2>".
///
/// \param S The scope in which this typename type occurs.
/// \param TypenameLoc the location of the 'typename' keyword
/// \param SS the nested-name-specifier following the typename (e.g., 'T::').
/// \param TemplateLoc the location of the 'template' keyword, if any.
/// \param TemplateName The template name.
/// \param TemplateII The identifier used to name the template.
/// \param TemplateIILoc The location of the template name.
/// \param LAngleLoc The location of the opening angle bracket ('<').
/// \param TemplateArgs The template arguments.
/// \param RAngleLoc The location of the closing angle bracket ('>').
TypeResult
ActOnTypenameType(Scope *S, SourceLocation TypenameLoc,
const CXXScopeSpec &SS,
SourceLocation TemplateLoc,
TemplateTy TemplateName,
IdentifierInfo *TemplateII,
SourceLocation TemplateIILoc,
SourceLocation LAngleLoc,
ASTTemplateArgsPtr TemplateArgs,
SourceLocation RAngleLoc);
QualType CheckTypenameType(ElaboratedTypeKeyword Keyword,
SourceLocation KeywordLoc,
NestedNameSpecifierLoc QualifierLoc,
const IdentifierInfo &II,
SourceLocation IILoc);
TypeSourceInfo *RebuildTypeInCurrentInstantiation(TypeSourceInfo *T,
SourceLocation Loc,
DeclarationName Name);
bool RebuildNestedNameSpecifierInCurrentInstantiation(CXXScopeSpec &SS);
ExprResult RebuildExprInCurrentInstantiation(Expr *E);
bool RebuildTemplateParamsInCurrentInstantiation(
TemplateParameterList *Params);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgumentList &Args);
std::string
getTemplateArgumentBindingsText(const TemplateParameterList *Params,
const TemplateArgument *Args,
unsigned NumArgs);
//===--------------------------------------------------------------------===//
// C++ Concepts
//
Decl *ActOnConceptDefinition(
Scope *S, MultiTemplateParamsArg TemplateParameterLists,
IdentifierInfo *Name, SourceLocation NameLoc, Expr *ConstraintExpr);
//===--------------------------------------------------------------------===//
// C++ Variadic Templates (C++0x [temp.variadic])
//===--------------------------------------------------------------------===//
/// Determine whether an unexpanded parameter pack might be permitted in this
/// location. Useful for error recovery.
bool isUnexpandedParameterPackPermitted();
/// The context in which an unexpanded parameter pack is
/// being diagnosed.
///
/// Note that the values of this enumeration line up with the first
/// argument to the \c err_unexpanded_parameter_pack diagnostic.
enum UnexpandedParameterPackContext {
/// An arbitrary expression.
UPPC_Expression = 0,
/// The base type of a class type.
UPPC_BaseType,
/// The type of an arbitrary declaration.
UPPC_DeclarationType,
/// The type of a data member.
UPPC_DataMemberType,
/// The size of a bit-field.
UPPC_BitFieldWidth,
/// The expression in a static assertion.
UPPC_StaticAssertExpression,
/// The fixed underlying type of an enumeration.
UPPC_FixedUnderlyingType,
/// The enumerator value.
UPPC_EnumeratorValue,
/// A using declaration.
UPPC_UsingDeclaration,
/// A friend declaration.
UPPC_FriendDeclaration,
/// A declaration qualifier.
UPPC_DeclarationQualifier,
/// An initializer.
UPPC_Initializer,
/// A default argument.
UPPC_DefaultArgument,
/// The type of a non-type template parameter.
UPPC_NonTypeTemplateParameterType,
/// The type of an exception.
UPPC_ExceptionType,
/// Partial specialization.
UPPC_PartialSpecialization,
/// Microsoft __if_exists.
UPPC_IfExists,
/// Microsoft __if_not_exists.
UPPC_IfNotExists,
/// Lambda expression.
UPPC_Lambda,
/// Block expression.
UPPC_Block
};
/// Diagnose unexpanded parameter packs.
///
/// \param Loc The location at which we should emit the diagnostic.
///
/// \param UPPC The context in which we are diagnosing unexpanded
/// parameter packs.
///
/// \param Unexpanded the set of unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
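///
/// A hedged sketch of the collect-then-diagnose idiom (assuming 'T' is a
/// type written in a position where no unexpanded pack may appear):
/// \code
///   SmallVector<UnexpandedParameterPack, 2> Unexpanded;
///   S.collectUnexpandedParameterPacks(T, Unexpanded);
///   if (!Unexpanded.empty() &&
///       S.DiagnoseUnexpandedParameterPacks(Loc, Sema::UPPC_DeclarationType,
///                                          Unexpanded))
///     return true;
/// \endcode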
bool DiagnoseUnexpandedParameterPacks(SourceLocation Loc,
UnexpandedParameterPackContext UPPC,
ArrayRef<UnexpandedParameterPack> Unexpanded);
/// If the given type contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The source location where a diagnostic should be emitted.
///
/// \param T The type that is being checked for unexpanded parameter
/// packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc, TypeSourceInfo *T,
UnexpandedParameterPackContext UPPC);
/// If the given expression contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param E The expression that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(Expr *E,
UnexpandedParameterPackContext UPPC = UPPC_Expression);
/// If the given nested-name-specifier contains an unexpanded
/// parameter pack, diagnose the error.
///
/// \param SS The nested-name-specifier that is being checked for
/// unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const CXXScopeSpec &SS,
UnexpandedParameterPackContext UPPC);
/// If the given name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param NameInfo The name (with source location information) that
/// is being checked for unexpanded parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(const DeclarationNameInfo &NameInfo,
UnexpandedParameterPackContext UPPC);
/// If the given template name contains an unexpanded parameter pack,
/// diagnose the error.
///
/// \param Loc The location of the template name.
///
/// \param Template The template name that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(SourceLocation Loc,
TemplateName Template,
UnexpandedParameterPackContext UPPC);
/// If the given template argument contains an unexpanded parameter
/// pack, diagnose the error.
///
/// \param Arg The template argument that is being checked for unexpanded
/// parameter packs.
///
/// \returns true if an error occurred, false otherwise.
bool DiagnoseUnexpandedParameterPack(TemplateArgumentLoc Arg,
UnexpandedParameterPackContext UPPC);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgument Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// template argument.
///
/// \param Arg The template argument that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TemplateArgumentLoc Arg,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param T The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(QualType T,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// type.
///
/// \param TL The type that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(TypeLoc TL,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// nested-name-specifier.
///
/// \param NNS The nested-name-specifier that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(NestedNameSpecifierLoc NNS,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Collect the set of unexpanded parameter packs within the given
/// name.
///
/// \param NameInfo The name that will be traversed to find
/// unexpanded parameter packs.
void collectUnexpandedParameterPacks(const DeclarationNameInfo &NameInfo,
SmallVectorImpl<UnexpandedParameterPack> &Unexpanded);
/// Invoked when parsing a template argument followed by an
/// ellipsis, which creates a pack expansion.
///
/// \param Arg The template argument preceding the ellipsis, which
/// may already be invalid.
///
/// \param EllipsisLoc The location of the ellipsis.
ParsedTemplateArgument ActOnPackExpansion(const ParsedTemplateArgument &Arg,
SourceLocation EllipsisLoc);
/// Invoked when parsing a type followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Type The type preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
TypeResult ActOnPackExpansion(ParsedType Type, SourceLocation EllipsisLoc);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
TypeSourceInfo *CheckPackExpansion(TypeSourceInfo *Pattern,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Construct a pack expansion type from the pattern of the pack
/// expansion.
QualType CheckPackExpansion(QualType Pattern,
SourceRange PatternRange,
SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
ExprResult ActOnPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc);
/// Invoked when parsing an expression followed by an ellipsis, which
/// creates a pack expansion.
///
/// \param Pattern The expression preceding the ellipsis, which will become
/// the pattern of the pack expansion.
///
/// \param EllipsisLoc The location of the ellipsis.
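///
/// For illustration, in
/// \code
///   template<typename ...Ts> void g(Ts ...ts) { f(ts...); }
/// \endcode
/// the call argument 'ts...' is a pack expansion whose pattern is the
/// expression 'ts'.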
ExprResult CheckPackExpansion(Expr *Pattern, SourceLocation EllipsisLoc,
Optional<unsigned> NumExpansions);
/// Determine whether we could expand a pack expansion with the
/// given set of parameter packs into separate arguments by repeatedly
/// transforming the pattern.
///
/// \param EllipsisLoc The location of the ellipsis that identifies the
/// pack expansion.
///
/// \param PatternRange The source range that covers the entire pattern of
/// the pack expansion.
///
/// \param Unexpanded The set of unexpanded parameter packs within the
/// pattern.
///
/// \param ShouldExpand Will be set to \c true if the transformer should
/// expand the corresponding pack expansions into separate arguments. When
/// set, \c NumExpansions must also be set.
///
/// \param RetainExpansion Whether the caller should add an unexpanded
/// pack expansion after all of the expanded arguments. This is used
/// when extending explicitly-specified template argument packs per
/// C++0x [temp.arg.explicit]p9.
///
/// \param NumExpansions The number of separate arguments that will be in
/// the expanded form of the corresponding pack expansion. This is both an
/// input and an output parameter, which can be set by the caller if the
/// number of expansions is known a priori (e.g., due to a prior substitution)
/// and will be set by the callee when the number of expansions is known.
/// The callee must set this value when \c ShouldExpand is \c true; it may
/// set this value in other cases.
///
/// \returns true if an error occurred (e.g., because the parameter packs
/// are to be instantiated with arguments of different lengths), false
/// otherwise. If false, \c ShouldExpand (and possibly \c NumExpansions)
/// must be set.
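///
/// For example (illustrative only), when expanding the pattern in
/// \code
///   template<typename ...Ts, typename ...Us> void h(std::pair<Ts, Us>...);
/// \endcode
/// both packs must have the same length: deducing Ts as <int, float> and
/// Us as <char> is an error, whereas two elements in each pack yield
/// \c ShouldExpand set to \c true and \c NumExpansions set to 2.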
bool CheckParameterPacksForExpansion(SourceLocation EllipsisLoc,
SourceRange PatternRange,
ArrayRef<UnexpandedParameterPack> Unexpanded,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool &ShouldExpand,
bool &RetainExpansion,
Optional<unsigned> &NumExpansions);
/// Determine the number of arguments in the given pack expansion
/// type.
///
/// This routine assumes that the number of arguments in the expansion is
/// consistent across all of the unexpanded parameter packs in its pattern.
///
/// Returns an empty Optional if the type can't be expanded.
Optional<unsigned> getNumArgumentsInExpansion(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Determine whether the given declarator contains any unexpanded
/// parameter packs.
///
/// This routine is used by the parser to disambiguate function declarators
/// with an ellipsis prior to the ')', e.g.,
///
/// \code
/// void f(T...);
/// \endcode
///
/// To determine whether we have an (unnamed) function parameter pack or
/// a variadic function.
///
/// \returns true if the declarator contains any unexpanded parameter packs,
/// false otherwise.
bool containsUnexpandedParameterPacks(Declarator &D);
/// Returns the pattern of the pack expansion for a template argument.
///
/// \param OrigLoc The template argument to expand.
///
/// \param Ellipsis Will be set to the location of the ellipsis.
///
/// \param NumExpansions Will be set to the number of expansions that will
/// be generated from this pack expansion, if known a priori.
TemplateArgumentLoc getTemplateArgumentPackExpansionPattern(
TemplateArgumentLoc OrigLoc,
SourceLocation &Ellipsis,
Optional<unsigned> &NumExpansions) const;
/// Given a template argument that contains an unexpanded parameter pack, but
/// which has already been substituted, attempt to determine the number of
/// elements that will be produced once this argument is fully-expanded.
///
/// This is intended for use when transforming 'sizeof...(Arg)' in order to
/// avoid actually expanding the pack where possible.
Optional<unsigned> getFullyPackExpandedSize(TemplateArgument Arg);
//===--------------------------------------------------------------------===//
// C++ Template Argument Deduction (C++ [temp.deduct])
//===--------------------------------------------------------------------===//
/// Adjust the type \p ArgFunctionType to match the calling convention,
/// noreturn, and optionally the exception specification of \p FunctionType.
/// Deduction often wants to ignore these properties when matching function
/// types.
QualType adjustCCAndNoReturn(QualType ArgFunctionType, QualType FunctionType,
bool AdjustExceptionSpec = false);
/// Describes the result of template argument deduction.
///
/// The TemplateDeductionResult enumeration describes the result of
/// template argument deduction, as returned from
/// DeduceTemplateArguments(). The separate TemplateDeductionInfo
/// structure provides additional information about the results of
/// template argument deduction, e.g., the deduced template argument
/// list (if successful) or the specific template parameters or
/// deduced arguments that were involved in the failure.
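///
/// For illustration (not exhaustive): given
/// \code
///   template<typename T> void f(T, T);
/// \endcode
/// the call f(1, 2.0) yields TDK_Inconsistent because T is deduced as both
/// 'int' and 'double', while f() yields TDK_TooFewArguments.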
enum TemplateDeductionResult {
/// Template argument deduction was successful.
TDK_Success = 0,
/// The declaration was invalid; do nothing.
TDK_Invalid,
/// Template argument deduction exceeded the maximum template
/// instantiation depth (which has already been diagnosed).
TDK_InstantiationDepth,
/// Template argument deduction did not deduce a value
/// for every template parameter.
TDK_Incomplete,
/// Template argument deduction did not deduce a value for every
/// expansion of an expanded template parameter pack.
TDK_IncompletePack,
/// Template argument deduction produced inconsistent
/// deduced values for the given template parameter.
TDK_Inconsistent,
/// Template argument deduction failed due to inconsistent
/// cv-qualifiers on a template parameter type that would
/// otherwise be deduced, e.g., we tried to deduce T in "const T"
/// but were given a non-const "X".
TDK_Underqualified,
/// Substitution of the deduced template argument values
/// resulted in an error.
TDK_SubstitutionFailure,
/// After substituting deduced template arguments, a dependent
/// parameter type did not match the corresponding argument.
TDK_DeducedMismatch,
/// After substituting deduced template arguments, an element of
/// a dependent parameter type did not match the corresponding element
/// of the corresponding argument (when deducing from an initializer list).
TDK_DeducedMismatchNested,
/// A non-dependent component of the parameter did not match the
/// corresponding component of the argument.
TDK_NonDeducedMismatch,
/// When performing template argument deduction for a function
/// template, there were too many call arguments.
TDK_TooManyArguments,
/// When performing template argument deduction for a function
/// template, there were too few call arguments.
TDK_TooFewArguments,
/// The explicitly-specified template arguments were not valid
/// template arguments for the given template.
TDK_InvalidExplicitArguments,
/// Checking non-dependent argument conversions failed.
TDK_NonDependentConversionFailure,
/// Deduction failed; that's all we know.
TDK_MiscellaneousDeductionFailure,
/// CUDA Target attributes do not match.
TDK_CUDATargetMismatch
};
TemplateDeductionResult
DeduceTemplateArguments(ClassTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(VarTemplatePartialSpecializationDecl *Partial,
const TemplateArgumentList &TemplateArgs,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult SubstituteExplicitTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo &ExplicitTemplateArgs,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
SmallVectorImpl<QualType> &ParamTypes, QualType *FunctionType,
sema::TemplateDeductionInfo &Info);
/// A function argument from which we performed template argument
/// deduction for a call.
struct OriginalCallArg {
OriginalCallArg(QualType OriginalParamType, bool DecomposedParam,
unsigned ArgIdx, QualType OriginalArgType)
: OriginalParamType(OriginalParamType),
DecomposedParam(DecomposedParam), ArgIdx(ArgIdx),
OriginalArgType(OriginalArgType) {}
QualType OriginalParamType;
bool DecomposedParam;
unsigned ArgIdx;
QualType OriginalArgType;
};
TemplateDeductionResult FinishTemplateArgumentDeduction(
FunctionTemplateDecl *FunctionTemplate,
SmallVectorImpl<DeducedTemplateArgument> &Deduced,
unsigned NumExplicitlySpecified, FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
SmallVectorImpl<OriginalCallArg> const *OriginalCallArgs = nullptr,
bool PartialOverloading = false,
llvm::function_ref<bool()> CheckNonDependent = []{ return false; });
TemplateDeductionResult DeduceTemplateArguments(
FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs, ArrayRef<Expr *> Args,
FunctionDecl *&Specialization, sema::TemplateDeductionInfo &Info,
bool PartialOverloading,
llvm::function_ref<bool(ArrayRef<QualType>)> CheckNonDependent);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
QualType ArgFunctionType,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
QualType ToType,
CXXConversionDecl *&Specialization,
sema::TemplateDeductionInfo &Info);
TemplateDeductionResult
DeduceTemplateArguments(FunctionTemplateDecl *FunctionTemplate,
TemplateArgumentListInfo *ExplicitTemplateArgs,
FunctionDecl *&Specialization,
sema::TemplateDeductionInfo &Info,
bool IsAddressOfFunction = false);
/// Substitute Replacement for \p auto in \p TypeWithAuto
QualType SubstAutoType(QualType TypeWithAuto, QualType Replacement);
/// Substitute Replacement for auto in TypeWithAuto
TypeSourceInfo* SubstAutoTypeSourceInfo(TypeSourceInfo *TypeWithAuto,
QualType Replacement);
/// Completely replace the \c auto in \p TypeWithAuto by
/// \p Replacement. This does not retain any \c auto type sugar.
QualType ReplaceAutoType(QualType TypeWithAuto, QualType Replacement);
/// Result type of DeduceAutoType.
enum DeduceAutoResult {
DAR_Succeeded,
DAR_Failed,
DAR_FailedAlreadyDiagnosed
};
DeduceAutoResult
DeduceAutoType(TypeSourceInfo *AutoType, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
DeduceAutoResult
DeduceAutoType(TypeLoc AutoTypeLoc, Expr *&Initializer, QualType &Result,
Optional<unsigned> DependentDeductionDepth = None);
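// Illustrative behavior of auto deduction (sketch):
//   auto x = 42;        // DAR_Succeeded; Result is 'int'
//   auto y = {1, 2.0};  // fails: no common type for the braced-list elements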
void DiagnoseAutoDeductionFailure(VarDecl *VDecl, Expr *Init);
bool DeduceReturnType(FunctionDecl *FD, SourceLocation Loc,
bool Diagnose = true);
/// Declare implicit deduction guides for a class template if we've
/// not already done so.
void DeclareImplicitDeductionGuides(TemplateDecl *Template,
SourceLocation Loc);
QualType DeduceTemplateSpecializationFromInitializer(
TypeSourceInfo *TInfo, const InitializedEntity &Entity,
const InitializationKind &Kind, MultiExprArg Init);
QualType deduceVarTypeFromInitializer(VarDecl *VDecl, DeclarationName Name,
QualType Type, TypeSourceInfo *TSI,
SourceRange Range, bool DirectInit,
Expr *Init);
TypeLoc getReturnTypeLoc(FunctionDecl *FD) const;
bool DeduceFunctionTypeFromReturnExpr(FunctionDecl *FD,
SourceLocation ReturnLoc,
Expr *&RetExpr, AutoType *AT);
FunctionTemplateDecl *getMoreSpecializedTemplate(FunctionTemplateDecl *FT1,
FunctionTemplateDecl *FT2,
SourceLocation Loc,
TemplatePartialOrderingContext TPOC,
unsigned NumCallArguments1,
unsigned NumCallArguments2);
UnresolvedSetIterator
getMostSpecialized(UnresolvedSetIterator SBegin, UnresolvedSetIterator SEnd,
TemplateSpecCandidateSet &FailedCandidates,
SourceLocation Loc,
const PartialDiagnostic &NoneDiag,
const PartialDiagnostic &AmbigDiag,
const PartialDiagnostic &CandidateDiag,
bool Complain = true, QualType TargetType = QualType());
ClassTemplatePartialSpecializationDecl *
getMoreSpecializedPartialSpecialization(
ClassTemplatePartialSpecializationDecl *PS1,
ClassTemplatePartialSpecializationDecl *PS2,
SourceLocation Loc);
bool isMoreSpecializedThanPrimary(ClassTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
VarTemplatePartialSpecializationDecl *getMoreSpecializedPartialSpecialization(
VarTemplatePartialSpecializationDecl *PS1,
VarTemplatePartialSpecializationDecl *PS2, SourceLocation Loc);
bool isMoreSpecializedThanPrimary(VarTemplatePartialSpecializationDecl *T,
sema::TemplateDeductionInfo &Info);
bool isTemplateTemplateParameterAtLeastAsSpecializedAs(
TemplateParameterList *P, TemplateDecl *AArg, SourceLocation Loc);
void MarkUsedTemplateParameters(const TemplateArgumentList &TemplateArgs,
bool OnlyDeduced,
unsigned Depth,
llvm::SmallBitVector &Used);
void MarkDeducedTemplateParameters(
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced) {
return MarkDeducedTemplateParameters(Context, FunctionTemplate, Deduced);
}
static void MarkDeducedTemplateParameters(ASTContext &Ctx,
const FunctionTemplateDecl *FunctionTemplate,
llvm::SmallBitVector &Deduced);
//===--------------------------------------------------------------------===//
// C++ Template Instantiation
//
MultiLevelTemplateArgumentList
getTemplateInstantiationArgs(NamedDecl *D,
const TemplateArgumentList *Innermost = nullptr,
bool RelativeToPrimary = false,
const FunctionDecl *Pattern = nullptr);
/// A context in which code is being synthesized (where a source location
/// alone is not sufficient to identify the context). This covers template
/// instantiation and various forms of implicitly-generated functions.
struct CodeSynthesisContext {
/// The kind of template instantiation we are performing
enum SynthesisKind {
/// We are instantiating a template declaration. The entity is
/// the declaration we're instantiating (e.g., a CXXRecordDecl).
TemplateInstantiation,
/// We are instantiating a default argument for a template
/// parameter. The Entity is the template parameter whose argument is
/// being instantiated, the Template is the template, and the
/// TemplateArgs/NumTemplateArguments provide the template arguments as
/// specified.
DefaultTemplateArgumentInstantiation,
/// We are instantiating a default argument for a function.
/// The Entity is the ParmVarDecl, and TemplateArgs/NumTemplateArgs
/// provides the template arguments as specified.
DefaultFunctionArgumentInstantiation,
/// We are substituting explicit template arguments provided for
/// a function template. The entity is a FunctionTemplateDecl.
ExplicitTemplateArgumentSubstitution,
/// We are substituting template arguments determined as part of
/// template argument deduction for either a class template
/// partial specialization or a function template. The
/// Entity is either a {Class|Var}TemplatePartialSpecializationDecl or
/// a TemplateDecl.
DeducedTemplateArgumentSubstitution,
/// We are substituting prior template arguments into a new
/// template parameter. The template parameter itself is either a
/// NonTypeTemplateParmDecl or a TemplateTemplateParmDecl.
PriorTemplateArgumentSubstitution,
/// We are checking the validity of a default template argument that
/// has been used when naming a template-id.
DefaultTemplateArgumentChecking,
/// We are computing the exception specification for a defaulted special
/// member function.
ExceptionSpecEvaluation,
/// We are instantiating the exception specification for a function
/// template which was deferred until it was needed.
ExceptionSpecInstantiation,
/// We are declaring an implicit special member function.
DeclaringSpecialMember,
/// We are defining a synthesized function (such as a defaulted special
/// member).
DefiningSynthesizedFunction,
/// Added for Template instantiation observation.
/// Memoization means we are _not_ instantiating a template because
/// it is already instantiated (but we entered a context where we
/// would have had to if it was not already instantiated).
Memoization
} Kind;
/// Was the enclosing context a non-instantiation SFINAE context?
bool SavedInNonInstantiationSFINAEContext;
/// The point of instantiation or synthesis within the source code.
SourceLocation PointOfInstantiation;
/// The entity that is being synthesized.
Decl *Entity;
/// The template (or partial specialization) in which we are
/// performing the instantiation, for substitutions of prior template
/// arguments.
NamedDecl *Template;
/// The list of template arguments we are substituting, if they
/// are not part of the entity.
const TemplateArgument *TemplateArgs;
// FIXME: Wrap this union around more members, or perhaps store the
// kind-specific members in the RAII object owning the context.
union {
/// The number of template arguments in TemplateArgs.
unsigned NumTemplateArgs;
/// The special member being declared or defined.
CXXSpecialMember SpecialMember;
};
ArrayRef<TemplateArgument> template_arguments() const {
assert(Kind != DeclaringSpecialMember);
return {TemplateArgs, NumTemplateArgs};
}
/// The template deduction info object associated with the
/// substitution or checking of explicit or deduced template arguments.
sema::TemplateDeductionInfo *DeductionInfo;
/// The source range that covers the construct that causes
/// the instantiation, e.g., the template-id that causes a class
/// template instantiation.
SourceRange InstantiationRange;
CodeSynthesisContext()
: Kind(TemplateInstantiation),
SavedInNonInstantiationSFINAEContext(false), Entity(nullptr),
Template(nullptr), TemplateArgs(nullptr), NumTemplateArgs(0),
DeductionInfo(nullptr) {}
/// Determines whether this template is an actual instantiation
/// that should be counted toward the maximum instantiation depth.
bool isInstantiationRecord() const;
};
/// List of active code synthesis contexts.
///
/// This vector is treated as a stack. As synthesis of one entity requires
/// synthesis of another, additional contexts are pushed onto the stack.
SmallVector<CodeSynthesisContext, 16> CodeSynthesisContexts;
/// Specializations whose definitions are currently being instantiated.
llvm::DenseSet<std::pair<Decl *, unsigned>> InstantiatingSpecializations;
/// Non-dependent types used in templates that have already been instantiated
/// by some template instantiation.
llvm::DenseSet<QualType> InstantiatedNonDependentTypes;
/// Extra modules inspected when performing a lookup during a template
/// instantiation. Computed lazily.
SmallVector<Module*, 16> CodeSynthesisContextLookupModules;
/// Cache of additional modules that should be used for name lookup
/// within the current template instantiation. Computed lazily; use
/// getLookupModules() to get a complete set.
llvm::DenseSet<Module*> LookupModulesCache;
/// Get the set of additional modules that should be checked during
/// name lookup. A module and its imports become visible when instantiating a
/// template defined within it.
llvm::DenseSet<Module*> &getLookupModules();
/// Map from the most recent declaration of a namespace to the most
/// recent visible declaration of that namespace.
llvm::DenseMap<NamedDecl*, NamedDecl*> VisibleNamespaceCache;
/// Whether we are in a SFINAE context that is not associated with
/// template instantiation.
///
/// This is used when setting up a SFINAE trap (\c see SFINAETrap) outside
/// of a template instantiation or template argument deduction.
bool InNonInstantiationSFINAEContext;
/// The number of \p CodeSynthesisContexts that are not template
/// instantiations and, therefore, should not be counted as part of the
/// instantiation depth.
///
/// When the instantiation depth reaches the user-configurable limit
/// \p LangOptions::InstantiationDepth we will abort instantiation.
// FIXME: Should we have a similar limit for other forms of synthesis?
unsigned NonInstantiationEntries;
/// The depth of the context stack at the point when the most recent
/// error or warning was produced.
///
/// This value is used to suppress printing of redundant context stacks
/// when there are multiple errors or warnings in the same instantiation.
// FIXME: Does this belong in Sema? It's tough to implement it anywhere else.
unsigned LastEmittedCodeSynthesisContextDepth = 0;
/// The template instantiation callbacks to trace or track
/// instantiations (objects can be chained).
///
/// These callbacks are used to print, trace, or track template
/// instantiations as they are being constructed.
std::vector<std::unique_ptr<TemplateInstantiationCallback>>
TemplateInstCallbacks;
/// The current index into pack expansion arguments that will be
/// used for substitution of parameter packs.
///
/// The pack expansion index will be -1 to indicate that parameter packs
/// should be instantiated as themselves. Otherwise, the index specifies
/// which argument within the parameter pack will be used for substitution.
int ArgumentPackSubstitutionIndex;
/// RAII object used to change the argument pack substitution index
/// within a \c Sema object.
///
/// See \c ArgumentPackSubstitutionIndex for more information.
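///
/// A minimal usage sketch (hypothetical call site):
/// \code
///   ArgumentPackSubstitutionIndexRAII SubstIndex(SemaRef, I);
///   // ... substitute the I-th element of the parameter pack ...
/// \endcode
/// The previous index is restored when the object goes out of scope.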
class ArgumentPackSubstitutionIndexRAII {
Sema &Self;
int OldSubstitutionIndex;
public:
ArgumentPackSubstitutionIndexRAII(Sema &Self, int NewSubstitutionIndex)
: Self(Self), OldSubstitutionIndex(Self.ArgumentPackSubstitutionIndex) {
Self.ArgumentPackSubstitutionIndex = NewSubstitutionIndex;
}
~ArgumentPackSubstitutionIndexRAII() {
Self.ArgumentPackSubstitutionIndex = OldSubstitutionIndex;
}
};
friend class ArgumentPackSubstitutionRAII;
/// For each declaration that involved template argument deduction, the
/// set of diagnostics that were suppressed during that template argument
/// deduction.
///
/// FIXME: Serialize this structure to the AST file.
typedef llvm::DenseMap<Decl *, SmallVector<PartialDiagnosticAt, 1> >
SuppressedDiagnosticsMap;
SuppressedDiagnosticsMap SuppressedDiagnostics;
/// A stack object to be created when performing template
/// instantiation.
///
/// Construction of an object of type \c InstantiatingTemplate
/// pushes the current instantiation onto the stack of active
/// instantiations. If the size of this stack exceeds the maximum
/// number of recursive template instantiations, construction
/// produces an error and \c isInvalid() returns true.
///
/// Destruction of this object will pop the named instantiation off
/// the stack.
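///
/// A typical usage sketch (illustrative, simplified from real call sites):
/// \code
///   InstantiatingTemplate Inst(*this, PointOfInstantiation, Entity);
///   if (Inst.isInvalid())
///     return; // instantiation depth exceeded; already diagnosed
/// \endcode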
struct InstantiatingTemplate {
/// Note that we are instantiating a class template,
/// function template, variable template, alias template,
/// or a member thereof.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
Decl *Entity,
SourceRange InstantiationRange = SourceRange());
struct ExceptionSpecification {};
/// Note that we are instantiating an exception specification
/// of a function template.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionDecl *Entity, ExceptionSpecification,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument in a
/// template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateParameter Param, TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting either explicitly-specified or
/// deduced template arguments during function template argument deduction.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
FunctionTemplateDecl *FunctionTemplate,
ArrayRef<TemplateArgument> TemplateArgs,
CodeSynthesisContext::SynthesisKind Kind,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template declaration.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a class template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ClassTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating as part of template
/// argument deduction for a variable template partial
/// specialization.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
VarTemplatePartialSpecializationDecl *PartialSpec,
ArrayRef<TemplateArgument> TemplateArgs,
sema::TemplateDeductionInfo &DeductionInfo,
SourceRange InstantiationRange = SourceRange());
/// Note that we are instantiating a default argument for a function
/// parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
ParmVarDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange = SourceRange());
/// Note that we are substituting prior template arguments into a
/// non-type parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
NonTypeTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are substituting prior template arguments into a
/// template template parameter.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
NamedDecl *Template,
TemplateTemplateParmDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we are checking the default template argument
/// against the template parameter for a given template-id.
InstantiatingTemplate(Sema &SemaRef, SourceLocation PointOfInstantiation,
TemplateDecl *Template,
NamedDecl *Param,
ArrayRef<TemplateArgument> TemplateArgs,
SourceRange InstantiationRange);
/// Note that we have finished instantiating this template.
void Clear();
~InstantiatingTemplate() { Clear(); }
/// Determines whether we have exceeded the maximum
/// recursive template instantiations.
bool isInvalid() const { return Invalid; }
/// Determine whether we are already instantiating this
/// specialization in some surrounding active instantiation.
bool isAlreadyInstantiating() const { return AlreadyInstantiating; }
private:
Sema &SemaRef;
bool Invalid;
bool AlreadyInstantiating;
bool CheckInstantiationDepth(SourceLocation PointOfInstantiation,
SourceRange InstantiationRange);
InstantiatingTemplate(
Sema &SemaRef, CodeSynthesisContext::SynthesisKind Kind,
SourceLocation PointOfInstantiation, SourceRange InstantiationRange,
Decl *Entity, NamedDecl *Template = nullptr,
ArrayRef<TemplateArgument> TemplateArgs = None,
sema::TemplateDeductionInfo *DeductionInfo = nullptr);
InstantiatingTemplate(const InstantiatingTemplate&) = delete;
InstantiatingTemplate&
operator=(const InstantiatingTemplate&) = delete;
};
void pushCodeSynthesisContext(CodeSynthesisContext Ctx);
void popCodeSynthesisContext();
/// Determine whether we are currently performing template instantiation.
bool inTemplateInstantiation() const {
return CodeSynthesisContexts.size() > NonInstantiationEntries;
}
void PrintContextStack() {
if (!CodeSynthesisContexts.empty() &&
CodeSynthesisContexts.size() != LastEmittedCodeSynthesisContextDepth) {
PrintInstantiationStack();
LastEmittedCodeSynthesisContextDepth = CodeSynthesisContexts.size();
}
if (PragmaAttributeCurrentTargetDecl)
PrintPragmaAttributeInstantiationPoint();
}
void PrintInstantiationStack();
void PrintPragmaAttributeInstantiationPoint();
/// Determines whether we are currently in a context where
/// template argument substitution failures are not considered
/// errors.
///
/// \returns An empty \c Optional if we're not in a SFINAE context.
/// Otherwise, contains a pointer that, if non-NULL, points to the nearest
/// template-deduction context object, which can be used to capture
/// diagnostics that will be suppressed.
Optional<sema::TemplateDeductionInfo *> isSFINAEContext() const;
/// Determines whether we are currently in a context that
/// is not evaluated as per C++ [expr] p5.
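///
/// For example, the operand of \c sizeof below is unevaluated, so \c f()
/// is never called:
/// \code
///   int n = sizeof(f());
/// \endcode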
bool isUnevaluatedContext() const {
assert(!ExprEvalContexts.empty() &&
"Must be in an expression evaluation context");
return ExprEvalContexts.back().isUnevaluated();
}
/// RAII class used to determine whether SFINAE has
/// trapped any errors that occur during template argument
/// deduction.
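///
/// A minimal usage sketch (hypothetical):
/// \code
///   SFINAETrap Trap(*this);
///   // ... perform checks whose diagnostics should be suppressed ...
///   if (Trap.hasErrorOccurred())
///     return true; // substitution failed under SFINAE
/// \endcode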
class SFINAETrap {
Sema &SemaRef;
unsigned PrevSFINAEErrors;
bool PrevInNonInstantiationSFINAEContext;
bool PrevAccessCheckingSFINAE;
bool PrevLastDiagnosticIgnored;
public:
explicit SFINAETrap(Sema &SemaRef, bool AccessCheckingSFINAE = false)
: SemaRef(SemaRef), PrevSFINAEErrors(SemaRef.NumSFINAEErrors),
PrevInNonInstantiationSFINAEContext(
SemaRef.InNonInstantiationSFINAEContext),
PrevAccessCheckingSFINAE(SemaRef.AccessCheckingSFINAE),
PrevLastDiagnosticIgnored(
SemaRef.getDiagnostics().isLastDiagnosticIgnored())
{
if (!SemaRef.isSFINAEContext())
SemaRef.InNonInstantiationSFINAEContext = true;
SemaRef.AccessCheckingSFINAE = AccessCheckingSFINAE;
}
~SFINAETrap() {
SemaRef.NumSFINAEErrors = PrevSFINAEErrors;
SemaRef.InNonInstantiationSFINAEContext
= PrevInNonInstantiationSFINAEContext;
SemaRef.AccessCheckingSFINAE = PrevAccessCheckingSFINAE;
SemaRef.getDiagnostics().setLastDiagnosticIgnored(
PrevLastDiagnosticIgnored);
}
/// Determine whether any SFINAE errors have been trapped.
bool hasErrorOccurred() const {
return SemaRef.NumSFINAEErrors > PrevSFINAEErrors;
}
};
/// RAII class used to indicate that we are performing provisional
/// semantic analysis to determine the validity of a construct, so
/// typo-correction and diagnostics in the immediate context (not within
/// implicitly-instantiated templates) should be suppressed.
class TentativeAnalysisScope {
Sema &SemaRef;
// FIXME: Using a SFINAETrap for this is a hack.
SFINAETrap Trap;
bool PrevDisableTypoCorrection;
public:
explicit TentativeAnalysisScope(Sema &SemaRef)
: SemaRef(SemaRef), Trap(SemaRef, true),
PrevDisableTypoCorrection(SemaRef.DisableTypoCorrection) {
SemaRef.DisableTypoCorrection = true;
}
~TentativeAnalysisScope() {
SemaRef.DisableTypoCorrection = PrevDisableTypoCorrection;
}
};
/// The current instantiation scope used to store local
/// variables.
LocalInstantiationScope *CurrentInstantiationScope;
/// Tracks whether we are in a context where typo correction is
/// disabled.
bool DisableTypoCorrection;
/// The number of typos corrected by CorrectTypo.
unsigned TyposCorrected;
typedef llvm::SmallSet<SourceLocation, 2> SrcLocSet;
typedef llvm::DenseMap<IdentifierInfo *, SrcLocSet> IdentifierSourceLocations;
/// A cache containing identifiers for which typo correction failed and
/// their locations, so that repeated attempts to correct an identifier in a
/// given location are ignored if typo correction already failed for it.
IdentifierSourceLocations TypoCorrectionFailures;
/// Worker object for performing CFG-based warnings.
sema::AnalysisBasedWarnings AnalysisWarnings;
threadSafety::BeforeSet *ThreadSafetyDeclCache;
/// An entity for which implicit template instantiation is required.
///
/// The source location associated with the declaration is the first place in
/// the source code where the declaration was "used". It is not necessarily
/// the point of instantiation (which will be either before or after the
/// namespace-scope declaration that triggered this implicit instantiation).
/// However, it is the location that diagnostics should generally refer to,
/// because users will need to know what code triggered the instantiation.
typedef std::pair<ValueDecl *, SourceLocation> PendingImplicitInstantiation;
/// The queue of implicit template instantiations that are required
/// but have not yet been performed.
std::deque<PendingImplicitInstantiation> PendingInstantiations;
/// Queue of implicit template instantiations that cannot be performed
/// eagerly.
SmallVector<PendingImplicitInstantiation, 1> LateParsedInstantiations;
class GlobalEagerInstantiationScope {
public:
GlobalEagerInstantiationScope(Sema &S, bool Enabled)
: S(S), Enabled(Enabled) {
if (!Enabled) return;
SavedPendingInstantiations.swap(S.PendingInstantiations);
SavedVTableUses.swap(S.VTableUses);
}
void perform() {
if (Enabled) {
S.DefineUsedVTables();
S.PerformPendingInstantiations();
}
}
~GlobalEagerInstantiationScope() {
if (!Enabled) return;
// Restore the set of pending vtables.
assert(S.VTableUses.empty() &&
"VTableUses should be empty before it is discarded.");
S.VTableUses.swap(SavedVTableUses);
// Restore the set of pending implicit instantiations.
assert(S.PendingInstantiations.empty() &&
"PendingInstantiations should be empty before it is discarded.");
S.PendingInstantiations.swap(SavedPendingInstantiations);
}
private:
Sema &S;
SmallVector<VTableUse, 16> SavedVTableUses;
std::deque<PendingImplicitInstantiation> SavedPendingInstantiations;
bool Enabled;
};
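// A usage sketch for GlobalEagerInstantiationScope (illustrative): the scope
// saves the pending work on entry, perform() flushes whatever was queued
// inside it, and the destructor swaps the saved state back.
//   {
//     GlobalEagerInstantiationScope Scope(S, /*Enabled=*/true);
//     // ... semantic analysis that queues instantiations/vtable uses ...
//     Scope.perform();
//   } // saved PendingInstantiations and VTableUses restored here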
/// The queue of implicit template instantiations that are required
/// and must be performed within the current local scope.
///
/// This queue is only used for member functions of local classes in
/// templates, which must be instantiated in the same scope as their
/// enclosing function, so that they can reference function-local
/// types, static variables, enumerators, etc.
std::deque<PendingImplicitInstantiation> PendingLocalImplicitInstantiations;
class LocalEagerInstantiationScope {
public:
LocalEagerInstantiationScope(Sema &S) : S(S) {
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
void perform() { S.PerformPendingInstantiations(/*LocalOnly=*/true); }
~LocalEagerInstantiationScope() {
assert(S.PendingLocalImplicitInstantiations.empty() &&
"there shouldn't be any pending local implicit instantiations");
SavedPendingLocalImplicitInstantiations.swap(
S.PendingLocalImplicitInstantiations);
}
private:
Sema &S;
std::deque<PendingImplicitInstantiation>
SavedPendingLocalImplicitInstantiations;
};
/// A helper class for building up ExtParameterInfos.
class ExtParameterInfoBuilder {
SmallVector<FunctionProtoType::ExtParameterInfo, 16> Infos;
bool HasInteresting = false;
public:
/// Set the ExtParameterInfo for the parameter at the given index.
void set(unsigned index, FunctionProtoType::ExtParameterInfo info) {
assert(Infos.size() <= index);
Infos.resize(index);
Infos.push_back(info);
if (!HasInteresting)
HasInteresting = (info != FunctionProtoType::ExtParameterInfo());
}
/// Return a pointer (suitable for setting in an ExtProtoInfo) to the
/// ExtParameterInfo array we've built up.
const FunctionProtoType::ExtParameterInfo *
getPointerOrNull(unsigned numParams) {
if (!HasInteresting) return nullptr;
Infos.resize(numParams);
return Infos.data();
}
};
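// A usage sketch for ExtParameterInfoBuilder (hypothetical): record only the
// "interesting" parameters; getPointerOrNull() returns nullptr when every
// entry was the default.
//   ExtParameterInfoBuilder ParamInfos;
//   ParamInfos.set(/*index=*/0, Info);
//   EPI.ExtParameterInfos = ParamInfos.getPointerOrNull(NumParams);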
void PerformPendingInstantiations(bool LocalOnly = false);
TypeSourceInfo *SubstType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity,
bool AllowDeducedTST = false);
QualType SubstType(QualType T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstType(TypeLoc TL,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc, DeclarationName Entity);
TypeSourceInfo *SubstFunctionDeclType(TypeSourceInfo *T,
const MultiLevelTemplateArgumentList &TemplateArgs,
SourceLocation Loc,
DeclarationName Entity,
CXXRecordDecl *ThisContext,
Qualifiers ThisTypeQuals);
void SubstExceptionSpec(FunctionDecl *New, const FunctionProtoType *Proto,
const MultiLevelTemplateArgumentList &Args);
bool SubstExceptionSpec(SourceLocation Loc,
FunctionProtoType::ExceptionSpecInfo &ESI,
SmallVectorImpl<QualType> &ExceptionStorage,
const MultiLevelTemplateArgumentList &Args);
ParmVarDecl *SubstParmVarDecl(ParmVarDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
int indexAdjustment,
Optional<unsigned> NumExpansions,
bool ExpectParameterPack);
bool SubstParmTypes(SourceLocation Loc, ArrayRef<ParmVarDecl *> Params,
const FunctionProtoType::ExtParameterInfo *ExtParamInfos,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<QualType> &ParamTypes,
SmallVectorImpl<ParmVarDecl *> *OutParams,
ExtParameterInfoBuilder &ParamInfos);
ExprResult SubstExpr(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs);
/// Substitute the given template arguments into a list of
/// expressions, expanding pack expansions if required.
///
/// \param Exprs The list of expressions to substitute into.
///
/// \param IsCall Whether this is some form of call, in which case
/// default arguments will be dropped.
///
/// \param TemplateArgs The set of template arguments to substitute.
///
/// \param Outputs Will receive all of the substituted arguments.
///
/// \returns true if an error occurred, false otherwise.
bool SubstExprs(ArrayRef<Expr *> Exprs, bool IsCall,
const MultiLevelTemplateArgumentList &TemplateArgs,
SmallVectorImpl<Expr *> &Outputs);
StmtResult SubstStmt(Stmt *S,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateParameterList *
SubstTemplateParams(TemplateParameterList *Params, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
Decl *SubstDecl(Decl *D, DeclContext *Owner,
const MultiLevelTemplateArgumentList &TemplateArgs);
ExprResult SubstInitializer(Expr *E,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool CXXDirectInit);
bool
SubstBaseSpecifiers(CXXRecordDecl *Instantiation,
CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool
InstantiateClass(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation, CXXRecordDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK,
bool Complain = true);
bool InstantiateEnum(SourceLocation PointOfInstantiation,
EnumDecl *Instantiation, EnumDecl *Pattern,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
bool InstantiateInClassInitializer(
SourceLocation PointOfInstantiation, FieldDecl *Instantiation,
FieldDecl *Pattern, const MultiLevelTemplateArgumentList &TemplateArgs);
struct LateInstantiatedAttribute {
const Attr *TmplAttr;
LocalInstantiationScope *Scope;
Decl *NewDecl;
LateInstantiatedAttribute(const Attr *A, LocalInstantiationScope *S,
Decl *D)
: TmplAttr(A), Scope(S), NewDecl(D)
{ }
};
typedef SmallVector<LateInstantiatedAttribute, 16> LateInstantiatedAttrVec;
void InstantiateAttrs(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
void
InstantiateAttrsForDecl(const MultiLevelTemplateArgumentList &TemplateArgs,
const Decl *Pattern, Decl *Inst,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *OuterMostScope = nullptr);
bool usesPartialOrExplicitSpecialization(
SourceLocation Loc, ClassTemplateSpecializationDecl *ClassTemplateSpec);
bool
InstantiateClassTemplateSpecialization(SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK,
bool Complain = true);
void InstantiateClassMembers(SourceLocation PointOfInstantiation,
CXXRecordDecl *Instantiation,
const MultiLevelTemplateArgumentList &TemplateArgs,
TemplateSpecializationKind TSK);
void InstantiateClassTemplateSpecializationMembers(
SourceLocation PointOfInstantiation,
ClassTemplateSpecializationDecl *ClassTemplateSpec,
TemplateSpecializationKind TSK);
NestedNameSpecifierLoc
SubstNestedNameSpecifierLoc(NestedNameSpecifierLoc NNS,
const MultiLevelTemplateArgumentList &TemplateArgs);
DeclarationNameInfo
SubstDeclarationNameInfo(const DeclarationNameInfo &NameInfo,
const MultiLevelTemplateArgumentList &TemplateArgs);
TemplateName
SubstTemplateName(NestedNameSpecifierLoc QualifierLoc, TemplateName Name,
SourceLocation Loc,
const MultiLevelTemplateArgumentList &TemplateArgs);
bool Subst(const TemplateArgumentLoc *Args, unsigned NumArgs,
TemplateArgumentListInfo &Result,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateExceptionSpec(SourceLocation PointOfInstantiation,
FunctionDecl *Function);
FunctionDecl *InstantiateFunctionDeclaration(FunctionTemplateDecl *FTD,
const TemplateArgumentList *Args,
SourceLocation Loc);
void InstantiateFunctionDefinition(SourceLocation PointOfInstantiation,
FunctionDecl *Function,
bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
VarTemplateSpecializationDecl *BuildVarTemplateInstantiation(
VarTemplateDecl *VarTemplate, VarDecl *FromVar,
const TemplateArgumentList &TemplateArgList,
const TemplateArgumentListInfo &TemplateArgsInfo,
SmallVectorImpl<TemplateArgument> &Converted,
SourceLocation PointOfInstantiation, void *InsertPos,
LateInstantiatedAttrVec *LateAttrs = nullptr,
LocalInstantiationScope *StartingScope = nullptr);
VarTemplateSpecializationDecl *CompleteVarTemplateSpecializationDecl(
VarTemplateSpecializationDecl *VarSpec, VarDecl *PatternDecl,
const MultiLevelTemplateArgumentList &TemplateArgs);
void
BuildVariableInstantiation(VarDecl *NewVar, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs,
LateInstantiatedAttrVec *LateAttrs,
DeclContext *Owner,
LocalInstantiationScope *StartingScope,
bool InstantiatingVarTemplate = false,
VarTemplateSpecializationDecl *PrevVTSD = nullptr);
void InstantiateVariableInitializer(
VarDecl *Var, VarDecl *OldVar,
const MultiLevelTemplateArgumentList &TemplateArgs);
void InstantiateVariableDefinition(SourceLocation PointOfInstantiation,
VarDecl *Var, bool Recursive = false,
bool DefinitionRequired = false,
bool AtEndOfTU = false);
void InstantiateMemInitializers(CXXConstructorDecl *New,
const CXXConstructorDecl *Tmpl,
const MultiLevelTemplateArgumentList &TemplateArgs);
NamedDecl *FindInstantiatedDecl(SourceLocation Loc, NamedDecl *D,
const MultiLevelTemplateArgumentList &TemplateArgs,
bool FindingInstantiatedContext = false);
DeclContext *FindInstantiatedContext(SourceLocation Loc, DeclContext *DC,
const MultiLevelTemplateArgumentList &TemplateArgs);
// Objective-C declarations.
enum ObjCContainerKind {
OCK_None = -1,
OCK_Interface = 0,
OCK_Protocol,
OCK_Category,
OCK_ClassExtension,
OCK_Implementation,
OCK_CategoryImplementation
};
ObjCContainerKind getObjCContainerKind() const;
DeclResult actOnObjCTypeParam(Scope *S,
ObjCTypeParamVariance variance,
SourceLocation varianceLoc,
unsigned index,
IdentifierInfo *paramName,
SourceLocation paramLoc,
SourceLocation colonLoc,
ParsedType typeBound);
ObjCTypeParamList *actOnObjCTypeParamList(Scope *S, SourceLocation lAngleLoc,
ArrayRef<Decl *> typeParams,
SourceLocation rAngleLoc);
void popObjCTypeParamList(Scope *S, ObjCTypeParamList *typeParamList);
Decl *ActOnStartClassInterface(
Scope *S, SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *SuperName, SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs, SourceRange SuperTypeArgsRange,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
void ActOnSuperClassOfClassInterface(Scope *S,
SourceLocation AtInterfaceLoc,
ObjCInterfaceDecl *IDecl,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperName,
SourceLocation SuperLoc,
ArrayRef<ParsedType> SuperTypeArgs,
SourceRange SuperTypeArgsRange);
void ActOnTypedefedProtocols(SmallVectorImpl<Decl *> &ProtocolRefs,
SmallVectorImpl<SourceLocation> &ProtocolLocs,
IdentifierInfo *SuperName,
SourceLocation SuperLoc);
Decl *ActOnCompatibilityAlias(
SourceLocation AtCompatibilityAliasLoc,
IdentifierInfo *AliasName, SourceLocation AliasLocation,
IdentifierInfo *ClassName, SourceLocation ClassLocation);
bool CheckForwardProtocolDeclarationForCircularDependency(
IdentifierInfo *PName,
SourceLocation &PLoc, SourceLocation PrevLoc,
const ObjCList<ObjCProtocolDecl> &PList);
Decl *ActOnStartProtocolInterface(
SourceLocation AtProtoInterfaceLoc, IdentifierInfo *ProtocolName,
SourceLocation ProtocolLoc, Decl *const *ProtoRefNames,
unsigned NumProtoRefs, const SourceLocation *ProtoLocs,
SourceLocation EndProtoLoc, const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryInterface(
SourceLocation AtInterfaceLoc, IdentifierInfo *ClassName,
SourceLocation ClassLoc, ObjCTypeParamList *typeParamList,
IdentifierInfo *CategoryName, SourceLocation CategoryLoc,
Decl *const *ProtoRefs, unsigned NumProtoRefs,
const SourceLocation *ProtoLocs, SourceLocation EndProtoLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartClassImplementation(SourceLocation AtClassImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *SuperClassname,
SourceLocation SuperClassLoc,
const ParsedAttributesView &AttrList);
Decl *ActOnStartCategoryImplementation(SourceLocation AtCatImplLoc,
IdentifierInfo *ClassName,
SourceLocation ClassLoc,
IdentifierInfo *CatName,
SourceLocation CatLoc,
const ParsedAttributesView &AttrList);
DeclGroupPtrTy ActOnFinishObjCImplementation(Decl *ObjCImpDecl,
ArrayRef<Decl *> Decls);
DeclGroupPtrTy ActOnForwardClassDeclaration(SourceLocation Loc,
IdentifierInfo **IdentList,
SourceLocation *IdentLocs,
ArrayRef<ObjCTypeParamList *> TypeParamLists,
unsigned NumElts);
DeclGroupPtrTy
ActOnForwardProtocolDeclaration(SourceLocation AtProtocolLoc,
ArrayRef<IdentifierLocPair> IdentList,
const ParsedAttributesView &attrList);
void FindProtocolDeclaration(bool WarnOnDeclarations, bool ForObjCContainer,
ArrayRef<IdentifierLocPair> ProtocolId,
SmallVectorImpl<Decl *> &Protocols);
void DiagnoseTypeArgsAndProtocols(IdentifierInfo *ProtocolId,
SourceLocation ProtocolLoc,
IdentifierInfo *TypeArgId,
SourceLocation TypeArgLoc,
bool SelectProtocolFirst = false);
/// Given a list of identifiers (and their locations), resolve the
/// names to either Objective-C protocol qualifiers or type
/// arguments, as appropriate.
void actOnObjCTypeArgsOrProtocolQualifiers(
Scope *S,
ParsedType baseType,
SourceLocation lAngleLoc,
ArrayRef<IdentifierInfo *> identifiers,
ArrayRef<SourceLocation> identifierLocs,
SourceLocation rAngleLoc,
SourceLocation &typeArgsLAngleLoc,
SmallVectorImpl<ParsedType> &typeArgs,
SourceLocation &typeArgsRAngleLoc,
SourceLocation &protocolLAngleLoc,
SmallVectorImpl<Decl *> &protocols,
SourceLocation &protocolRAngleLoc,
bool warnOnIncompleteProtocols);
/// Build an Objective-C protocol-qualified 'id' type where no
/// base type was specified.
TypeResult actOnObjCProtocolQualifierType(
SourceLocation lAngleLoc,
ArrayRef<Decl *> protocols,
ArrayRef<SourceLocation> protocolLocs,
SourceLocation rAngleLoc);
/// Build a specialized and/or protocol-qualified Objective-C type.
TypeResult actOnObjCTypeArgsAndProtocolQualifiers(
Scope *S,
SourceLocation Loc,
ParsedType BaseType,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<ParsedType> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<Decl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc);
/// Build an Objective-C type parameter type.
QualType BuildObjCTypeParamType(const ObjCTypeParamDecl *Decl,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Build an Objective-C object pointer type.
QualType BuildObjCObjectType(QualType BaseType,
SourceLocation Loc,
SourceLocation TypeArgsLAngleLoc,
ArrayRef<TypeSourceInfo *> TypeArgs,
SourceLocation TypeArgsRAngleLoc,
SourceLocation ProtocolLAngleLoc,
ArrayRef<ObjCProtocolDecl *> Protocols,
ArrayRef<SourceLocation> ProtocolLocs,
SourceLocation ProtocolRAngleLoc,
bool FailOnError = false);
/// Ensure attributes are consistent with type.
/// \param [in, out] Attributes The attributes to check; they will
/// be modified to be consistent with \p PropertyTy.
void CheckObjCPropertyAttributes(Decl *PropertyPtrTy,
SourceLocation Loc,
unsigned &Attributes,
bool propertyInPrimaryClass);
/// Process the specified property declaration and create decls for the
/// setters and getters as needed.
/// \param property The property declaration being processed
void ProcessPropertyDecl(ObjCPropertyDecl *property);
void DiagnosePropertyMismatch(ObjCPropertyDecl *Property,
ObjCPropertyDecl *SuperProperty,
const IdentifierInfo *Name,
bool OverridingProtocolProperty);
void DiagnoseClassExtensionDupMethods(ObjCCategoryDecl *CAT,
ObjCInterfaceDecl *ID);
Decl *ActOnAtEnd(Scope *S, SourceRange AtEnd,
ArrayRef<Decl *> allMethods = None,
ArrayRef<DeclGroupPtrTy> allTUVars = None);
Decl *ActOnProperty(Scope *S, SourceLocation AtLoc,
SourceLocation LParenLoc,
FieldDeclarator &FD, ObjCDeclSpec &ODS,
Selector GetterSel, Selector SetterSel,
tok::ObjCKeywordKind MethodImplKind,
DeclContext *lexicalDC = nullptr);
Decl *ActOnPropertyImplDecl(Scope *S,
SourceLocation AtLoc,
SourceLocation PropertyLoc,
bool ImplKind,
IdentifierInfo *PropertyId,
IdentifierInfo *PropertyIvar,
SourceLocation PropertyIvarLoc,
ObjCPropertyQueryKind QueryKind);
enum ObjCSpecialMethodKind {
OSMK_None,
OSMK_Alloc,
OSMK_New,
OSMK_Copy,
OSMK_RetainingInit,
OSMK_NonRetainingInit
};
struct ObjCArgInfo {
IdentifierInfo *Name;
SourceLocation NameLoc;
// The Type is null if no type was specified, and the DeclSpec is invalid
// in this case.
ParsedType Type;
ObjCDeclSpec DeclSpec;
/// ArgAttrs - Attribute list for this argument.
ParsedAttributesView ArgAttrs;
};
Decl *ActOnMethodDeclaration(
Scope *S,
SourceLocation BeginLoc, // location of the + or -.
SourceLocation EndLoc, // location of the ; or {.
tok::TokenKind MethodType, ObjCDeclSpec &ReturnQT, ParsedType ReturnType,
ArrayRef<SourceLocation> SelectorLocs, Selector Sel,
// optional arguments. The number of types/arguments is obtained
// from Sel.getNumArgs().
ObjCArgInfo *ArgInfo, DeclaratorChunk::ParamInfo *CParamInfo,
unsigned CNumArgs, // c-style args
const ParsedAttributesView &AttrList, tok::ObjCKeywordKind MethodImplKind,
bool isVariadic, bool MethodDefinition);
ObjCMethodDecl *LookupMethodInQualifiedType(Selector Sel,
const ObjCObjectPointerType *OPT,
bool IsInstance);
ObjCMethodDecl *LookupMethodInObjectType(Selector Sel, QualType Ty,
bool IsInstance);
bool CheckARCMethodDecl(ObjCMethodDecl *method);
bool inferObjCARCLifetime(ValueDecl *decl);
ExprResult
HandleExprPropertyRefExpr(const ObjCObjectPointerType *OPT,
Expr *BaseExpr,
SourceLocation OpLoc,
DeclarationName MemberName,
SourceLocation MemberLoc,
SourceLocation SuperLoc, QualType SuperType,
bool Super);
ExprResult
ActOnClassPropertyRefExpr(IdentifierInfo &receiverName,
IdentifierInfo &propertyName,
SourceLocation receiverNameLoc,
SourceLocation propertyNameLoc);
ObjCMethodDecl *tryCaptureObjCSelf(SourceLocation Loc);
/// Describes the kind of message expression indicated by a message
/// send that starts with an identifier.
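///
/// For illustration (Objective-C):
/// \code
///   [super init];       // ObjCSuperMessage
///   [myObject count];   // ObjCInstanceMessage: receiver is a value
///   [NSString string];  // ObjCClassMessage: receiver names a type
/// \endcode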
enum ObjCMessageKind {
/// The message is sent to 'super'.
ObjCSuperMessage,
/// The message is an instance message.
ObjCInstanceMessage,
/// The message is a class message, and the identifier is a type
/// name.
ObjCClassMessage
};
ObjCMessageKind getObjCMessageKind(Scope *S,
IdentifierInfo *Name,
SourceLocation NameLoc,
bool IsSuper,
bool HasTrailingDot,
ParsedType &ReceiverType);
ExprResult ActOnSuperMessage(Scope *S, SourceLocation SuperLoc,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildClassMessage(TypeSourceInfo *ReceiverTypeInfo,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildClassMessageImplicit(QualType ReceiverType,
bool isSuperReceiver,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnClassMessage(Scope *S,
ParsedType Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildInstanceMessage(Expr *Receiver,
QualType ReceiverType,
SourceLocation SuperLoc,
Selector Sel,
ObjCMethodDecl *Method,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args,
bool isImplicit = false);
ExprResult BuildInstanceMessageImplicit(Expr *Receiver,
QualType ReceiverType,
SourceLocation Loc,
Selector Sel,
ObjCMethodDecl *Method,
MultiExprArg Args);
ExprResult ActOnInstanceMessage(Scope *S,
Expr *Receiver,
Selector Sel,
SourceLocation LBracLoc,
ArrayRef<SourceLocation> SelectorLocs,
SourceLocation RBracLoc,
MultiExprArg Args);
ExprResult BuildObjCBridgedCast(SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
TypeSourceInfo *TSInfo,
Expr *SubExpr);
ExprResult ActOnObjCBridgedCast(Scope *S,
SourceLocation LParenLoc,
ObjCBridgeCastKind Kind,
SourceLocation BridgeKeywordLoc,
ParsedType Type,
SourceLocation RParenLoc,
Expr *SubExpr);
void CheckTollFreeBridgeCast(QualType castType, Expr *castExpr);
void CheckObjCBridgeRelatedCast(QualType castType, Expr *castExpr);
bool CheckTollFreeBridgeStaticCast(QualType castType, Expr *castExpr,
CastKind &Kind);
bool checkObjCBridgeRelatedComponents(SourceLocation Loc,
QualType DestType, QualType SrcType,
ObjCInterfaceDecl *&RelatedClass,
ObjCMethodDecl *&ClassMethod,
ObjCMethodDecl *&InstanceMethod,
TypedefNameDecl *&TDNDecl,
bool CfToNs, bool Diagnose = true);
bool CheckObjCBridgeRelatedConversions(SourceLocation Loc,
QualType DestType, QualType SrcType,
Expr *&SrcExpr, bool Diagnose = true);
bool ConversionToObjCStringLiteralCheck(QualType DstType, Expr *&SrcExpr,
bool Diagnose = true);
bool checkInitMethod(ObjCMethodDecl *method, QualType receiverTypeIfCall);
/// Check whether the given new method is a valid override of the
/// given overridden method, and set any properties that should be inherited.
void CheckObjCMethodOverride(ObjCMethodDecl *NewMethod,
const ObjCMethodDecl *Overridden);
/// Describes the compatibility of a result type with its method.
enum ResultTypeCompatibilityKind {
RTC_Compatible,
RTC_Incompatible,
RTC_Unknown
};
void CheckObjCMethodOverrides(ObjCMethodDecl *ObjCMethod,
ObjCInterfaceDecl *CurrentClass,
ResultTypeCompatibilityKind RTC);
enum PragmaOptionsAlignKind {
POAK_Native, // #pragma options align=native
POAK_Natural, // #pragma options align=natural
POAK_Packed, // #pragma options align=packed
POAK_Power, // #pragma options align=power
POAK_Mac68k, // #pragma options align=mac68k
POAK_Reset // #pragma options align=reset
};
/// ActOnPragmaClangSection - Called on well formed \#pragma clang section
void ActOnPragmaClangSection(SourceLocation PragmaLoc,
PragmaClangSectionAction Action,
PragmaClangSectionKind SecKind, StringRef SecName);
/// ActOnPragmaOptionsAlign - Called on well formed \#pragma options align.
void ActOnPragmaOptionsAlign(PragmaOptionsAlignKind Kind,
SourceLocation PragmaLoc);
/// ActOnPragmaPack - Called on well formed \#pragma pack(...).
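/// For illustration, the forms modeled here include (label and value are
/// illustrative):
/// \code
///   #pragma pack(4)            // set the current alignment to 4
///   #pragma pack(push, r1, 8)  // save state under label 'r1', then set 8
///   #pragma pack(pop, r1)      // restore the state saved under 'r1'
/// \endcode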
void ActOnPragmaPack(SourceLocation PragmaLoc, PragmaMsStackAction Action,
StringRef SlotLabel, Expr *Alignment);
enum class PragmaPackDiagnoseKind {
NonDefaultStateAtInclude,
ChangedStateAtExit
};
void DiagnoseNonDefaultPragmaPack(PragmaPackDiagnoseKind Kind,
SourceLocation IncludeLoc);
void DiagnoseUnterminatedPragmaPack();
/// ActOnPragmaMSStruct - Called on well formed \#pragma ms_struct [on|off].
void ActOnPragmaMSStruct(PragmaMSStructKind Kind);
/// ActOnPragmaMSComment - Called on well formed
/// \#pragma comment(kind, "arg").
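/// For example (illustrative library name):
/// \code
///   #pragma comment(lib, "ws2_32.lib")
/// \endcode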
void ActOnPragmaMSComment(SourceLocation CommentLoc, PragmaMSCommentKind Kind,
StringRef Arg);
/// ActOnPragmaMSPointersToMembers - called on well formed \#pragma
/// pointers_to_members(representation method[, general purpose
/// representation]).
void ActOnPragmaMSPointersToMembers(
LangOptions::PragmaMSPointersToMembersKind Kind,
SourceLocation PragmaLoc);
/// Called on well formed \#pragma vtordisp().
void ActOnPragmaMSVtorDisp(PragmaMsStackAction Action,
SourceLocation PragmaLoc,
MSVtorDispAttr::Mode Value);
enum PragmaSectionKind {
PSK_DataSeg,
PSK_BSSSeg,
PSK_ConstSeg,
PSK_CodeSeg,
};
bool UnifySection(StringRef SectionName,
int SectionFlags,
DeclaratorDecl *TheDecl);
bool UnifySection(StringRef SectionName,
int SectionFlags,
SourceLocation PragmaSectionLocation);
/// Called on well formed \#pragma bss_seg/data_seg/const_seg/code_seg.
void ActOnPragmaMSSeg(SourceLocation PragmaLocation,
PragmaMsStackAction Action,
llvm::StringRef StackSlotLabel,
StringLiteral *SegmentName,
llvm::StringRef PragmaName);
/// Called on well formed \#pragma section().
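/// For example (illustrative section name and attributes):
/// \code
///   #pragma section(".mysec", read, write)
/// \endcode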
void ActOnPragmaMSSection(SourceLocation PragmaLocation,
int SectionFlags, StringLiteral *SegmentName);
/// Called on well-formed \#pragma init_seg().
void ActOnPragmaMSInitSeg(SourceLocation PragmaLocation,
StringLiteral *SegmentName);
/// Called on \#pragma clang __debug dump II.
void ActOnPragmaDump(Scope *S, SourceLocation Loc, IdentifierInfo *II);
/// ActOnPragmaDetectMismatch - Called on well-formed \#pragma detect_mismatch
void ActOnPragmaDetectMismatch(SourceLocation Loc, StringRef Name,
StringRef Value);
/// ActOnPragmaUnused - Called on well-formed '\#pragma unused'.
void ActOnPragmaUnused(const Token &Identifier,
Scope *curScope,
SourceLocation PragmaLoc);
/// ActOnPragmaVisibility - Called on well formed \#pragma GCC visibility... .
void ActOnPragmaVisibility(const IdentifierInfo* VisType,
SourceLocation PragmaLoc);
NamedDecl *DeclClonePragmaWeak(NamedDecl *ND, IdentifierInfo *II,
SourceLocation Loc);
void DeclApplyPragmaWeak(Scope *S, NamedDecl *ND, WeakInfo &W);
/// ActOnPragmaWeakID - Called on well formed \#pragma weak ident.
void ActOnPragmaWeakID(IdentifierInfo* WeakName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc);
/// ActOnPragmaRedefineExtname - Called on well formed
/// \#pragma redefine_extname oldname newname.
void ActOnPragmaRedefineExtname(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaWeakAlias - Called on well formed \#pragma weak ident = ident.
void ActOnPragmaWeakAlias(IdentifierInfo* WeakName,
IdentifierInfo* AliasName,
SourceLocation PragmaLoc,
SourceLocation WeakNameLoc,
SourceLocation AliasNameLoc);
/// ActOnPragmaFPContract - Called on well formed
/// \#pragma {STDC,OPENCL} FP_CONTRACT and
/// \#pragma clang fp contract
void ActOnPragmaFPContract(LangOptions::FPContractModeKind FPC);
/// ActOnPragmaFEnvAccess - Called on well formed
/// \#pragma STDC FENV_ACCESS
void ActOnPragmaFEnvAccess(LangOptions::FEnvAccessModeKind FPC);
/// AddAlignmentAttributesForRecord - Adds any needed alignment attributes to
/// the record decl, to handle '\#pragma pack' and '\#pragma options align'.
void AddAlignmentAttributesForRecord(RecordDecl *RD);
/// AddMsStructLayoutForRecord - Adds ms_struct layout attribute to record.
void AddMsStructLayoutForRecord(RecordDecl *RD);
/// FreePackedContext - Deallocate and null out PackContext.
void FreePackedContext();
/// PushNamespaceVisibilityAttr - Note that we've entered a
/// namespace with a visibility attribute.
void PushNamespaceVisibilityAttr(const VisibilityAttr *Attr,
SourceLocation Loc);
/// AddPushedVisibilityAttribute - If '\#pragma GCC visibility' was used,
/// add an appropriate visibility attribute.
void AddPushedVisibilityAttribute(Decl *RD);
/// PopPragmaVisibility - Pop the top element of the visibility stack; used
/// for '\#pragma GCC visibility' and visibility attributes on namespaces.
void PopPragmaVisibility(bool IsNamespaceEnd, SourceLocation EndLoc);
/// FreeVisContext - Deallocate and null out VisContext.
void FreeVisContext();
/// AddCFAuditedAttribute - Check whether we're currently within
/// '\#pragma clang arc_cf_code_audited' and, if so, consider adding
/// the appropriate attribute.
void AddCFAuditedAttribute(Decl *D);
void ActOnPragmaAttributeAttribute(ParsedAttr &Attribute,
SourceLocation PragmaLoc,
attr::ParsedSubjectMatchRuleSet Rules);
void ActOnPragmaAttributeEmptyPush(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Called on well-formed '\#pragma clang attribute pop'.
void ActOnPragmaAttributePop(SourceLocation PragmaLoc,
const IdentifierInfo *Namespace);
/// Adds the attributes that have been specified using the
/// '\#pragma clang attribute push' directives to the given declaration.
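/// For example (illustrative attribute and subject rule):
/// \code
///   #pragma clang attribute push(__attribute__((annotate("api"))), apply_to = function)
///   void f();  // 'f' receives the annotate attribute
///   #pragma clang attribute pop
/// \endcode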
void AddPragmaAttributes(Scope *S, Decl *D);
void DiagnoseUnterminatedPragmaAttribute();
/// Called on well formed \#pragma clang optimize.
void ActOnPragmaOptimize(bool On, SourceLocation PragmaLoc);
/// Get the location for the currently active "\#pragma clang optimize
/// off". If this location is invalid, then the state of the pragma is "on".
SourceLocation getOptimizeOffPragmaLocation() const {
return OptimizeOffPragmaLocation;
}
/// Only called on function definitions; if there is a pragma in scope
/// with the effect of a range-based optnone, consider marking the function
/// with attribute optnone.
void AddRangeBasedOptnone(FunctionDecl *FD);
/// Adds the 'optnone' attribute to the function declaration if there
/// are no conflicts; Loc represents the location causing the 'optnone'
/// attribute to be added (usually because of a pragma).
void AddOptnoneAttributeIfNoConflicts(FunctionDecl *FD, SourceLocation Loc);
/// AddAlignedAttr - Adds an aligned attribute to a particular declaration.
void AddAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex, bool IsPackExpansion);
void AddAlignedAttr(SourceRange AttrRange, Decl *D, TypeSourceInfo *T,
unsigned SpellingListIndex, bool IsPackExpansion);
/// AddAssumeAlignedAttr - Adds an assume_aligned attribute to a particular
/// declaration.
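/// For example (illustrative declaration), \p E is the alignment and the
/// optional \p OE is the offset:
/// \code
///   void *my_alloc(unsigned n) __attribute__((assume_aligned(64, 8)));
/// \endcode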
void AddAssumeAlignedAttr(SourceRange AttrRange, Decl *D, Expr *E, Expr *OE,
unsigned SpellingListIndex);
/// AddAllocAlignAttr - Adds an alloc_align attribute to a particular
/// declaration.
void AddAllocAlignAttr(SourceRange AttrRange, Decl *D, Expr *ParamExpr,
unsigned SpellingListIndex);
/// AddAlignValueAttr - Adds an align_value attribute to a particular
/// declaration.
void AddAlignValueAttr(SourceRange AttrRange, Decl *D, Expr *E,
unsigned SpellingListIndex);
/// AddLaunchBoundsAttr - Adds a launch_bounds attribute to a particular
/// declaration.
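/// For example, in CUDA (illustrative kernel):
/// \code
///   __global__ void __launch_bounds__(256, 2) kern(float *p);
/// \endcode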
void AddLaunchBoundsAttr(SourceRange AttrRange, Decl *D, Expr *MaxThreads,
Expr *MinBlocks, unsigned SpellingListIndex);
/// AddModeAttr - Adds a mode attribute to a particular declaration.
void AddModeAttr(SourceRange AttrRange, Decl *D, IdentifierInfo *Name,
unsigned SpellingListIndex, bool InInstantiation = false);
void AddParameterABIAttr(SourceRange AttrRange, Decl *D,
ParameterABI ABI, unsigned SpellingListIndex);
enum class RetainOwnershipKind {NS, CF, OS};
void AddXConsumedAttr(Decl *D, SourceRange SR, unsigned SpellingIndex,
RetainOwnershipKind K, bool IsTemplateInstantiation);
/// addAMDGPUFlatWorkGroupSizeAttr - Adds an amdgpu_flat_work_group_size
/// attribute to a particular declaration.
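/// For example (illustrative OpenCL kernel):
/// \code
///   __attribute__((amdgpu_flat_work_group_size(64, 256)))
///   kernel void k(global int *p);
/// \endcode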
void addAMDGPUFlatWorkGroupSizeAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
/// addAMDGPUWavesPerEUAttr - Adds an amdgpu_waves_per_eu attribute to a
/// particular declaration.
void addAMDGPUWavesPerEUAttr(SourceRange AttrRange, Decl *D, Expr *Min,
Expr *Max, unsigned SpellingListIndex);
bool checkNSReturnsRetainedReturnType(SourceLocation loc, QualType type);
//===--------------------------------------------------------------------===//
// C++ Coroutines TS
//
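// For example, these act on the coroutine constructs in a function body
// (illustrative coroutine type and names):
//
//   task<int> f() {
//     co_await ready();  // ActOnCoawaitExpr
//     co_yield 1;        // ActOnCoyieldExpr
//     co_return 2;       // ActOnCoreturnStmt
//   }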
bool ActOnCoroutineBodyStart(Scope *S, SourceLocation KwLoc,
StringRef Keyword);
ExprResult ActOnCoawaitExpr(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult ActOnCoyieldExpr(Scope *S, SourceLocation KwLoc, Expr *E);
StmtResult ActOnCoreturnStmt(Scope *S, SourceLocation KwLoc, Expr *E);
ExprResult BuildResolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
ExprResult BuildUnresolvedCoawaitExpr(SourceLocation KwLoc, Expr *E,
UnresolvedLookupExpr* Lookup);
ExprResult BuildCoyieldExpr(SourceLocation KwLoc, Expr *E);
StmtResult BuildCoreturnStmt(SourceLocation KwLoc, Expr *E,
bool IsImplicit = false);
StmtResult BuildCoroutineBodyStmt(CoroutineBodyStmt::CtorArgs);
bool buildCoroutineParameterMoves(SourceLocation Loc);
VarDecl *buildCoroutinePromise(SourceLocation Loc);
void CheckCompletedCoroutineBody(FunctionDecl *FD, Stmt *&Body);
ClassTemplateDecl *lookupCoroutineTraits(SourceLocation KwLoc,
SourceLocation FuncLoc);
//===--------------------------------------------------------------------===//
// OpenCL extensions.
//
private:
std::string CurrOpenCLExtension;
/// Extensions required by an OpenCL type.
llvm::DenseMap<const Type*, std::set<std::string>> OpenCLTypeExtMap;
/// Extensions required by an OpenCL declaration.
llvm::DenseMap<const Decl*, std::set<std::string>> OpenCLDeclExtMap;
public:
llvm::StringRef getCurrentOpenCLExtension() const {
return CurrOpenCLExtension;
}
/// Check if a function declaration \p FD is associated with any
/// extensions present in OpenCLDeclExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromDeclExtMap(FunctionDecl *FD);
/// Check if a function type \p FT is associated with any
/// extensions present in OpenCLTypeExtMap and, if so, return the
/// names of those extensions.
std::string getOpenCLExtensionsFromTypeExtMap(FunctionType *FT);
/// Find an extension in an appropriate extension map and return its name.
template<typename T, typename MapT>
std::string getOpenCLExtensionsFromExtMap(T* FT, MapT &Map);
void setCurrentOpenCLExtension(llvm::StringRef Ext) {
CurrOpenCLExtension = Ext;
}
/// Set OpenCL extensions for a type which can only be used when these
/// OpenCL extensions are enabled. If \p Exts is empty, do nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForType(QualType T, llvm::StringRef Exts);
/// Set OpenCL extensions for a declaration which can only be
/// used when these OpenCL extensions are enabled. If \p Exts is empty, do
/// nothing.
/// \param Exts A space separated list of OpenCL extensions.
void setOpenCLExtensionForDecl(Decl *FD, llvm::StringRef Exts);
/// Set current OpenCL extensions for a type which can only be used
/// when these OpenCL extensions are enabled. If current OpenCL extension is
/// empty, do nothing.
void setCurrentOpenCLExtensionForType(QualType T);
/// Set current OpenCL extensions for a declaration which
/// can only be used when these OpenCL extensions are enabled. If current
/// OpenCL extension is empty, do nothing.
void setCurrentOpenCLExtensionForDecl(Decl *FD);
bool isOpenCLDisabledDecl(Decl *FD);
/// Check if type \p T corresponding to declaration specifier \p DS
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledTypeDeclSpec(const DeclSpec &DS, QualType T);
/// Check if declaration \p D used by expression \p E
/// is disabled due to required OpenCL extensions being disabled. If so,
/// emit diagnostics.
/// \return true if type is disabled.
bool checkOpenCLDisabledDecl(const NamedDecl &D, const Expr &E);
//===--------------------------------------------------------------------===//
// OpenMP directives and clauses.
//
private:
void *VarDataSharingAttributesStack;
/// Number of nested '#pragma omp declare target' directives.
unsigned DeclareTargetNestingLevel = 0;
/// Initialization of data-sharing attributes stack.
void InitDataSharingAttributesStack();
void DestroyDataSharingAttributesStack();
ExprResult
VerifyPositiveIntegerConstantInClause(Expr *Op, OpenMPClauseKind CKind,
bool StrictlyPositive = true);
/// Returns OpenMP nesting level for current directive.
unsigned getOpenMPNestingLevel() const;
/// Adjusts the function scopes index for the target-based regions.
void adjustOpenMPTargetScopeIndex(unsigned &FunctionScopesIndex,
unsigned Level) const;
/// Push new OpenMP function region for non-capturing function.
void pushOpenMPFunctionRegion();
/// Pop OpenMP function region for non-capturing function.
void popOpenMPFunctionRegion(const sema::FunctionScopeInfo *OldFSI);
/// Check whether we're allowed to call Callee from the current function.
void checkOpenMPDeviceFunction(SourceLocation Loc, FunctionDecl *Callee);
/// Check if the expression is allowed to be used in expressions for the
/// OpenMP devices.
void checkOpenMPDeviceExpr(const Expr *E);
/// Checks if a type or a declaration is disabled due to the owning extension
/// being disabled, and emits diagnostic messages if it is disabled.
/// \param D type or declaration to be checked.
/// \param DiagLoc source location for the diagnostic message.
/// \param DiagInfo information to be emitted for the diagnostic message.
/// \param SrcRange source range of the declaration.
/// \param Map maps type or declaration to the extensions.
/// \param Selector selects diagnostic message: 0 for type and 1 for
/// declaration.
/// \return true if the type or declaration is disabled.
template <typename T, typename DiagLocT, typename DiagInfoT, typename MapT>
bool checkOpenCLDisabledTypeOrDecl(T D, DiagLocT DiagLoc, DiagInfoT DiagInfo,
MapT &Map, unsigned Selector = 0,
SourceRange SrcRange = SourceRange());
public:
/// Tries to capture a lambda's captured variables in the OpenMP region
/// before the original lambda itself is captured.
void tryCaptureOpenMPLambdas(ValueDecl *V);
/// Return true if the provided declaration \a D should be captured by
/// reference.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPCapturedByRef(const ValueDecl *D, unsigned Level) const;
/// Check if the specified variable is used in one of the private
/// clauses (private, firstprivate, lastprivate, reduction etc.) in OpenMP
/// constructs.
VarDecl *isOpenMPCapturedDecl(ValueDecl *D, bool CheckScopeInfo = false,
unsigned StopAt = 0);
ExprResult getOpenMPCapturedExpr(VarDecl *Capture, ExprValueKind VK,
ExprObjectKind OK, SourceLocation Loc);
/// If the current region is a loop-based region, mark the start of the loop
/// construct.
void startOpenMPLoop();
/// Check if the specified variable is used in 'private' clause.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPPrivateDecl(const ValueDecl *D, unsigned Level) const;
/// Sets OpenMP capture kind (OMPC_private, OMPC_firstprivate, OMPC_map etc.)
/// for \p FD based on DSA for the provided corresponding captured declaration
/// \p D.
void setOpenMPCaptureKind(FieldDecl *FD, const ValueDecl *D, unsigned Level);
/// Check if the specified variable is captured by 'target' directive.
/// \param Level Relative level of nested OpenMP construct for that the check
/// is performed.
bool isOpenMPTargetCapturedDecl(const ValueDecl *D, unsigned Level) const;
ExprResult PerformOpenMPImplicitIntegerConversion(SourceLocation OpLoc,
Expr *Op);
/// Called on start of new data sharing attribute block.
void StartOpenMPDSABlock(OpenMPDirectiveKind K,
const DeclarationNameInfo &DirName, Scope *CurScope,
SourceLocation Loc);
/// Start analysis of clauses.
void StartOpenMPClause(OpenMPClauseKind K);
/// End analysis of clauses.
void EndOpenMPClause();
/// Called on end of data sharing attribute block.
void EndOpenMPDSABlock(Stmt *CurDirective);
/// Check if the current region is an OpenMP loop region and if it is,
/// mark loop control variable, used in \p Init for loop initialization, as
/// private by default.
/// \param Init First part of the for loop.
void ActOnOpenMPLoopInitialization(SourceLocation ForLoc, Stmt *Init);
/// Called on a correct id-expression from the '#pragma omp
/// threadprivate' directive.
ExprResult ActOnOpenMPIdExpression(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OpenMPDirectiveKind Kind);
/// Called on well-formed '#pragma omp threadprivate'.
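/// For example (illustrative variable):
/// \code
///   static int counter;
///   #pragma omp threadprivate(counter)
/// \endcode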
DeclGroupPtrTy ActOnOpenMPThreadprivateDirective(
SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Builds a new OpenMPThreadPrivateDecl and checks its correctness.
OMPThreadPrivateDecl *CheckOMPThreadPrivateDecl(SourceLocation Loc,
ArrayRef<Expr *> VarList);
/// Called on well-formed '#pragma omp allocate'.
DeclGroupPtrTy ActOnOpenMPAllocateDirective(SourceLocation Loc,
ArrayRef<Expr *> VarList,
ArrayRef<OMPClause *> Clauses,
DeclContext *Owner = nullptr);
/// Called on well-formed '#pragma omp requires'.
DeclGroupPtrTy ActOnOpenMPRequiresDirective(SourceLocation Loc,
ArrayRef<OMPClause *> ClauseList);
/// Check restrictions on the '#pragma omp requires' directive.
OMPRequiresDecl *CheckOMPRequiresDecl(SourceLocation Loc,
ArrayRef<OMPClause *> Clauses);
/// Check if the specified type is allowed to be used in 'omp declare
/// reduction' construct.
QualType ActOnOpenMPDeclareReductionType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare reduction'.
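/// For example (illustrative reduction-identifier 'myadd'):
/// \code
///   #pragma omp declare reduction(myadd : int : omp_out += omp_in) initializer(omp_priv = 0)
/// \endcode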
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name,
ArrayRef<std::pair<QualType, SourceLocation>> ReductionTypes,
AccessSpecifier AS, Decl *PrevDeclInScope = nullptr);
/// Initialize the declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerStart(Scope *S, Decl *D);
/// Finish the current declare reduction construct combiner.
void ActOnOpenMPDeclareReductionCombinerEnd(Decl *D, Expr *Combiner);
/// Initialize declare reduction construct initializer.
/// \return omp_priv variable.
VarDecl *ActOnOpenMPDeclareReductionInitializerStart(Scope *S, Decl *D);
/// Finish current declare reduction construct initializer.
void ActOnOpenMPDeclareReductionInitializerEnd(Decl *D, Expr *Initializer,
VarDecl *OmpPrivParm);
/// Called at the end of '#pragma omp declare reduction'.
DeclGroupPtrTy ActOnOpenMPDeclareReductionDirectiveEnd(
Scope *S, DeclGroupPtrTy DeclReductions, bool IsValid);
/// Check variable declaration in 'omp declare mapper' construct.
TypeResult ActOnOpenMPDeclareMapperVarDecl(Scope *S, Declarator &D);
/// Check if the specified type is allowed to be used in 'omp declare
/// mapper' construct.
QualType ActOnOpenMPDeclareMapperType(SourceLocation TyLoc,
TypeResult ParsedType);
/// Called on start of '#pragma omp declare mapper'.
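/// For example (illustrative mapper-identifier and type):
/// \code
///   struct vec { int len; double *data; };
///   #pragma omp declare mapper(id : struct vec v) map(v.len, v.data[0:v.len])
/// \endcode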
OMPDeclareMapperDecl *ActOnOpenMPDeclareMapperDirectiveStart(
Scope *S, DeclContext *DC, DeclarationName Name, QualType MapperType,
SourceLocation StartLoc, DeclarationName VN, AccessSpecifier AS,
Decl *PrevDeclInScope = nullptr);
/// Build the mapper variable of '#pragma omp declare mapper'.
void ActOnOpenMPDeclareMapperDirectiveVarDecl(OMPDeclareMapperDecl *DMD,
Scope *S, QualType MapperType,
SourceLocation StartLoc,
DeclarationName VN);
/// Called at the end of '#pragma omp declare mapper'.
DeclGroupPtrTy
ActOnOpenMPDeclareMapperDirectiveEnd(OMPDeclareMapperDecl *D, Scope *S,
ArrayRef<OMPClause *> ClauseList);
/// Called on the start of a declare target region, i.e. '#pragma omp declare target'.
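/// For example (illustrative declarations):
/// \code
///   #pragma omp declare target
///   int dev_counter;
///   void dev_fn();
///   #pragma omp end declare target
/// \endcode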
bool ActOnStartOpenMPDeclareTargetDirective(SourceLocation Loc);
/// Called at the end of a declare target region, i.e. '#pragma omp end declare target'.
void ActOnFinishOpenMPDeclareTargetDirective();
/// Called on a correct id-expression from the '#pragma omp declare target' directive.
void ActOnOpenMPDeclareTargetName(Scope *CurScope, CXXScopeSpec &ScopeSpec,
const DeclarationNameInfo &Id,
OMPDeclareTargetDeclAttr::MapTypeTy MT,
NamedDeclSetType &SameDirectiveDecls);
/// Check declaration inside target region.
void
checkDeclIsAllowedInOpenMPTarget(Expr *E, Decl *D,
SourceLocation IdLoc = SourceLocation());
/// Return true if inside an OpenMP declare target region.
bool isInOpenMPDeclareTargetContext() const {
return DeclareTargetNestingLevel > 0;
}
/// Return true if inside an OpenMP target region.
bool isInOpenMPTargetExecutionDirective() const;
/// Return the number of captured regions created for an OpenMP directive.
static int getOpenMPCaptureLevels(OpenMPDirectiveKind Kind);
/// Initialization of captured region for OpenMP region.
void ActOnOpenMPRegionStart(OpenMPDirectiveKind DKind, Scope *CurScope);
/// End of OpenMP region.
///
/// \param S Statement associated with the current OpenMP region.
/// \param Clauses List of clauses for the current OpenMP region.
///
/// \returns Statement for finished OpenMP region.
StmtResult ActOnOpenMPRegionEnd(StmtResult S, ArrayRef<OMPClause *> Clauses);
StmtResult ActOnOpenMPExecutableDirective(
OpenMPDirectiveKind Kind, const DeclarationNameInfo &DirName,
OpenMPDirectiveKind CancelRegion, ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc, SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
using VarsWithInheritedDSAType =
llvm::SmallDenseMap<const ValueDecl *, const Expr *, 4>;
/// Called on well-formed '\#pragma omp simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp for simd' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPForSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp sections' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp section' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSectionDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp single' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPSingleDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp master' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPMasterDirective(Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp critical' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPCriticalDirective(const DeclarationNameInfo &DirName,
ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp parallel for' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp parallel sections' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPParallelSectionsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp task' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTaskDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskyield'.
StmtResult ActOnOpenMPTaskyieldDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp barrier'.
StmtResult ActOnOpenMPBarrierDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskwait'.
StmtResult ActOnOpenMPTaskwaitDirective(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp taskgroup'.
StmtResult ActOnOpenMPTaskgroupDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp flush'.
StmtResult ActOnOpenMPFlushDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp ordered' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPOrderedDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp atomic' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPAtomicDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target data' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTargetDataDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target enter data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetEnterDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target exit data' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetExitDataDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp target parallel' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp cancellation point'.
StmtResult
ActOnOpenMPCancellationPointDirective(SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp cancel'.
StmtResult ActOnOpenMPCancelDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
OpenMPDirectiveKind CancelRegion);
/// Called on well-formed '\#pragma omp taskloop' after parsing of the
/// associated statement.
StmtResult
ActOnOpenMPTaskLoopDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp taskloop simd' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTaskLoopSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute' after parsing
/// of the associated statement.
StmtResult
ActOnOpenMPDistributeDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target update'.
StmtResult ActOnOpenMPTargetUpdateDirective(ArrayRef<OMPClause *> Clauses,
SourceLocation StartLoc,
SourceLocation EndLoc,
Stmt *AStmt);
/// Called on well-formed '\#pragma omp distribute parallel for' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target parallel for simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target simd' after parsing of
/// the associated statement.
StmtResult
ActOnOpenMPTargetSimdDirective(ArrayRef<OMPClause *> Clauses, Stmt *AStmt,
SourceLocation StartLoc, SourceLocation EndLoc,
VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute' after parsing of
/// the associated statement.
StmtResult ActOnOpenMPTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute simd' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for simd'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams' after parsing of the
/// associated statement.
StmtResult ActOnOpenMPTargetTeamsDirective(ArrayRef<OMPClause *> Clauses,
Stmt *AStmt,
SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed '\#pragma omp target teams distribute' after parsing
/// of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for'
/// after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute parallel for
/// simd' after parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeParallelForSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Called on well-formed '\#pragma omp target teams distribute simd' after
/// parsing of the associated statement.
StmtResult ActOnOpenMPTargetTeamsDistributeSimdDirective(
ArrayRef<OMPClause *> Clauses, Stmt *AStmt, SourceLocation StartLoc,
SourceLocation EndLoc, VarsWithInheritedDSAType &VarsWithImplicitDSA);
/// Checks correctness of linear modifiers.
bool CheckOpenMPLinearModifier(OpenMPLinearClauseKind LinKind,
SourceLocation LinLoc);
/// Checks that the specified declaration matches requirements for the linear
/// decls.
bool CheckOpenMPLinearDecl(const ValueDecl *D, SourceLocation ELoc,
OpenMPLinearClauseKind LinKind, QualType Type);
/// Called on well-formed '\#pragma omp declare simd' after parsing of
/// the associated method/function.
DeclGroupPtrTy ActOnOpenMPDeclareSimdDirective(
DeclGroupPtrTy DG, OMPDeclareSimdDeclAttr::BranchStateTy BS,
Expr *Simdlen, ArrayRef<Expr *> Uniforms, ArrayRef<Expr *> Aligneds,
ArrayRef<Expr *> Alignments, ArrayRef<Expr *> Linears,
ArrayRef<unsigned> LinModifiers, ArrayRef<Expr *> Steps, SourceRange SR);
OMPClause *ActOnOpenMPSingleExprClause(OpenMPClauseKind Kind,
Expr *Expr,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'allocator' clause.
OMPClause *ActOnOpenMPAllocatorClause(Expr *Allocator,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'if' clause.
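/// For example, \p NameModifier carries the directive-name-modifier
/// (illustrative conditions):
/// \code
///   #pragma omp target parallel if(target : n > 256) if(parallel : m > 64)
/// \endcode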
OMPClause *ActOnOpenMPIfClause(OpenMPDirectiveKind NameModifier,
Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation NameModifierLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'final' clause.
OMPClause *ActOnOpenMPFinalClause(Expr *Condition, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_threads' clause.
OMPClause *ActOnOpenMPNumThreadsClause(Expr *NumThreads,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'safelen' clause.
OMPClause *ActOnOpenMPSafelenClause(Expr *Length,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simdlen' clause.
OMPClause *ActOnOpenMPSimdlenClause(Expr *Length, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'collapse' clause.
OMPClause *ActOnOpenMPCollapseClause(Expr *NumForLoops,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'ordered' clause.
OMPClause *
ActOnOpenMPOrderedClause(SourceLocation StartLoc, SourceLocation EndLoc,
SourceLocation LParenLoc = SourceLocation(),
Expr *NumForLoops = nullptr);
/// Called on well-formed 'grainsize' clause.
OMPClause *ActOnOpenMPGrainsizeClause(Expr *Size, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'num_tasks' clause.
OMPClause *ActOnOpenMPNumTasksClause(Expr *NumTasks, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'hint' clause.
OMPClause *ActOnOpenMPHintClause(Expr *Hint, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSimpleClause(OpenMPClauseKind Kind,
unsigned Argument,
SourceLocation ArgumentLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'default' clause.
OMPClause *ActOnOpenMPDefaultClause(OpenMPDefaultClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'proc_bind' clause.
OMPClause *ActOnOpenMPProcBindClause(OpenMPProcBindClauseKind Kind,
SourceLocation KindLoc,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
OMPClause *ActOnOpenMPSingleExprWithArgClause(
OpenMPClauseKind Kind, ArrayRef<unsigned> Arguments, Expr *Expr,
SourceLocation StartLoc, SourceLocation LParenLoc,
ArrayRef<SourceLocation> ArgumentsLoc, SourceLocation DelimLoc,
SourceLocation EndLoc);
/// Called on well-formed 'schedule' clause.
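/// For example, with a schedule modifier and chunk size (illustrative values):
/// \code
///   #pragma omp for schedule(monotonic : dynamic, 4)
/// \endcode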
OMPClause *ActOnOpenMPScheduleClause(
OpenMPScheduleClauseModifier M1, OpenMPScheduleClauseModifier M2,
OpenMPScheduleClauseKind Kind, Expr *ChunkSize, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation M1Loc, SourceLocation M2Loc,
SourceLocation KindLoc, SourceLocation CommaLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPClause(OpenMPClauseKind Kind, SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nowait' clause.
OMPClause *ActOnOpenMPNowaitClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'untied' clause.
OMPClause *ActOnOpenMPUntiedClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'mergeable' clause.
OMPClause *ActOnOpenMPMergeableClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'read' clause.
OMPClause *ActOnOpenMPReadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'write' clause.
OMPClause *ActOnOpenMPWriteClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'update' clause.
OMPClause *ActOnOpenMPUpdateClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'capture' clause.
OMPClause *ActOnOpenMPCaptureClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'seq_cst' clause.
OMPClause *ActOnOpenMPSeqCstClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'threads' clause.
OMPClause *ActOnOpenMPThreadsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'simd' clause.
OMPClause *ActOnOpenMPSIMDClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'nogroup' clause.
OMPClause *ActOnOpenMPNogroupClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_address' clause.
OMPClause *ActOnOpenMPUnifiedAddressClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'unified_shared_memory' clause.
OMPClause *ActOnOpenMPUnifiedSharedMemoryClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reverse_offload' clause.
OMPClause *ActOnOpenMPReverseOffloadClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dynamic_allocators' clause.
OMPClause *ActOnOpenMPDynamicAllocatorsClause(SourceLocation StartLoc,
SourceLocation EndLoc);
/// Called on well-formed 'atomic_default_mem_order' clause.
OMPClause *ActOnOpenMPAtomicDefaultMemOrderClause(
OpenMPAtomicDefaultMemOrderClauseKind Kind, SourceLocation KindLoc,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation EndLoc);
OMPClause *ActOnOpenMPVarListClause(
OpenMPClauseKind Kind, ArrayRef<Expr *> Vars, Expr *TailExpr,
const OMPVarListLocTy &Locs, SourceLocation ColonLoc,
CXXScopeSpec &ReductionOrMapperIdScopeSpec,
DeclarationNameInfo &ReductionOrMapperId, OpenMPDependClauseKind DepKind,
OpenMPLinearClauseKind LinKind,
ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc, OpenMPMapClauseKind MapType,
bool IsMapTypeImplicit, SourceLocation DepLinMapLoc);
/// Called on well-formed 'allocate' clause.
OMPClause *
ActOnOpenMPAllocateClause(Expr *Allocator, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation ColonLoc,
SourceLocation LParenLoc, SourceLocation EndLoc);
/// Called on well-formed 'private' clause.
OMPClause *ActOnOpenMPPrivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'firstprivate' clause.
OMPClause *ActOnOpenMPFirstprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'lastprivate' clause.
OMPClause *ActOnOpenMPLastprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'shared' clause.
OMPClause *ActOnOpenMPSharedClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'reduction' clause.
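/// For example (illustrative accumulator):
/// \code
///   int sum = 0;
///   #pragma omp parallel for reduction(+ : sum)
///   for (int i = 0; i < n; ++i) sum += a[i];
/// \endcode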
OMPClause *ActOnOpenMPReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'task_reduction' clause.
OMPClause *ActOnOpenMPTaskReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'in_reduction' clause.
OMPClause *ActOnOpenMPInReductionClause(
ArrayRef<Expr *> VarList, SourceLocation StartLoc,
SourceLocation LParenLoc, SourceLocation ColonLoc, SourceLocation EndLoc,
CXXScopeSpec &ReductionIdScopeSpec,
const DeclarationNameInfo &ReductionId,
ArrayRef<Expr *> UnresolvedReductions = llvm::None);
/// Called on well-formed 'linear' clause.
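/// For example, a variable advancing by a constant step per iteration
/// (illustrative names):
/// \code
///   int j = 0;
///   #pragma omp simd linear(j : 1)
///   for (int i = 0; i < n; ++i) { a[j] = b[i]; ++j; }
/// \endcode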
OMPClause *
ActOnOpenMPLinearClause(ArrayRef<Expr *> VarList, Expr *Step,
SourceLocation StartLoc, SourceLocation LParenLoc,
OpenMPLinearClauseKind LinKind, SourceLocation LinLoc,
SourceLocation ColonLoc, SourceLocation EndLoc);
/// Called on well-formed 'aligned' clause.
OMPClause *ActOnOpenMPAlignedClause(ArrayRef<Expr *> VarList,
Expr *Alignment,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation ColonLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyin' clause.
OMPClause *ActOnOpenMPCopyinClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'copyprivate' clause.
OMPClause *ActOnOpenMPCopyprivateClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'flush' pseudo clause.
OMPClause *ActOnOpenMPFlushClause(ArrayRef<Expr *> VarList,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'depend' clause.
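/// For example (illustrative list items):
/// \code
///   #pragma omp task depend(in : x) depend(out : a[i])
/// \endcode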
OMPClause *
ActOnOpenMPDependClause(OpenMPDependClauseKind DepKind, SourceLocation DepLoc,
SourceLocation ColonLoc, ArrayRef<Expr *> VarList,
SourceLocation StartLoc, SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'device' clause.
OMPClause *ActOnOpenMPDeviceClause(Expr *Device, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'map' clause.
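/// For example, with a map-type-modifier and an array section
/// (illustrative names):
/// \code
///   #pragma omp target map(always, tofrom : a[0:n])
/// \endcode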
OMPClause *
ActOnOpenMPMapClause(ArrayRef<OpenMPMapModifierKind> MapTypeModifiers,
ArrayRef<SourceLocation> MapTypeModifiersLoc,
CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
OpenMPMapClauseKind MapType, bool IsMapTypeImplicit,
SourceLocation MapLoc, SourceLocation ColonLoc,
ArrayRef<Expr *> VarList, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'num_teams' clause.
OMPClause *ActOnOpenMPNumTeamsClause(Expr *NumTeams, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'thread_limit' clause.
OMPClause *ActOnOpenMPThreadLimitClause(Expr *ThreadLimit,
SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'priority' clause.
OMPClause *ActOnOpenMPPriorityClause(Expr *Priority, SourceLocation StartLoc,
SourceLocation LParenLoc,
SourceLocation EndLoc);
/// Called on well-formed 'dist_schedule' clause.
OMPClause *ActOnOpenMPDistScheduleClause(
OpenMPDistScheduleClauseKind Kind, Expr *ChunkSize,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation KindLoc,
SourceLocation CommaLoc, SourceLocation EndLoc);
/// Called on well-formed 'defaultmap' clause.
OMPClause *ActOnOpenMPDefaultmapClause(
OpenMPDefaultmapClauseModifier M, OpenMPDefaultmapClauseKind Kind,
SourceLocation StartLoc, SourceLocation LParenLoc, SourceLocation MLoc,
SourceLocation KindLoc, SourceLocation EndLoc);
/// Called on well-formed 'to' clause.
OMPClause *
ActOnOpenMPToClause(ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId,
const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'from' clause.
OMPClause *ActOnOpenMPFromClause(
ArrayRef<Expr *> VarList, CXXScopeSpec &MapperIdScopeSpec,
DeclarationNameInfo &MapperId, const OMPVarListLocTy &Locs,
ArrayRef<Expr *> UnresolvedMappers = llvm::None);
/// Called on well-formed 'use_device_ptr' clause.
OMPClause *ActOnOpenMPUseDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// Called on well-formed 'is_device_ptr' clause.
OMPClause *ActOnOpenMPIsDevicePtrClause(ArrayRef<Expr *> VarList,
const OMPVarListLocTy &Locs);
/// The kind of conversion being performed.
enum CheckedConversionKind {
/// An implicit conversion.
CCK_ImplicitConversion,
/// A C-style cast.
CCK_CStyleCast,
/// A functional-style cast.
CCK_FunctionalCast,
/// A cast other than a C-style cast.
CCK_OtherCast,
/// A conversion for an operand of a builtin overloaded operator.
CCK_ForBuiltinOverloadedOp
};
static bool isCast(CheckedConversionKind CCK) {
return CCK == CCK_CStyleCast || CCK == CCK_FunctionalCast ||
CCK == CCK_OtherCast;
}
/// ImpCastExprToType - If Expr is not of type 'Type', insert an implicit
/// cast. If there is already an implicit cast, merge into the existing one.
/// The result of the cast has the value kind \p VK.
ExprResult ImpCastExprToType(Expr *E, QualType Type, CastKind CK,
ExprValueKind VK = VK_RValue,
const CXXCastPath *BasePath = nullptr,
CheckedConversionKind CCK
= CCK_ImplicitConversion,
bool isBoundsSafeInterfaceCast = false);
/// ScalarTypeToBooleanCastKind - Returns the cast kind corresponding
/// to the conversion from scalar type ScalarTy to the Boolean type.
static CastKind ScalarTypeToBooleanCastKind(QualType ScalarTy);
/// IgnoredValueConversions - Given that an expression's result is
/// syntactically ignored, perform any conversions that are
/// required.
ExprResult IgnoredValueConversions(Expr *E);
// UsualUnaryConversions - promotes integers (C99 6.3.1.1p2) and converts
// functions and arrays to their respective pointers (C99 6.3.2.1).
ExprResult UsualUnaryConversions(Expr *E);
/// CallExprUnaryConversions - a special case of a unary conversion
/// performed on a function designator of a call expression.
ExprResult CallExprUnaryConversions(Expr *E);
// DefaultFunctionArrayConversion - converts functions and arrays
// to their respective pointers (C99 6.3.2.1).
ExprResult DefaultFunctionArrayConversion(Expr *E, bool Diagnose = true);
// DefaultFunctionArrayLvalueConversion - converts functions and
// arrays to their respective pointers and performs the
// lvalue-to-rvalue conversion.
ExprResult DefaultFunctionArrayLvalueConversion(Expr *E,
bool Diagnose = true);
// DefaultLvalueConversion - performs lvalue-to-rvalue conversion on
// the operand. This is DefaultFunctionArrayLvalueConversion,
// except that it assumes the operand isn't of function or array
// type.
ExprResult DefaultLvalueConversion(Expr *E);
// DefaultArgumentPromotion (C99 6.5.2.2p6). Used for function calls that
// do not have a prototype. Integer promotions are performed on each
// argument, and arguments that have type float are promoted to double.
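// For example, given the unprototyped 'void f();', the call 'f(c, x)' with
// 'char c; float x;' passes an int and a double (illustrative names).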
ExprResult DefaultArgumentPromotion(Expr *E);
/// If \p E is a prvalue denoting an unmaterialized temporary, materialize
/// it as an xvalue. In C++98, the result will still be a prvalue, because
/// we don't have xvalues there.
ExprResult TemporaryMaterializationConversion(Expr *E);
// Used by DefaultVariadicArgumentPromotion to emit the right warning.
enum VariadicCallType {
VariadicFunction,
VariadicBlock,
VariadicMethod,
VariadicConstructor,
VariadicDoesNotApply
};
VariadicCallType getVariadicCallType(FunctionDecl *FDecl,
const FunctionProtoType *Proto,
Expr *Fn);
// Used for determining in which context a type is allowed to be passed to a
// vararg function.
enum VarArgKind {
VAK_Valid,
VAK_ValidInCXX11,
VAK_Undefined,
VAK_MSVCUndefined,
VAK_Invalid
};
// Determines which VarArgKind fits an expression.
VarArgKind isValidVarArgType(const QualType &Ty);
/// Check to see if the given expression is a valid argument to a variadic
/// function, issuing a diagnostic if not.
void checkVariadicArgument(const Expr *E, VariadicCallType CT);
/// Check to see if a given expression could have '.c_str()' called on it.
bool hasCStrMethod(const Expr *E);
/// GatherArgumentsForCall - Collects argument expressions for various
/// forms of call prototypes.
bool GatherArgumentsForCall(SourceLocation CallLoc, FunctionDecl *FDecl,
const FunctionProtoType *Proto,
unsigned FirstParam, ArrayRef<Expr *> Args,
SmallVectorImpl<Expr *> &AllArgs,
VariadicCallType CallType = VariadicDoesNotApply,
bool AllowExplicit = false,
bool IsListInitialization = false);
// DefaultVariadicArgumentPromotion - Like DefaultArgumentPromotion, but
// will create a runtime trap if the resulting type is not a POD type.
ExprResult DefaultVariadicArgumentPromotion(Expr *E, VariadicCallType CT,
FunctionDecl *FDecl);
// UsualArithmeticConversions - performs the UsualUnaryConversions on its
// operands and then handles various conversions that are common to binary
// operators (C99 6.3.1.8). If both operands aren't arithmetic, this
// routine returns the first non-arithmetic type found. The client is
// responsible for emitting appropriate error diagnostics.
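// For example, in 'i + d' with 'int i; double d;', the int operand is
// converted to double and the result type is double (illustrative names).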
QualType UsualArithmeticConversions(ExprResult &LHS, ExprResult &RHS,
bool IsCompAssign = false);
/// AssignConvertType - All of the 'assignment' semantic checks return this
/// enum to indicate whether the assignment was allowed. These checks are
/// done for simple assignments, as well as initialization, return from
/// function, argument passing, etc. The query is phrased in terms of a
/// source and destination type.
enum AssignConvertType {
/// Compatible - the types are compatible according to the standard.
Compatible,
/// PointerToInt - The assignment converts a pointer to an int, which we
/// accept as an extension.
PointerToInt,
/// IntToPointer - The assignment converts an int to a pointer, which we
/// accept as an extension.
IntToPointer,
/// FunctionVoidPointer - The assignment is between a function pointer and
/// void*, which the standard doesn't allow, but we accept as an extension.
FunctionVoidPointer,
/// IncompatiblePointer - The assignment is between two pointer types that
/// are not compatible, but we accept them as an extension.
IncompatiblePointer,
/// IncompatiblePointerSign - The assignment is between two pointer types
/// which point to integers which have a different sign, but are otherwise
/// identical. This is a subset of the above, but broken out because it's by
/// far the most common case of incompatible pointers.
IncompatiblePointerSign,
/// CompatiblePointerDiscardsQualifiers - The assignment discards
/// c/v/r qualifiers, which we accept as an extension.
CompatiblePointerDiscardsQualifiers,
/// IncompatiblePointerDiscardsQualifiers - The assignment
/// discards qualifiers that we don't permit to be discarded,
/// like address spaces.
IncompatiblePointerDiscardsQualifiers,
/// IncompatibleNestedPointerAddressSpaceMismatch - The assignment
/// changes address spaces in nested pointer types which is not allowed.
/// For instance, converting __private int ** to __generic int ** is
/// illegal even though __private could be converted to __generic.
IncompatibleNestedPointerAddressSpaceMismatch,
/// IncompatibleNestedPointerQualifiers - The assignment is between two
/// nested pointer types, and the qualifiers other than the first two
/// levels differ e.g. char ** -> const char **, but we accept them as an
/// extension.
IncompatibleNestedPointerQualifiers,
/// IncompatibleVectors - The assignment is between two vector types that
/// have the same size, which we accept as an extension.
IncompatibleVectors,
/// IntToBlockPointer - The assignment converts an int to a block
/// pointer. We disallow this.
IntToBlockPointer,
/// IncompatibleBlockPointer - The assignment is between two block
/// pointer types that are not compatible.
IncompatibleBlockPointer,
/// IncompatibleObjCQualifiedId - The assignment is between a qualified
/// id type and something else (that is incompatible with it). For example,
/// "id <XXX>" = "Foo *", where "Foo *" doesn't implement the XXX protocol.
IncompatibleObjCQualifiedId,
/// IncompatibleObjCWeakRef - Assigning a weak-unavailable object to an
/// object with __weak qualifier.
IncompatibleObjCWeakRef,
/// IncompatibleCheckedCVoid - Assignments to/from void pointers to pointers
/// to data containing checked pointers are not allowed in regular checked
/// scopes. They are allowed only in unchecked and checked bounds_only scopes.
IncompatibleCheckedCVoid,
/// Incompatible - We reject this conversion outright, it is invalid to
/// represent it in the AST.
Incompatible
};
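// For example (illustrative code): 'int *p = 1;' yields IntToPointer;
// 'unsigned *u = &si;' with 'int si;' yields IncompatiblePointerSign; and
// 'const char **q = &cp;' with 'char *cp;' yields
// IncompatibleNestedPointerQualifiers.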
/// DiagnoseAssignmentResult - Emit a diagnostic, if required, for the
/// assignment conversion type specified by ConvTy. This returns true if the
/// conversion was invalid or false if the conversion was accepted.
bool DiagnoseAssignmentResult(AssignConvertType ConvTy,
SourceLocation Loc,
QualType DstType, QualType SrcType,
Expr *SrcExpr, AssignmentAction Action,
bool *Complained = nullptr);
/// IsValueInFlagEnum - Determine if a value is allowed as part of a flag
/// enum. If AllowMask is true, then we also allow the complement of a valid
/// value, to be used as a mask.
bool IsValueInFlagEnum(const EnumDecl *ED, const llvm::APInt &Val,
bool AllowMask) const;
/// DiagnoseAssignmentEnum - Warn if assignment to enum is a constant
/// integer not in the range of enum values.
void DiagnoseAssignmentEnum(QualType DstType, QualType SrcType,
Expr *SrcExpr);
/// CheckAssignmentConstraints - Perform type checking for assignment,
/// argument passing, variable initialization, and function return values.
/// C99 6.5.16.
AssignConvertType CheckAssignmentConstraints(SourceLocation Loc,
QualType LHSType,
QualType RHSType);
/// Check assignment constraints and optionally prepare for a conversion of
/// the RHS to the LHS type. The conversion is prepared for if ConvertRHS
/// is true.
AssignConvertType CheckAssignmentConstraints(QualType LHSType,
ExprResult &RHS,
CastKind &Kind,
bool ConvertRHS = true);
/// Check assignment constraints for an assignment of RHS to LHSType.
///
/// \param LHSType The destination type for the assignment.
/// \param RHS The source expression for the assignment.
/// \param Diagnose If \c true, diagnostics may be produced when checking
/// for assignability. If a diagnostic is produced, \p RHS will be
/// set to ExprError(). Note that this function may still return
/// without producing a diagnostic, even for an invalid assignment.
/// \param DiagnoseCFAudited If \c true, the target is a function parameter
/// in an audited Core Foundation API and does not need to be checked
/// for ARC retain issues.
/// \param ConvertRHS If \c true, \p RHS will be updated to model the
/// conversions necessary to perform the assignment. If \c false,
/// \p Diagnose must also be \c false.
AssignConvertType CheckSingleAssignmentConstraints(
QualType LHSType, ExprResult &RHS, bool Diagnose = true,
bool DiagnoseCFAudited = false, bool ConvertRHS = true,
QualType LHSInteropType = QualType());
public:
/// \brief Given a value with type Ty that has a bounds declaration,
/// compute the bounds-safe interface type. Returns a null QualType
/// if none exists.
QualType SynthesizeInteropType(QualType Ty, bool isParam);
/// Rewrite function types with bounds-safe interfaces on unchecked
/// types to use the checked types specified by the interfaces. Recursively
/// apply the rewrite to function types nested within the type.
QualType RewriteBoundsSafeInterfaceTypes(QualType Ty);
/// \brief Get the bounds-safe interface type for LHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCLValueInteropType(ExprResult LHS);
/// \brief Get the bounds-safe interface type for RHS.
/// Returns a null QualType if there isn't one.
QualType GetCheckedCRValueInteropType(ExprResult RHS);
/// \brief If T is an array type, create a checked array type version of T.
/// This includes propagating the checked property to nested array types. If
/// a valid checked array type cannot be constructed and Diagnose is true,
/// print a diagnostic message for the problem.
QualType MakeCheckedArrayType(QualType T, bool Diagnose = false,
SourceLocation Loc = SourceLocation());
// If the lhs type is a transparent union, check whether we
// can initialize the transparent union with the given expression.
AssignConvertType CheckTransparentUnionArgumentConstraints(QualType ArgType,
ExprResult &RHS);
bool IsStringLiteralToNonConstPointerConversion(Expr *From, QualType ToType);
bool CheckExceptionSpecCompatibility(Expr *From, QualType ToType);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit = false);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
AssignmentAction Action,
bool AllowExplicit,
ImplicitConversionSequence& ICS);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const ImplicitConversionSequence& ICS,
AssignmentAction Action,
CheckedConversionKind CCK
= CCK_ImplicitConversion);
ExprResult PerformImplicitConversion(Expr *From, QualType ToType,
const StandardConversionSequence& SCS,
AssignmentAction Action,
CheckedConversionKind CCK);
ExprResult PerformQualificationConversion(
Expr *E, QualType Ty, ExprValueKind VK = VK_RValue,
CheckedConversionKind CCK = CCK_ImplicitConversion);
/// the following "Check" methods will return a valid/converted QualType
/// or a null QualType (indicating an error diagnostic was issued).
/// type checking binary operators (subroutines of CreateBuiltinBinOp).
QualType InvalidOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType InvalidLogicalVectorOperands(SourceLocation Loc, ExprResult &LHS,
ExprResult &RHS);
QualType CheckPointerToMemberOperands( // C++ 5.5
ExprResult &LHS, ExprResult &RHS, ExprValueKind &VK,
SourceLocation OpLoc, bool isIndirect);
QualType CheckMultiplyDivideOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc, bool IsCompAssign,
bool IsDivide);
QualType CheckRemainderOperands( // C99 6.5.5
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
bool IsCompAssign = false);
QualType CheckAdditionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, QualType* CompLHSTy = nullptr);
QualType CheckSubtractionOperands( // C99 6.5.6
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
QualType* CompLHSTy = nullptr);
QualType CheckShiftOperands( // C99 6.5.7
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc, bool IsCompAssign = false);
QualType CheckCompareOperands( // C99 6.5.8/9
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckBitwiseOperands( // C99 6.5.[10...12]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckLogicalOperands( // C99 6.5.[13,14]
ExprResult &LHS, ExprResult &RHS, SourceLocation Loc,
BinaryOperatorKind Opc);
// CheckAssignmentOperands is used for both simple and compound assignment.
// For simple assignment, pass both expressions and a null converted type.
// For compound assignment, pass both expressions and the converted type.
QualType CheckAssignmentOperands( // C99 6.5.16.[1,2]
Expr *LHSExpr, ExprResult &RHS, SourceLocation Loc, QualType CompoundType);
ExprResult checkPseudoObjectIncDec(Scope *S, SourceLocation OpLoc,
UnaryOperatorKind Opcode, Expr *Op);
ExprResult checkPseudoObjectAssignment(Scope *S, SourceLocation OpLoc,
BinaryOperatorKind Opcode,
Expr *LHS, Expr *RHS);
ExprResult checkPseudoObjectRValue(Expr *E);
Expr *recreateSyntacticForm(PseudoObjectExpr *E);
QualType CheckConditionalOperands( // C99 6.5.15
ExprResult &Cond, ExprResult &LHS, ExprResult &RHS,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation QuestionLoc);
QualType CXXCheckConditionalOperands( // C++ 5.16
ExprResult &cond, ExprResult &lhs, ExprResult &rhs,
ExprValueKind &VK, ExprObjectKind &OK, SourceLocation questionLoc);
QualType FindCompositePointerType(SourceLocation Loc, Expr *&E1, Expr *&E2,
bool ConvertArgs = true);
QualType FindCompositePointerType(SourceLocation Loc,
ExprResult &E1, ExprResult &E2,
bool ConvertArgs = true) {
Expr *E1Tmp = E1.get(), *E2Tmp = E2.get();
QualType Composite =
FindCompositePointerType(Loc, E1Tmp, E2Tmp, ConvertArgs);
E1 = E1Tmp;
E2 = E2Tmp;
return Composite;
}
QualType FindCompositeObjCPointerType(ExprResult &LHS, ExprResult &RHS,
SourceLocation QuestionLoc);
bool DiagnoseConditionalForNull(Expr *LHSExpr, Expr *RHSExpr,
SourceLocation QuestionLoc);
void DiagnoseAlwaysNonNullPointer(Expr *E,
Expr::NullPointerConstantKind NullType,
bool IsEqual, SourceRange Range);
/// type checking for vector binary operators.
QualType CheckVectorOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc, bool IsCompAssign,
bool AllowBothBool, bool AllowBoolConversion);
QualType GetSignedVectorType(QualType V);
QualType CheckVectorCompareOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc,
BinaryOperatorKind Opc);
QualType CheckVectorLogicalOperands(ExprResult &LHS, ExprResult &RHS,
SourceLocation Loc);
bool areLaxCompatibleVectorTypes(QualType srcType, QualType destType);
bool isLaxVectorConversion(QualType srcType, QualType destType);
/// type checking declaration initializers (C99 6.7.8)
bool CheckForConstantInitializer(Expr *e, QualType t);
// type checking C++ declaration initializers (C++ [dcl.init]).
/// ReferenceCompareResult - Expresses the result of comparing two
/// types (cv1 T1 and cv2 T2) to determine their compatibility for the
/// purposes of initialization by reference (C++ [dcl.init.ref]p4).
enum ReferenceCompareResult {
/// Ref_Incompatible - The two types are incompatible, so direct
/// reference binding is not possible.
Ref_Incompatible = 0,
/// Ref_Related - The two types are reference-related, which means
/// that their unqualified forms (T1 and T2) are either the same
/// or T1 is a base class of T2.
Ref_Related,
/// Ref_Compatible - The two types are reference-compatible.
Ref_Compatible
};
ReferenceCompareResult CompareReferenceRelationship(SourceLocation Loc,
QualType T1, QualType T2,
bool &DerivedToBase,
bool &ObjCConversion,
bool &ObjCLifetimeConversion);
ExprResult checkUnknownAnyCast(SourceRange TypeRange, QualType CastType,
Expr *CastExpr, CastKind &CastKind,
ExprValueKind &VK, CXXCastPath &Path);
/// Force an expression with unknown-type to an expression of the
/// given type.
ExprResult forceUnknownAnyToType(Expr *E, QualType ToType);
/// Type-check an expression that's being passed to an
/// __unknown_anytype parameter.
ExprResult checkUnknownAnyArg(SourceLocation callLoc,
Expr *result, QualType ¶mType);
// CheckVectorCast - check type constraints for vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size.
// Returns true if the cast is invalid.
bool CheckVectorCast(SourceRange R, QualType VectorTy, QualType Ty,
CastKind &Kind);
/// Prepare `SplattedExpr` for a vector splat operation, adding
/// implicit casts if necessary.
ExprResult prepareVectorSplat(QualType VectorTy, Expr *SplattedExpr);
// CheckExtVectorCast - check type constraints for extended vectors.
// Since vectors are an extension, there is no C standard reference for this.
// We allow casting between vectors and integer datatypes of the same size,
// or vectors and the element type of that vector.
// Returns the cast expr.
ExprResult CheckExtVectorCast(SourceRange R, QualType DestTy, Expr *CastExpr,
CastKind &Kind);
ExprResult BuildCXXFunctionalCastExpr(TypeSourceInfo *TInfo, QualType Type,
SourceLocation LParenLoc,
Expr *CastExpr,
SourceLocation RParenLoc);
enum ARCConversionResult { ACR_okay, ACR_unbridged, ACR_error };
/// Checks for invalid conversions and casts between
/// retainable pointers and other pointer kinds for ARC and Weak.
ARCConversionResult CheckObjCConversion(SourceRange castRange,
QualType castType, Expr *&op,
CheckedConversionKind CCK,
bool Diagnose = true,
bool DiagnoseCFAudited = false,
BinaryOperatorKind Opc = BO_PtrMemD
);
Expr *stripARCUnbridgedCast(Expr *e);
void diagnoseARCUnbridgedCast(Expr *e);
bool CheckObjCARCUnavailableWeakConversion(QualType castType,
QualType ExprType);
/// checkRetainCycles - Check whether an Objective-C message send
/// might create an obvious retain cycle.
void checkRetainCycles(ObjCMessageExpr *msg);
void checkRetainCycles(Expr *receiver, Expr *argument);
void checkRetainCycles(VarDecl *Var, Expr *Init);
/// checkUnsafeAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained type.
bool checkUnsafeAssigns(SourceLocation Loc, QualType LHS, Expr *RHS);
/// checkUnsafeExprAssigns - Check whether +1 expr is being assigned
/// to weak/__unsafe_unretained expression.
void checkUnsafeExprAssigns(SourceLocation Loc, Expr *LHS, Expr *RHS);
/// CheckMessageArgumentTypes - Check types in an Obj-C message send.
/// \param Method - May be null.
/// \param [out] ReturnType - The return type of the send.
/// \return true iff there were any incompatible types.
bool CheckMessageArgumentTypes(const Expr *Receiver, QualType ReceiverType,
MultiExprArg Args, Selector Sel,
ArrayRef<SourceLocation> SelectorLocs,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage, SourceLocation lbrac,
SourceLocation rbrac, SourceRange RecRange,
QualType &ReturnType, ExprValueKind &VK);
/// Determine the result of a message send expression based on
/// the type of the receiver, the method expected to receive the message,
/// and the form of the message send.
QualType getMessageSendResultType(const Expr *Receiver, QualType ReceiverType,
ObjCMethodDecl *Method, bool isClassMessage,
bool isSuperMessage);
/// If the given expression involves a message send to a method
/// with a related result type, emit a note describing what happened.
void EmitRelatedResultTypeNote(const Expr *E);
/// Given that we had incompatible pointer types in a return
/// statement, check whether we're in a method with a related result
/// type, and if so, emit a note describing what happened.
void EmitRelatedResultTypeNoteForReturn(QualType destType);
class ConditionResult {
Decl *ConditionVar;
FullExprArg Condition;
bool Invalid;
bool HasKnownValue;
bool KnownValue;
friend class Sema;
ConditionResult(Sema &S, Decl *ConditionVar, FullExprArg Condition,
bool IsConstexpr)
: ConditionVar(ConditionVar), Condition(Condition), Invalid(false),
HasKnownValue(IsConstexpr && Condition.get() &&
!Condition.get()->isValueDependent()),
KnownValue(HasKnownValue &&
!!Condition.get()->EvaluateKnownConstInt(S.Context)) {}
explicit ConditionResult(bool Invalid)
: ConditionVar(nullptr), Condition(nullptr), Invalid(Invalid),
HasKnownValue(false), KnownValue(false) {}
public:
ConditionResult() : ConditionResult(false) {}
bool isInvalid() const { return Invalid; }
std::pair<VarDecl *, Expr *> get() const {
return std::make_pair(cast_or_null<VarDecl>(ConditionVar),
Condition.get());
}
llvm::Optional<bool> getKnownValue() const {
if (!HasKnownValue)
return None;
return KnownValue;
}
};
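// Sketch of how a caller consumes a ConditionResult (assumed usage, not an
// excerpt): a known constant value is what lets 'if constexpr' discard the
// untaken branch.
//
//   Sema::ConditionResult Cond = S.ActOnCondition(
//       CurScope, IfLoc, CondExpr, Sema::ConditionKind::ConstexprIf);
//   if (Cond.isInvalid())
//     return StmtError();
//   if (llvm::Optional<bool> Known = Cond.getKnownValue())
//     ; // only the branch selected by *Known needs to be instantiated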
static ConditionResult ConditionError() { return ConditionResult(true); }
enum class ConditionKind {
Boolean, ///< A boolean condition, from 'if', 'while', 'for', or 'do'.
ConstexprIf, ///< A constant boolean condition from 'if constexpr'.
Switch ///< An integral condition for a 'switch' statement.
};
ConditionResult ActOnCondition(Scope *S, SourceLocation Loc,
Expr *SubExpr, ConditionKind CK);
ConditionResult ActOnConditionVariable(Decl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
DeclResult ActOnCXXConditionDeclaration(Scope *S, Declarator &D);
ExprResult CheckConditionVariable(VarDecl *ConditionVar,
SourceLocation StmtLoc,
ConditionKind CK);
ExprResult CheckSwitchCondition(SourceLocation SwitchLoc, Expr *Cond);
/// CheckBooleanCondition - Diagnose problems involving the use of
/// the given expression as a boolean condition (e.g. in an if
/// statement). Also performs the standard function and array
/// decays, possibly changing the input variable.
///
/// \param Loc - A location associated with the condition, e.g. the
/// 'if' keyword.
/// \return true iff there were any errors
ExprResult CheckBooleanCondition(SourceLocation Loc, Expr *E,
bool IsConstexpr = false);
/// ActOnExplicitBoolSpecifier - Build an ExplicitSpecifier from an expression
/// found in an explicit(bool) specifier.
ExplicitSpecifier ActOnExplicitBoolSpecifier(Expr *E);
/// tryResolveExplicitSpecifier - Attempt to resolve the explicit specifier.
/// Returns true if the explicit specifier is now resolved.
bool tryResolveExplicitSpecifier(ExplicitSpecifier &ExplicitSpec);
/// DiagnoseAssignmentAsCondition - Given that an expression is
/// being used as a boolean condition, warn if it's an assignment.
void DiagnoseAssignmentAsCondition(Expr *E);
/// Redundant parentheses over an equality comparison can indicate
/// that the user intended an assignment used as a condition.
void DiagnoseEqualityWithExtraParens(ParenExpr *ParenE);
/// CheckCXXBooleanCondition - Checks the conversion of the condition to
/// bool; returns an invalid ExprResult if the conversion fails.
ExprResult CheckCXXBooleanCondition(Expr *CondExpr, bool IsConstexpr = false);
/// ConvertIntegerToTypeWarnOnOverflow - Convert the specified APInt to have
/// the specified width and sign. If an overflow occurs, detect it and emit
/// the specified diagnostic.
void ConvertIntegerToTypeWarnOnOverflow(llvm::APSInt &OldVal,
unsigned NewWidth, bool NewSign,
SourceLocation Loc, unsigned DiagID);
/// Checks that the Objective-C declaration is declared in the global scope.
/// Emits an error and marks the declaration as invalid if it's not declared
/// in the global scope.
bool CheckObjCDeclScope(Decl *D);
/// Abstract base class used for diagnosing integer constant
/// expression violations.
class VerifyICEDiagnoser {
public:
bool Suppress;
VerifyICEDiagnoser(bool Suppress = false) : Suppress(Suppress) { }
virtual void diagnoseNotICE(Sema &S, SourceLocation Loc, SourceRange SR) =0;
virtual void diagnoseFold(Sema &S, SourceLocation Loc, SourceRange SR);
virtual ~VerifyICEDiagnoser() { }
};
/// VerifyIntegerConstantExpression - Verifies that an expression is an ICE,
/// and reports the appropriate diagnostics; returns an invalid ExprResult on
/// failure. Can optionally return the value of the expression.
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
VerifyICEDiagnoser &Diagnoser,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E, llvm::APSInt *Result,
unsigned DiagID,
bool AllowFold = true);
ExprResult VerifyIntegerConstantExpression(Expr *E,
llvm::APSInt *Result = nullptr);
/// VerifyBitField - verifies that a bit-field expression is an ICE and has
/// the correct width, and that the field type is valid.
/// Returns an invalid ExprResult on failure.
/// Can optionally return whether the bit-field is of width 0.
ExprResult VerifyBitField(SourceLocation FieldLoc, IdentifierInfo *FieldName,
QualType FieldTy, bool IsMsStruct,
Expr *BitWidth, bool *ZeroWidth = nullptr);
private:
unsigned ForceCUDAHostDeviceDepth = 0;
public:
/// Increments our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. So long as this count is greater
/// than zero, all functions encountered will be __host__ __device__.
void PushForceCUDAHostDevice();
/// Decrements our count of the number of times we've seen a pragma forcing
/// functions to be __host__ __device__. Returns false if the count is 0
/// before decrementing, so you can emit an error.
bool PopForceCUDAHostDevice();
/// Diagnostics that are emitted only if we discover that the given function
/// must be codegen'ed. Because handling these correctly adds overhead to
/// compilation, this is currently only enabled for CUDA compilations.
llvm::DenseMap<CanonicalDeclPtr<FunctionDecl>,
std::vector<PartialDiagnosticAt>>
DeviceDeferredDiags;
/// A pair of a canonical FunctionDecl and a SourceLocation. When used as the
/// key in a hashtable, both the FD and location are hashed.
struct FunctionDeclAndLoc {
CanonicalDeclPtr<FunctionDecl> FD;
SourceLocation Loc;
};
/// FunctionDecls and SourceLocations for which CheckCUDACall has emitted a
/// (maybe deferred) "bad call" diagnostic. We use this to avoid emitting the
/// same deferred diag twice.
llvm::DenseSet<FunctionDeclAndLoc> LocsWithCUDACallDiags;
/// An inverse call graph, mapping known-emitted functions to one of their
/// known-emitted callers (plus the location of the call).
///
/// Functions that we can tell a priori must be emitted aren't added to this
/// map.
llvm::DenseMap</* Callee = */ CanonicalDeclPtr<FunctionDecl>,
/* Caller = */ FunctionDeclAndLoc>
DeviceKnownEmittedFns;
/// A partial call graph maintained during CUDA/OpenMP device code compilation
/// to support deferred diagnostics.
///
/// Functions are only added here if, at the time they're considered, they are
/// not known-emitted. As soon as we discover that a function is
/// known-emitted, we remove it and everything it transitively calls from this
/// set and add those functions to DeviceKnownEmittedFns.
llvm::DenseMap</* Caller = */ CanonicalDeclPtr<FunctionDecl>,
/* Callees = */ llvm::MapVector<CanonicalDeclPtr<FunctionDecl>,
SourceLocation>>
DeviceCallGraph;
/// Diagnostic builder for CUDA/OpenMP device errors which may or may not be
/// deferred.
///
/// In CUDA, there exist constructs (e.g. variable-length arrays, try/catch)
/// which are not allowed to appear inside __device__ functions and are
/// allowed to appear in __host__ __device__ functions only if the host+device
/// function is never codegen'ed.
///
/// To handle this, we use the notion of "deferred diagnostics", where we
/// attach a diagnostic to a FunctionDecl that's emitted iff it's codegen'ed.
///
/// This class lets you emit either a regular diagnostic, a deferred
/// diagnostic, or no diagnostic at all, according to an argument you pass to
/// its constructor, thus simplifying the process of creating these "maybe
/// deferred" diagnostics.
class DeviceDiagBuilder {
public:
enum Kind {
/// Emit no diagnostics.
K_Nop,
/// Emit the diagnostic immediately (i.e., behave like Sema::Diag()).
K_Immediate,
/// Emit the diagnostic immediately, and, if it's a warning or error, also
/// emit a call stack showing how this function can be reached by an a
/// priori known-emitted function.
K_ImmediateWithCallStack,
/// Create a deferred diagnostic, which is emitted only if the function
/// it's attached to is codegen'ed. Also emit a call stack as with
/// K_ImmediateWithCallStack.
K_Deferred
};
DeviceDiagBuilder(Kind K, SourceLocation Loc, unsigned DiagID,
FunctionDecl *Fn, Sema &S);
DeviceDiagBuilder(DeviceDiagBuilder &&D);
DeviceDiagBuilder(const DeviceDiagBuilder &) = default;
~DeviceDiagBuilder();
/// Convertible to bool: True if we immediately emitted an error, false if
/// we didn't emit an error or we created a deferred error.
///
/// Example usage:
///
/// if (DeviceDiagBuilder(...) << foo << bar)
/// return ExprError();
///
/// But see CUDADiagIfDeviceCode() and CUDADiagIfHostCode() -- you probably
/// want to use these instead of creating a DeviceDiagBuilder yourself.
operator bool() const { return ImmediateDiag.hasValue(); }
template <typename T>
friend const DeviceDiagBuilder &operator<<(const DeviceDiagBuilder &Diag,
const T &Value) {
if (Diag.ImmediateDiag.hasValue())
*Diag.ImmediateDiag << Value;
else if (Diag.PartialDiagId.hasValue())
Diag.S.DeviceDeferredDiags[Diag.Fn][*Diag.PartialDiagId].second
<< Value;
return Diag;
}
private:
Sema &S;
SourceLocation Loc;
unsigned DiagID;
FunctionDecl *Fn;
bool ShowCallStack;
// Invariant: At most one of these Optionals has a value.
// FIXME: Switch these to a Variant once that exists.
llvm::Optional<SemaDiagnosticBuilder> ImmediateDiag;
llvm::Optional<unsigned> PartialDiagId;
};
/// Indicate that this function (and thus everything it transitively calls)
/// will be codegen'ed, and emit any deferred diagnostics on this function and
/// its (transitive) callees.
void markKnownEmitted(
Sema &S, FunctionDecl *OrigCaller, FunctionDecl *OrigCallee,
SourceLocation OrigLoc,
const llvm::function_ref<bool(Sema &, FunctionDecl *)> IsKnownEmitted);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as device code".
///
/// - If CurContext is a __host__ function, does not emit any diagnostics.
/// - If CurContext is a __device__ or __global__ function, emits the
/// diagnostics immediately.
/// - If CurContext is a __host__ __device__ function and we are compiling for
/// the device, creates a diagnostic which is emitted if and when we realize
/// that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in CUDA device code.
/// if (CUDADiagIfDeviceCode(Loc, diag::err_cuda_vla) << CurrentCUDATarget())
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder CUDADiagIfDeviceCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current context
/// is "used as host code".
///
/// Same as CUDADiagIfDeviceCode, with "host" and "device" switched.
DeviceDiagBuilder CUDADiagIfHostCode(SourceLocation Loc, unsigned DiagID);
/// Creates a DeviceDiagBuilder that emits the diagnostic if the current
/// context is "used as device code".
///
/// - If CurContext is a `declare target` function or it is known that the
/// function is emitted for the device, emits the diagnostics immediately.
/// - If CurContext is a non-`declare target` function and we are compiling
/// for the device, creates a diagnostic which is emitted if and when we
/// realize that the function will be codegen'ed.
///
/// Example usage:
///
/// // Variable-length arrays are not allowed in NVPTX device code.
/// if (diagIfOpenMPDeviceCode(Loc, diag::err_vla_unsupported))
/// return ExprError();
/// // Otherwise, continue parsing as normal.
DeviceDiagBuilder diagIfOpenMPDeviceCode(SourceLocation Loc, unsigned DiagID);
DeviceDiagBuilder targetDiag(SourceLocation Loc, unsigned DiagID);
enum CUDAFunctionTarget {
CFT_Device,
CFT_Global,
CFT_Host,
CFT_HostDevice,
CFT_InvalidTarget
};
/// Determines whether the given function is a CUDA device/host/kernel/etc.
/// function.
///
/// Use this rather than examining the function's attributes yourself -- you
/// will get it wrong. Returns CFT_Host if D is null.
CUDAFunctionTarget IdentifyCUDATarget(const FunctionDecl *D,
bool IgnoreImplicitHDAttr = false);
CUDAFunctionTarget IdentifyCUDATarget(const ParsedAttributesView &Attrs);
/// Gets the CUDA target for the current context.
CUDAFunctionTarget CurrentCUDATarget() {
return IdentifyCUDATarget(dyn_cast<FunctionDecl>(CurContext));
}
// CUDA function call preference. Must be ordered numerically from
// worst to best.
enum CUDAFunctionPreference {
CFP_Never, // Invalid caller/callee combination.
CFP_WrongSide, // Calls from host-device to host or device
// function that do not match current compilation
// mode.
CFP_HostDevice, // Any calls to host/device functions.
CFP_SameSide, // Calls from host-device to host or device
// function matching current compilation mode.
CFP_Native, // host-to-host or device-to-device calls.
};
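// Because the enumerators above run from worst to best, callers can compare
// preferences numerically. A minimal sketch (assumed usage) of ranking two
// overload candidates:
//
//   CUDAFunctionPreference P1 = IdentifyCUDAPreference(Caller, Cand1);
//   CUDAFunctionPreference P2 = IdentifyCUDAPreference(Caller, Cand2);
//   if (P1 != P2)
//     return P1 > P2; // prefer the candidate with the higher preference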
/// Identifies relative preference of a given Caller/Callee
/// combination, based on their host/device attributes.
/// \param Caller function which needs address of \p Callee.
/// nullptr in case of global context.
/// \param Callee target function
///
/// \returns preference value for particular Caller/Callee combination.
CUDAFunctionPreference IdentifyCUDAPreference(const FunctionDecl *Caller,
const FunctionDecl *Callee);
/// Determines whether Caller may invoke Callee, based on their CUDA
/// host/device attributes. Returns false if the call is not allowed.
///
/// Note: Will return true for CFP_WrongSide calls. These may appear in
/// semantically correct CUDA programs, but only if they're never codegen'ed.
bool IsAllowedCUDACall(const FunctionDecl *Caller,
const FunctionDecl *Callee) {
return IdentifyCUDAPreference(Caller, Callee) != CFP_Never;
}
/// May add implicit CUDAHostAttr and CUDADeviceAttr attributes to FD,
/// depending on FD and the current compilation settings.
void maybeAddCUDAHostDeviceAttrs(FunctionDecl *FD,
const LookupResult &Previous);
public:
/// Check whether we're allowed to call Callee from the current context.
///
/// - If the call is never allowed in a semantically-correct program
/// (CFP_Never), emits an error and returns false.
///
/// - If the call is allowed in semantically-correct programs, but only if
/// it's never codegen'ed (CFP_WrongSide), creates a deferred diagnostic to
/// be emitted if and when the caller is codegen'ed, and returns true.
///
/// Will only create deferred diagnostics for a given SourceLocation once,
/// so you can safely call this multiple times without generating duplicate
/// deferred errors.
///
/// - Otherwise, returns true without emitting any diagnostics.
bool CheckCUDACall(SourceLocation Loc, FunctionDecl *Callee);
/// Set __device__ or __host__ __device__ attributes on the given lambda
/// operator() method.
///
/// CUDA lambdas declared inside __device__ or __global__ functions inherit
/// the __device__ attribute. Similarly, lambdas inside __host__ __device__
/// functions become __host__ __device__ themselves.
void CUDASetLambdaAttrs(CXXMethodDecl *Method);
/// Finds a function in \p Matches with highest calling priority
/// from \p Caller context and erases all functions with lower
/// calling priority.
void EraseUnwantedCUDAMatches(
const FunctionDecl *Caller,
SmallVectorImpl<std::pair<DeclAccessPair, FunctionDecl *>> &Matches);
/// Given an implicit special member, infer its CUDA target from the
/// calls it needs to make to underlying base/field special members.
/// \param ClassDecl the class for which the member is being created.
/// \param CSM the kind of special member.
/// \param MemberDecl the special member itself.
/// \param ConstRHS true if this is a copy operation with a const object on
/// its RHS.
/// \param Diagnose true if this call should emit diagnostics.
/// \return true if there was an error inferring.
/// The result of this call is implicit CUDA target attribute(s) attached to
/// the member declaration.
bool inferCUDATargetForImplicitSpecialMember(CXXRecordDecl *ClassDecl,
CXXSpecialMember CSM,
CXXMethodDecl *MemberDecl,
bool ConstRHS,
bool Diagnose);
/// \return true if \p CD can be considered empty according to CUDA
/// (E.2.3.1 in CUDA 7.5 Programming guide).
bool isEmptyCudaConstructor(SourceLocation Loc, CXXConstructorDecl *CD);
bool isEmptyCudaDestructor(SourceLocation Loc, CXXDestructorDecl *CD);
// \brief Checks that initializers of \p Var satisfy CUDA restrictions. In
// case of error, emits an appropriate diagnostic and invalidates \p Var.
//
// \details CUDA allows only empty constructors as initializers for global
// variables (see E.2.3.1, CUDA 7.5). The same restriction also applies to all
// __shared__ variables whether they are local or not (they all are implicitly
// static in CUDA). One exception is that CUDA allows constant initializers
// for __constant__ and __device__ variables.
void checkAllowedCUDAInitializer(VarDecl *VD);
/// Check whether NewFD is a valid overload for CUDA. Emits
/// diagnostics and invalidates NewFD if not.
void checkCUDATargetOverload(FunctionDecl *NewFD,
const LookupResult &Previous);
/// Copies target attributes from the template TD to the function FD.
void inheritCUDATargetAttrs(FunctionDecl *FD, const FunctionTemplateDecl &TD);
/// Returns the name of the launch configuration function. This is the name
/// of the function that will be called to configure a kernel call, with the
/// parameters specified via <<<>>>.
std::string getCudaConfigureFuncName() const;
/// \name Code completion
//@{
/// Describes the context in which code completion occurs.
enum ParserCompletionContext {
/// Code completion occurs at top-level or namespace context.
PCC_Namespace,
/// Code completion occurs within a class, struct, or union.
PCC_Class,
/// Code completion occurs within an Objective-C interface, protocol,
/// or category.
PCC_ObjCInterface,
/// Code completion occurs within an Objective-C implementation or
/// category implementation.
PCC_ObjCImplementation,
/// Code completion occurs within the list of instance variables
/// in an Objective-C interface, protocol, category, or implementation.
PCC_ObjCInstanceVariableList,
/// Code completion occurs following one or more template
/// headers.
PCC_Template,
/// Code completion occurs following one or more template
/// headers within a class.
PCC_MemberTemplate,
/// Code completion occurs within an expression.
PCC_Expression,
/// Code completion occurs within a statement, which may
/// also be an expression or a declaration.
PCC_Statement,
/// Code completion occurs at the beginning of the
/// initialization statement (or expression) in a for loop.
PCC_ForInit,
/// Code completion occurs within the condition of an if,
/// while, switch, or for statement.
PCC_Condition,
/// Code completion occurs within the body of a function on a
/// recovery path, where we do not have a specific handle on our position
/// in the grammar.
PCC_RecoveryInFunction,
/// Code completion occurs where only a type is permitted.
PCC_Type,
/// Code completion occurs in a parenthesized expression, which
/// might also be a type cast.
PCC_ParenthesizedExpression,
/// Code completion occurs within a sequence of declaration
/// specifiers within a function, method, or block.
PCC_LocalDeclarationSpecifiers
};
void CodeCompleteModuleImport(SourceLocation ImportLoc, ModuleIdPath Path);
void CodeCompleteOrdinaryName(Scope *S,
ParserCompletionContext CompletionContext);
void CodeCompleteDeclSpec(Scope *S, DeclSpec &DS,
bool AllowNonIdentifiers,
bool AllowNestedNameSpecifiers);
struct CodeCompleteExpressionData;
void CodeCompleteExpression(Scope *S,
const CodeCompleteExpressionData &Data);
void CodeCompleteExpression(Scope *S, QualType PreferredType,
bool IsParenthesized = false);
void CodeCompleteMemberReferenceExpr(Scope *S, Expr *Base, Expr *OtherOpBase,
SourceLocation OpLoc, bool IsArrow,
bool IsBaseExprStatement,
QualType PreferredType);
void CodeCompletePostfixExpression(Scope *S, ExprResult LHS,
QualType PreferredType);
void CodeCompleteTag(Scope *S, unsigned TagSpec);
void CodeCompleteTypeQualifiers(DeclSpec &DS);
void CodeCompleteFunctionQualifiers(DeclSpec &DS, Declarator &D,
const VirtSpecifiers *VS = nullptr);
void CodeCompleteBracketDeclarator(Scope *S);
void CodeCompleteCase(Scope *S);
/// Reports signatures for a call to CodeCompleteConsumer and returns the
/// preferred type for the current argument. Returned type can be null.
QualType ProduceCallSignatureHelp(Scope *S, Expr *Fn, ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceConstructorSignatureHelp(Scope *S, QualType Type,
SourceLocation Loc,
ArrayRef<Expr *> Args,
SourceLocation OpenParLoc);
QualType ProduceCtorInitMemberSignatureHelp(Scope *S, Decl *ConstructorDecl,
CXXScopeSpec SS,
ParsedType TemplateTypeTy,
ArrayRef<Expr *> ArgExprs,
IdentifierInfo *II,
SourceLocation OpenParLoc);
void CodeCompleteInitializer(Scope *S, Decl *D);
void CodeCompleteAfterIf(Scope *S);
void CodeCompleteQualifiedId(Scope *S, CXXScopeSpec &SS, bool EnteringContext,
QualType BaseType, QualType PreferredType);
void CodeCompleteUsing(Scope *S);
void CodeCompleteUsingDirective(Scope *S);
void CodeCompleteNamespaceDecl(Scope *S);
void CodeCompleteNamespaceAliasDecl(Scope *S);
void CodeCompleteOperatorName(Scope *S);
void CodeCompleteConstructorInitializer(
Decl *Constructor,
ArrayRef<CXXCtorInitializer *> Initializers);
void CodeCompleteLambdaIntroducer(Scope *S, LambdaIntroducer &Intro,
bool AfterAmpersand);
void CodeCompleteObjCAtDirective(Scope *S);
void CodeCompleteObjCAtVisibility(Scope *S);
void CodeCompleteObjCAtStatement(Scope *S);
void CodeCompleteObjCAtExpression(Scope *S);
void CodeCompleteObjCPropertyFlags(Scope *S, ObjCDeclSpec &ODS);
void CodeCompleteObjCPropertyGetter(Scope *S);
void CodeCompleteObjCPropertySetter(Scope *S);
void CodeCompleteObjCPassingType(Scope *S, ObjCDeclSpec &DS,
bool IsParameter);
void CodeCompleteObjCMessageReceiver(Scope *S);
void CodeCompleteObjCSuperMessage(Scope *S, SourceLocation SuperLoc,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression);
void CodeCompleteObjCClassMessage(Scope *S, ParsedType Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
bool IsSuper = false);
void CodeCompleteObjCInstanceMessage(Scope *S, Expr *Receiver,
ArrayRef<IdentifierInfo *> SelIdents,
bool AtArgumentExpression,
ObjCInterfaceDecl *Super = nullptr);
void CodeCompleteObjCForCollection(Scope *S,
DeclGroupPtrTy IterationVar);
void CodeCompleteObjCSelector(Scope *S,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCProtocolReferences(
ArrayRef<IdentifierLocPair> Protocols);
void CodeCompleteObjCProtocolDecl(Scope *S);
void CodeCompleteObjCInterfaceDecl(Scope *S);
void CodeCompleteObjCSuperclass(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationDecl(Scope *S);
void CodeCompleteObjCInterfaceCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCImplementationCategory(Scope *S,
IdentifierInfo *ClassName,
SourceLocation ClassNameLoc);
void CodeCompleteObjCPropertyDefinition(Scope *S);
void CodeCompleteObjCPropertySynthesizeIvar(Scope *S,
IdentifierInfo *PropertyName);
void CodeCompleteObjCMethodDecl(Scope *S, Optional<bool> IsInstanceMethod,
ParsedType ReturnType);
void CodeCompleteObjCMethodDeclSelector(Scope *S,
bool IsInstanceMethod,
bool AtParameterName,
ParsedType ReturnType,
ArrayRef<IdentifierInfo *> SelIdents);
void CodeCompleteObjCClassPropertyRefExpr(Scope *S, IdentifierInfo &ClassName,
SourceLocation ClassNameLoc,
bool IsBaseExprStatement);
void CodeCompletePreprocessorDirective(bool InConditional);
void CodeCompleteInPreprocessorConditionalExclusion(Scope *S);
void CodeCompletePreprocessorMacroName(bool IsDefinition);
void CodeCompletePreprocessorExpression();
void CodeCompletePreprocessorMacroArgument(Scope *S,
IdentifierInfo *Macro,
MacroInfo *MacroInfo,
unsigned Argument);
void CodeCompleteIncludedFile(llvm::StringRef Dir, bool IsAngled);
void CodeCompleteNaturalLanguage();
void CodeCompleteAvailabilityPlatformName();
void GatherGlobalCodeCompletions(CodeCompletionAllocator &Allocator,
CodeCompletionTUInfo &CCTUInfo,
SmallVectorImpl<CodeCompletionResult> &Results);
//@}
//===--------------------------------------------------------------------===//
// Extra semantic analysis beyond the C type system
public:
SourceLocation getLocationOfStringLiteralByte(const StringLiteral *SL,
unsigned ByteNo) const;
private:
void CheckArrayAccess(const Expr *BaseExpr, const Expr *IndexExpr,
const ArraySubscriptExpr *ASE=nullptr,
bool AllowOnePastEnd=true, bool IndexNegated=false);
void CheckArrayAccess(const Expr *E);
// Used to grab the relevant information from a FormatAttr and a
// FunctionDeclaration.
struct FormatStringInfo {
unsigned FormatIdx;
unsigned FirstDataArg;
bool HasVAListArg;
};
static bool getFormatStringInfo(const FormatAttr *Format, bool IsCXXMember,
FormatStringInfo *FSI);
bool CheckFunctionCall(FunctionDecl *FDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckObjCMethodCall(ObjCMethodDecl *Method, SourceLocation loc,
ArrayRef<const Expr *> Args);
bool CheckPointerCall(NamedDecl *NDecl, CallExpr *TheCall,
const FunctionProtoType *Proto);
bool CheckOtherCall(CallExpr *TheCall, const FunctionProtoType *Proto);
void CheckConstructorCall(FunctionDecl *FDecl,
ArrayRef<const Expr *> Args,
const FunctionProtoType *Proto,
SourceLocation Loc);
void checkCall(NamedDecl *FDecl, const FunctionProtoType *Proto,
const Expr *ThisArg, ArrayRef<const Expr *> Args,
bool IsMemberFunction, SourceLocation Loc, SourceRange Range,
VariadicCallType CallType);
bool CheckObjCString(Expr *Arg);
ExprResult CheckOSLogFormatStringArg(Expr *Arg);
ExprResult CheckBuiltinFunctionCall(FunctionDecl *FDecl,
unsigned BuiltinID, CallExpr *TheCall);
void checkFortifiedBuiltinMemoryFunction(FunctionDecl *FD, CallExpr *TheCall);
bool CheckARMBuiltinExclusiveCall(unsigned BuiltinID, CallExpr *TheCall,
unsigned MaxWidth);
bool CheckNeonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckARMBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckAArch64BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinCpu(unsigned BuiltinID, CallExpr *TheCall);
bool CheckHexagonBuiltinArgument(unsigned BuiltinID, CallExpr *TheCall);
bool CheckMipsBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckSystemZBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinRoundingOrSAE(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinGatherScatterScale(unsigned BuiltinID, CallExpr *TheCall);
bool CheckX86BuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool CheckPPCBuiltinFunctionCall(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStart(unsigned BuiltinID, CallExpr *TheCall);
bool SemaBuiltinVAStartARMMicrosoft(CallExpr *Call);
bool SemaBuiltinUnorderedCompare(CallExpr *TheCall);
bool SemaBuiltinFPClassification(CallExpr *TheCall, unsigned NumArgs);
bool SemaBuiltinVSX(CallExpr *TheCall);
bool SemaBuiltinOSLogFormat(CallExpr *TheCall);
public:
// Used by C++ template instantiation.
ExprResult SemaBuiltinShuffleVector(CallExpr *TheCall);
ExprResult SemaConvertVectorExpr(Expr *E, TypeSourceInfo *TInfo,
SourceLocation BuiltinLoc,
SourceLocation RParenLoc);
private:
bool SemaBuiltinPrefetch(CallExpr *TheCall);
bool SemaBuiltinAllocaWithAlign(CallExpr *TheCall);
bool SemaBuiltinAssume(CallExpr *TheCall);
bool SemaBuiltinAssumeAligned(CallExpr *TheCall);
bool SemaBuiltinLongjmp(CallExpr *TheCall);
bool SemaBuiltinSetjmp(CallExpr *TheCall);
ExprResult SemaBuiltinAtomicOverloaded(ExprResult TheCallResult);
ExprResult SemaBuiltinNontemporalOverloaded(ExprResult TheCallResult);
ExprResult SemaAtomicOpsOverloaded(ExprResult TheCallResult,
AtomicExpr::AtomicOp Op);
ExprResult SemaBuiltinOperatorNewDeleteOverloaded(ExprResult TheCallResult,
bool IsDelete);
bool SemaBuiltinConstantArg(CallExpr *TheCall, int ArgNum,
llvm::APSInt &Result);
bool SemaBuiltinConstantArgRange(CallExpr *TheCall, int ArgNum, int Low,
int High, bool RangeIsError = true);
bool SemaBuiltinConstantArgMultiple(CallExpr *TheCall, int ArgNum,
unsigned Multiple);
bool SemaBuiltinARMSpecialReg(unsigned BuiltinID, CallExpr *TheCall,
int ArgNum, unsigned ExpectedFieldNum,
bool AllowName);
bool SemaBuiltinARMMemoryTaggingCall(unsigned BuiltinID, CallExpr *TheCall);
public:
enum FormatStringType {
FST_Scanf,
FST_Printf,
FST_NSString,
FST_Strftime,
FST_Strfmon,
FST_Kprintf,
FST_FreeBSDKPrintf,
FST_OSTrace,
FST_OSLog,
FST_Unknown
};
static FormatStringType GetFormatStringType(const FormatAttr *Format);
bool FormatStringHasSArg(const StringLiteral *FExpr);
static bool GetFormatNSStringIdx(const FormatAttr *Format, unsigned &Idx);
private:
bool CheckFormatArguments(const FormatAttr *Format,
ArrayRef<const Expr *> Args,
bool IsCXXMember,
VariadicCallType CallType,
SourceLocation Loc, SourceRange Range,
llvm::SmallBitVector &CheckedVarArgs);
bool CheckFormatArguments(ArrayRef<const Expr *> Args,
bool HasVAListArg, unsigned format_idx,
unsigned firstDataArg, FormatStringType Type,
VariadicCallType CallType,
SourceLocation Loc, SourceRange range,
llvm::SmallBitVector &CheckedVarArgs);
void CheckAbsoluteValueFunction(const CallExpr *Call,
const FunctionDecl *FDecl);
void CheckMaxUnsignedZero(const CallExpr *Call, const FunctionDecl *FDecl);
void CheckMemaccessArguments(const CallExpr *Call,
unsigned BId,
IdentifierInfo *FnName);
void CheckStrlcpycatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckStrncatArguments(const CallExpr *Call,
IdentifierInfo *FnName);
void CheckReturnValExpr(Expr *RetValExp, QualType lhsType,
SourceLocation ReturnLoc,
bool isObjCMethod = false,
const AttrVec *Attrs = nullptr,
const FunctionDecl *FD = nullptr);
public:
void CheckFloatComparison(SourceLocation Loc, Expr *LHS, Expr *RHS);
private:
void CheckImplicitConversions(Expr *E, SourceLocation CC = SourceLocation());
void CheckBoolLikeConversion(Expr *E, SourceLocation CC);
void CheckForIntOverflow(Expr *E);
void CheckUnsequencedOperations(Expr *E);
/// Perform semantic checks on a completed expression. This will either
/// be a full-expression or a default argument expression.
void CheckCompletedExpr(Expr *E, SourceLocation CheckLoc = SourceLocation(),
bool IsConstexpr = false);
void CheckBitFieldInitialization(SourceLocation InitLoc, FieldDecl *Field,
Expr *Init);
/// Check if there is a field shadowing.
void CheckShadowInheritedFields(const SourceLocation &Loc,
DeclarationName FieldName,
const CXXRecordDecl *RD,
bool DeclIsField = true);
/// Check if the given expression contains 'break' or 'continue'
/// statement that produces control flow different from GCC.
void CheckBreakContinueBinding(Expr *E);
/// Check whether the receiver is a mutable ObjC container which
/// attempts to add itself into the container.
void CheckObjCCircularContainer(ObjCMessageExpr *Message);
void AnalyzeDeleteExprMismatch(const CXXDeleteExpr *DE);
void AnalyzeDeleteExprMismatch(FieldDecl *Field, SourceLocation DeleteLoc,
bool DeleteWasArrayForm);
public:
/// Register a magic integral constant to be used as a type tag.
void RegisterTypeTagForDatatype(const IdentifierInfo *ArgumentKind,
uint64_t MagicValue, QualType Type,
bool LayoutCompatible, bool MustBeNull);
struct TypeTagData {
TypeTagData() {}
TypeTagData(QualType Type, bool LayoutCompatible, bool MustBeNull) :
Type(Type), LayoutCompatible(LayoutCompatible),
MustBeNull(MustBeNull)
{}
QualType Type;
/// If true, \c Type should be compared with other expression's types for
/// layout-compatibility.
unsigned LayoutCompatible : 1;
unsigned MustBeNull : 1;
};
/// A pair of ArgumentKind identifier and magic value. This uniquely
/// identifies the magic value.
typedef std::pair<const IdentifierInfo *, uint64_t> TypeTagMagicValue;
private:
/// A map from magic value to type information.
std::unique_ptr<llvm::DenseMap<TypeTagMagicValue, TypeTagData>>
TypeTagForDatatypeMagicValues;
/// Perform checks on a call of a function with argument_with_type_tag
/// or pointer_with_type_tag attributes.
void CheckArgumentWithTypeTag(const ArgumentWithTypeTagAttr *Attr,
const ArrayRef<const Expr *> ExprArgs,
SourceLocation CallSiteLoc);
/// Check if we are taking the address of a packed field
/// as this may be a problem if the pointer value is dereferenced.
void CheckAddressOfPackedMember(Expr *rhs);
/// The parser's current scope.
///
/// The parser maintains this state here.
Scope *CurScope;
mutable IdentifierInfo *Ident_super;
mutable IdentifierInfo *Ident___float128;
/// Nullability type specifiers.
IdentifierInfo *Ident__Nonnull = nullptr;
IdentifierInfo *Ident__Nullable = nullptr;
IdentifierInfo *Ident__Null_unspecified = nullptr;
IdentifierInfo *Ident_NSError = nullptr;
/// The handler for the FileChanged preprocessor events.
///
/// Used for diagnostics that implement custom semantic analysis for #include
/// directives, like -Wpragma-pack.
sema::SemaPPCallbacks *SemaPPCallbackHandler;
protected:
friend class Parser;
friend class InitializationSequence;
friend class ASTReader;
friend class ASTDeclReader;
friend class ASTWriter;
public:
/// Retrieve the keyword associated with the given nullability kind.
IdentifierInfo *getNullabilityKeyword(NullabilityKind nullability);
/// The struct behind the CFErrorRef pointer.
RecordDecl *CFError = nullptr;
/// Retrieve the identifier "NSError".
IdentifierInfo *getNSErrorIdent();
/// Retrieve the parser's current scope.
///
/// This routine must only be used when it is certain that semantic analysis
/// and the parser are in precisely the same context, which is not the case
/// when, e.g., we are performing any kind of template instantiation.
/// Therefore, the only safe places to use this scope are in the parser
/// itself and in routines directly invoked from the parser and *never* from
/// template substitution or instantiation.
Scope *getCurScope() const { return CurScope; }
void incrementMSManglingNumber() const {
return CurScope->incrementMSManglingNumber();
}
IdentifierInfo *getSuperIdentifier() const;
IdentifierInfo *getFloat128Identifier() const;
Decl *getObjCDeclContext() const;
DeclContext *getCurLexicalContext() const {
return OriginalLexicalContext ? OriginalLexicalContext : CurContext;
}
const DeclContext *getCurObjCLexicalContext() const {
const DeclContext *DC = getCurLexicalContext();
// A category implicitly has the attribute of the interface.
if (const ObjCCategoryDecl *CatD = dyn_cast<ObjCCategoryDecl>(DC))
DC = CatD->getClassInterface();
return DC;
}
/// To be used for checking whether the number of arguments being passed to
/// a function exceeds the number of parameters expected for it.
static bool TooManyArguments(size_t NumParams, size_t NumArgs,
bool PartialOverloading = false) {
// We check whether we're just after a comma in code-completion.
if (NumArgs > 0 && PartialOverloading)
return NumArgs + 1 > NumParams; // If so, we view as an extra argument.
return NumArgs > NumParams;
}
// Emitting members of dllexported classes is delayed until the class
// (including field initializers) is fully parsed.
SmallVector<CXXRecordDecl*, 4> DelayedDllExportClasses;
private:
class SavePendingParsedClassStateRAII {
public:
SavePendingParsedClassStateRAII(Sema &S) : S(S) { swapSavedState(); }
~SavePendingParsedClassStateRAII() {
assert(S.DelayedOverridingExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedEquivalentExceptionSpecChecks.empty() &&
"there shouldn't be any pending delayed exception spec checks");
assert(S.DelayedDllExportClasses.empty() &&
"there shouldn't be any pending delayed DLL export classes");
swapSavedState();
}
private:
Sema &S;
decltype(DelayedOverridingExceptionSpecChecks)
SavedOverridingExceptionSpecChecks;
decltype(DelayedEquivalentExceptionSpecChecks)
SavedEquivalentExceptionSpecChecks;
decltype(DelayedDllExportClasses) SavedDllExportClasses;
void swapSavedState() {
SavedOverridingExceptionSpecChecks.swap(
S.DelayedOverridingExceptionSpecChecks);
SavedEquivalentExceptionSpecChecks.swap(
S.DelayedEquivalentExceptionSpecChecks);
SavedDllExportClasses.swap(S.DelayedDllExportClasses);
}
};
/// Helper class that collects misaligned member designations and
/// their location info for delayed diagnostics.
struct MisalignedMember {
Expr *E;
RecordDecl *RD;
ValueDecl *MD;
CharUnits Alignment;
MisalignedMember() : E(), RD(), MD(), Alignment() {}
MisalignedMember(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment)
: E(E), RD(RD), MD(MD), Alignment(Alignment) {}
explicit MisalignedMember(Expr *E)
: MisalignedMember(E, nullptr, nullptr, CharUnits()) {}
bool operator==(const MisalignedMember &m) { return this->E == m.E; }
};
/// Small set of gathered accesses to potentially misaligned members
/// due to the packed attribute.
SmallVector<MisalignedMember, 4> MisalignedMembers;
/// Adds an expression to the set of gathered misaligned members.
void AddPotentialMisalignedMembers(Expr *E, RecordDecl *RD, ValueDecl *MD,
CharUnits Alignment);
public:
/// Diagnoses the current set of gathered accesses. This typically
/// happens at full expression level. The set is cleared after emitting the
/// diagnostics.
void DiagnoseMisalignedMembers();
/// This function checks if the expression is in the set of potentially
/// misaligned members and is converted to some pointer type T with lower
/// or equal alignment requirements. If so, it removes it. This is used when
/// we do not want to diagnose such misaligned access (e.g. in conversions to
/// void*).
void DiscardMisalignedMemberAddress(const Type *T, Expr *E);
/// This function calls Action when it determines that E designates a
/// misaligned member due to the packed attribute. This is used to emit
/// local diagnostics like in reference binding.
void RefersToMemberWithReducedAlignment(
Expr *E,
llvm::function_ref<void(Expr *, RecordDecl *, FieldDecl *, CharUnits)>
Action);
/// Describes the reason a calling convention specification was ignored, used
/// for diagnostics.
enum class CallingConventionIgnoredReason {
ForThisTarget = 0,
VariadicFunction,
ConstructorDestructor,
BuiltinFunction
};
};
/// RAII object that enters a new expression evaluation context.
class EnterExpressionEvaluationContext {
Sema &Actions;
bool Entered = true;
public:
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Decl *LambdaContextDecl = nullptr,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other,
bool ShouldEnter = true)
: Actions(Actions), Entered(ShouldEnter) {
if (Entered)
Actions.PushExpressionEvaluationContext(NewContext, LambdaContextDecl,
ExprContext);
}
EnterExpressionEvaluationContext(
Sema &Actions, Sema::ExpressionEvaluationContext NewContext,
Sema::ReuseLambdaContextDecl_t,
Sema::ExpressionEvaluationContextRecord::ExpressionKind ExprContext =
Sema::ExpressionEvaluationContextRecord::EK_Other)
: Actions(Actions) {
Actions.PushExpressionEvaluationContext(
NewContext, Sema::ReuseLambdaContextDecl, ExprContext);
}
enum InitListTag { InitList };
EnterExpressionEvaluationContext(Sema &Actions, InitListTag,
bool ShouldEnter = true)
: Actions(Actions), Entered(false) {
// In C++11 onwards, narrowing checks are performed on the contents of
// braced-init-lists, even when they occur within unevaluated operands.
// Therefore we still need to instantiate constexpr functions used in such
// a context.
if (ShouldEnter && Actions.isUnevaluatedContext() &&
Actions.getLangOpts().CPlusPlus11) {
Actions.PushExpressionEvaluationContext(
Sema::ExpressionEvaluationContext::UnevaluatedList);
Entered = true;
}
}
~EnterExpressionEvaluationContext() {
if (Entered)
Actions.PopExpressionEvaluationContext();
}
};
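// Typical use of the RAII helper above (a sketch; 'analyzeUnevaluated' is a
// hypothetical caller): enter an unevaluated context for the duration of a
// scope, popping automatically on exit.
//
//   void analyzeUnevaluated(Sema &S, Expr *E) {
//     EnterExpressionEvaluationContext Unevaluated(
//         S, Sema::ExpressionEvaluationContext::Unevaluated);
//     // ... inspect E without odr-use or codegen side effects ...
//   }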
/// \brief RAII object that handles state changes for processing a member
/// bounds expression.
class EnterMemberBoundsExprRAII {
Sema &S;
bool SavedMemberBounds;
public:
EnterMemberBoundsExprRAII(Sema &S)
: S(S), SavedMemberBounds(S.IsMemberBoundsExpr)
{
S.IsMemberBoundsExpr = true;
}
~EnterMemberBoundsExprRAII() {
S.IsMemberBoundsExpr = SavedMemberBounds;
}
};
DeductionFailureInfo
MakeDeductionFailureInfo(ASTContext &Context, Sema::TemplateDeductionResult TDK,
sema::TemplateDeductionInfo &Info);
/// Contains a late templated function.
/// Will be parsed at the end of the translation unit, used by Sema & Parser.
struct LateParsedTemplate {
CachedTokens Toks;
/// The template function declaration to be late parsed.
Decl *D;
};
} // end namespace clang
namespace llvm {
// Hash a FunctionDeclAndLoc by looking at both its FunctionDecl and its
// SourceLocation.
template <> struct DenseMapInfo<clang::Sema::FunctionDeclAndLoc> {
using FunctionDeclAndLoc = clang::Sema::FunctionDeclAndLoc;
using FDBaseInfo = DenseMapInfo<clang::CanonicalDeclPtr<clang::FunctionDecl>>;
static FunctionDeclAndLoc getEmptyKey() {
return {FDBaseInfo::getEmptyKey(), clang::SourceLocation()};
}
static FunctionDeclAndLoc getTombstoneKey() {
return {FDBaseInfo::getTombstoneKey(), clang::SourceLocation()};
}
static unsigned getHashValue(const FunctionDeclAndLoc &FDL) {
return hash_combine(FDBaseInfo::getHashValue(FDL.FD),
FDL.Loc.getRawEncoding());
}
static bool isEqual(const FunctionDeclAndLoc &LHS,
const FunctionDeclAndLoc &RHS) {
return LHS.FD == RHS.FD && LHS.Loc == RHS.Loc;
}
};
} // namespace llvm
#endif
|
core_dsyrk_blasfeo.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @generated from core_blas/core_zsyrk.c, normal z -> d, Thu Aug 8 17:24:59 2019
*
**/
#include <plasma_core_blas.h>
#include "plasma_types.h"
#include "core_lapack.h"
#include "blasfeo_d_aux.h"
/***************************************************************************//**
*
* @ingroup core_syrk
*
* Performs one of the symmetric rank k operations
*
* \f[ C = \alpha A \times A^T + \beta C, \f]
* or
* \f[ C = \alpha A^T \times A + \beta C, \f]
*
* where alpha and beta are scalars, C is an n-by-n symmetric
* matrix, and A is an n-by-k matrix in the first case and a k-by-n
* matrix in the second case.
*
*******************************************************************************
*
* @param[in] uplo
* - PlasmaUpper: Upper triangle of C is stored;
* - PlasmaLower: Lower triangle of C is stored.
*
* @param[in] trans
* - PlasmaNoTrans: \f[ C = \alpha A \times A^T + \beta C; \f]
* - PlasmaTrans: \f[ C = \alpha A^T \times A + \beta C. \f]
*
* @param[in] n
* The order of the matrix C. n >= 0.
*
* @param[in] k
* If trans = PlasmaNoTrans, number of columns of the A matrix;
* if trans = PlasmaTrans, number of rows of the A matrix.
*
* @param[in] alpha
* The scalar alpha.
*
* @param[in] sA
* Pointer to the blasfeo matrix structure holding A; the submatrix
* used starts at block offset (ai, aj).
* If trans = PlasmaNoTrans, A is n-by-k;
* if trans = PlasmaTrans, A is k-by-n.
*
* @param[in] beta
* The scalar beta.
*
* @param[in,out] sC
* Pointer to the blasfeo matrix structure holding the n-by-n matrix C;
* the submatrix used starts at block offset (ci, cj).
* On exit, the uplo part of the matrix is overwritten
* by the uplo part of the updated matrix.
*
******************************************************************************/
__attribute__((weak))
void plasma_core_dsyrk_blasfeo(plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
double alpha, struct blasfeo_dmat *sA, int ai, int aj,
double beta, struct blasfeo_dmat *sC, int ci, int cj)
{
// cblas_dsyrk(CblasColMajor,
// (CBLAS_UPLO)uplo, (CBLAS_TRANSPOSE)trans,
// n, k,
// (alpha), A, lda,
// (beta), C, ldc);
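// Note (an observation about this adaptation, not upstream PLASMA text):
// blasfeo_dsyrk_ln computes the lower-triangular, non-transposed variant,
// so the uplo and trans arguments are not dispatched on in this path.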
blasfeo_dsyrk_ln(n, k, alpha, sA, ai, aj, sA, ai, aj, beta, sC, ci, cj, sC, ci, cj);
}
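/*
 * Illustrative call (a sketch under assumed setup, not part of PLASMA):
 * accumulate A * A^T into the lower triangle of an n-by-n submatrix of C,
 * where sA and sC are blasfeo matrices that have already been created and
 * packed, and the (0, 0) pairs are block offsets into them.
 *
 *     plasma_core_dsyrk_blasfeo(PlasmaLower, PlasmaNoTrans, n, k,
 *                               1.0, &sA, 0, 0,
 *                               1.0, &sC, 0, 0);
 */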
/******************************************************************************/
void plasma_core_omp_dsyrk_blasfeo(
plasma_enum_t uplo, plasma_enum_t trans,
int n, int k,
double alpha, struct blasfeo_dmat *sA, int ai, int aj,
double beta, struct blasfeo_dmat *sC, int ci, int cj,
plasma_sequence_t *sequence, plasma_request_t *request)
{
int ak;
if (trans == PlasmaNoTrans)
ak = k;
else
ak = n;
struct blasfeo_dmat sA2, sC2;
sA2 = *sA;
sC2 = *sC;
double *A = sA->pA;
int sda = sA->cn;
double *C = sC->pA;
int sdc = sC->cn;
// #pragma omp task depend(in:A[0:lda*ak]) \
// depend(inout:C[0:ldc*n])
#pragma omp task depend(in:A[0:sda*ak]) \
depend(inout:C[0:sdc*n])
{
if (sequence->status == PlasmaSuccess)
plasma_core_dsyrk_blasfeo(uplo, trans,
n, k,
alpha, &sA2, ai, aj,
beta, &sC2, ci, cj);
}
}
|
critical.c | /* Copyright (C) 2005 Free Software Foundation, Inc.
Contributed by Richard Henderson <rth@redhat.com>.
This file is part of the GNU OpenMP Library (libgomp).
Libgomp is free software; you can redistribute it and/or modify it
under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.
Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for
more details.
You should have received a copy of the GNU Lesser General Public License
along with libgomp; see the file COPYING.LIB. If not, write to the
Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
MA 02110-1301, USA. */
/* As a special exception, if you link this library with other files, some
of which are compiled with GCC, to produce an executable, this library
does not by itself cause the resulting executable to be covered by the
GNU General Public License. This exception does not however invalidate
any other reasons why the executable file might be covered by the GNU
General Public License. */
/* This file handles the CRITICAL construct. */
#include <libgomp/libgomp.h>
#include <libgomp/mutex.h>
#include <nanvix/ulib.h>
static gomp_mutex_t default_lock;
void
GOMP_critical_start (void)
{
gomp_mutex_lock (&default_lock);
}
void
GOMP_critical_end (void)
{
gomp_mutex_unlock (&default_lock);
}
#ifndef HAVE_SYNC_BUILTINS
static gomp_mutex_t create_lock_lock;
#endif
void
GOMP_critical_name_start (void **pptr)
{
gomp_mutex_t *plock;
/* If a mutex fits within the space for a pointer, and is zero initialized,
then use the pointer space directly. */
if (GOMP_MUTEX_INIT_0
&& sizeof (gomp_mutex_t) <= sizeof (void *)
&& __alignof (gomp_mutex_t) <= sizeof (void *))
plock = (gomp_mutex_t *)pptr;
/* Otherwise we have to be prepared to malloc storage. */
else
{
plock = *pptr;
if (plock == NULL)
{
#ifdef HAVE_SYNC_BUILTINS
gomp_mutex_t *nlock = gomp_malloc (sizeof (gomp_mutex_t));
gomp_mutex_init (nlock);
plock = __sync_val_compare_and_swap (pptr, NULL, nlock);
if (plock != NULL)
{
gomp_mutex_destroy (nlock);
ufree (nlock);
}
else
plock = nlock;
#else
gomp_mutex_lock (&create_lock_lock);
plock = *pptr;
if (plock == NULL)
{
plock = gomp_malloc (sizeof (gomp_mutex_t));
gomp_mutex_init (plock);
__sync_synchronize ();
*pptr = plock;
}
gomp_mutex_unlock (&create_lock_lock);
#endif
}
}
gomp_mutex_lock (plock);
}
void
GOMP_critical_name_end (void **pptr)
{
gomp_mutex_t *plock;
/* If a mutex fits within the space for a pointer, and is zero initialized,
then use the pointer space directly. */
if (GOMP_MUTEX_INIT_0
&& sizeof (gomp_mutex_t) <= sizeof (void *)
&& __alignof (gomp_mutex_t) <= sizeof (void *))
plock = (gomp_mutex_t *)pptr;
else
plock = *pptr;
gomp_mutex_unlock (plock);
}
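/* Lowering sketch (an assumption about typical compiler output, not part
   of libgomp itself): for "#pragma omp critical (foo)" GCC emits one
   zero-initialized pointer-sized slot per name and brackets the region
   with the paired calls:

       static void *gomp_critical_user_foo;

       GOMP_critical_name_start (&gomp_critical_user_foo);
       ... body of the critical region ...
       GOMP_critical_name_end (&gomp_critical_user_foo);

   which is why GOMP_critical_name_start can reuse the pointer slot itself
   as the mutex when a mutex fits in a pointer and is zero-initialized. */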
/* This mutex is used when atomic operations don't exist for the target
in the mode requested. The result is not globally atomic, but works so
long as all parallel references are within #pragma omp atomic directives.
According to responses received from omp@openmp.org, appears to be within
spec. Which makes sense, since that's how several other compilers
handle this situation as well. */
static gomp_mutex_t atomic_lock;
void
GOMP_atomic_start (void)
{
gomp_mutex_lock (&atomic_lock);
}
void
GOMP_atomic_end (void)
{
gomp_mutex_unlock (&atomic_lock);
}
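/* Lowering sketch (illustrative): on a target without native atomics for
   the requested type, "#pragma omp atomic" applied to "x += 1.0" becomes
   roughly:

       GOMP_atomic_start ();
       x += 1.0;
       GOMP_atomic_end ();

   so atomicity holds only among references that go through these calls. */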
#if !GOMP_MUTEX_INIT_0
static void __attribute__((constructor))
initialize_critical (void)
{
gomp_mutex_init (&default_lock);
gomp_mutex_init (&atomic_lock);
#ifndef HAVE_SYNC_BUILTINS
gomp_mutex_init (&create_lock_lock);
#endif
}
#endif
|
pooling.h | // Copyright 2018 Xiaomi, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MACE_KERNELS_POOLING_H_
#define MACE_KERNELS_POOLING_H_
#include <algorithm>
#include <limits>
#include <memory>
#include <vector>
#include "mace/core/future.h"
#include "mace/core/tensor.h"
#include "mace/kernels/conv_pool_2d_util.h"
#ifdef MACE_ENABLE_OPENCL
#include "mace/core/runtime/opencl/cl2_header.h"
#endif // MACE_ENABLE_OPENCL
namespace mace {
enum PoolingType {
AVG = 1, // avg_pool
MAX = 2, // max_pool
};
namespace kernels {
struct PoolingFunctorBase {
PoolingFunctorBase(const PoolingType pooling_type,
const int *kernels,
const int *strides,
const Padding padding_type,
const std::vector<int> &paddings,
const int *dilations)
: pooling_type_(pooling_type),
kernels_(kernels),
strides_(strides),
padding_type_(padding_type),
paddings_(paddings),
dilations_(dilations) {}
const PoolingType pooling_type_;
const int *kernels_;
const int *strides_;
const Padding padding_type_;
std::vector<int> paddings_;
const int *dilations_;
};
template <DeviceType D, typename T>
struct PoolingFunctor;
template <>
struct PoolingFunctor<DeviceType::CPU, float>: PoolingFunctorBase {
PoolingFunctor(const PoolingType pooling_type,
const int *kernels,
const int *strides,
const Padding padding_type,
const std::vector<int> &paddings,
const int *dilations)
: PoolingFunctorBase(
pooling_type, kernels, strides, padding_type, paddings, dilations) {
}
void MaxPooling(const float *input,
const index_t *in_shape,
const index_t *out_shape,
const int *filter_hw,
const int *stride_hw,
const int *dilation_hw,
const int *pad_hw,
float *output) {
const index_t in_image_size = in_shape[2] * in_shape[3];
const index_t out_image_size = out_shape[2] * out_shape[3];
const index_t in_batch_size = in_shape[1] * in_image_size;
const index_t out_batch_size = out_shape[1] * out_image_size;
#pragma omp parallel for collapse(2)
for (index_t b = 0; b < out_shape[0]; ++b) {
for (index_t c = 0; c < out_shape[1]; ++c) {
const index_t out_base = b * out_batch_size + c * out_image_size;
const index_t in_base = b * in_batch_size + c * in_image_size;
const index_t out_height = out_shape[2];
const index_t out_width = out_shape[3];
const index_t in_height = in_shape[2];
const index_t in_width = in_shape[3];
for (index_t h = 0; h < out_height; ++h) {
for (index_t w = 0; w < out_width; ++w) {
const index_t out_offset = out_base + h * out_width + w;
float res = std::numeric_limits<float>::lowest();
for (int fh = 0; fh < filter_hw[0]; ++fh) {
for (int fw = 0; fw < filter_hw[1]; ++fw) {
index_t inh =
h * stride_hw[0] + dilation_hw[0] * fh - pad_hw[0];
index_t inw =
w * stride_hw[1] + dilation_hw[1] * fw - pad_hw[1];
if (inh >= 0 && inh < in_height && inw >= 0 && inw < in_width) {
index_t input_offset = in_base + inh * in_width + inw;
res = std::max(res, input[input_offset]);
}
}
}
output[out_offset] = res;
}
}
}
}
}
void AvgPooling(const float *input,
const index_t *in_shape,
const index_t *out_shape,
const int *filter_hw,
const int *stride_hw,
const int *dilation_hw,
const int *pad_hw,
float *output) {
const index_t in_image_size = in_shape[2] * in_shape[3];
const index_t out_image_size = out_shape[2] * out_shape[3];
const index_t in_batch_size = in_shape[1] * in_image_size;
const index_t out_batch_size = out_shape[1] * out_image_size;
#pragma omp parallel for collapse(2)
for (index_t b = 0; b < out_shape[0]; ++b) {
for (index_t c = 0; c < out_shape[1]; ++c) {
const index_t out_base = b * out_batch_size + c * out_image_size;
const index_t in_base = b * in_batch_size + c * in_image_size;
const index_t in_height = in_shape[2];
const index_t in_width = in_shape[3];
const index_t out_height = out_shape[2];
const index_t out_width = out_shape[3];
for (index_t h = 0; h < out_height; ++h) {
for (index_t w = 0; w < out_width; ++w) {
const index_t out_offset = out_base + h * out_width + w;
float res = 0;
int block_size = 0;
for (int fh = 0; fh < filter_hw[0]; ++fh) {
for (int fw = 0; fw < filter_hw[1]; ++fw) {
index_t inh =
h * stride_hw[0] + dilation_hw[0] * fh - pad_hw[0];
index_t inw =
w * stride_hw[1] + dilation_hw[1] * fw - pad_hw[1];
if (inh >= 0 && inh < in_height && inw >= 0 && inw < in_width) {
index_t input_offset = in_base + inh * in_width + inw;
res += input[input_offset];
++block_size;
}
}
}
output[out_offset] = res / block_size;
}
}
}
}
}
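  // Worked example (illustration only): average-pooling the 1-D row
  // {1, 2, 3, 4} with filter 3, stride 1 and pad 1 yields
  // {1.5, 2, 3, 3.5}; taps that fall outside the input are skipped, so
  // block_size counts only the valid samples in each window.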
MaceStatus operator()(const Tensor *input_tensor,
Tensor *output_tensor,
StatsFuture *future) {
MACE_UNUSED(future);
std::vector<index_t> output_shape(4);
std::vector<index_t> filter_shape = {
input_tensor->dim(1), input_tensor->dim(1), kernels_[0], kernels_[1]};
std::vector<int> paddings(2);
if (paddings_.empty()) {
kernels::CalcNCHWPaddingAndOutputSize(
input_tensor->shape().data(), filter_shape.data(), dilations_,
strides_, padding_type_, output_shape.data(), paddings.data());
} else {
paddings = paddings_;
CalcNCHWOutputSize(input_tensor->shape().data(),
filter_shape.data(),
paddings_.data(),
dilations_,
strides_,
RoundType::CEIL,
output_shape.data());
}
MACE_RETURN_IF_ERROR(output_tensor->Resize(output_shape));
Tensor::MappingGuard input_guard(input_tensor);
Tensor::MappingGuard output_guard(output_tensor);
const float *input = input_tensor->data<float>();
float *output = output_tensor->mutable_data<float>();
const index_t *input_shape = input_tensor->shape().data();
int pad_hw[2] = {paddings[0] / 2, paddings[1] / 2};
if (pooling_type_ == PoolingType::MAX) {
MaxPooling(input,
input_shape,
output_shape.data(),
kernels_,
strides_,
dilations_,
pad_hw,
output);
} else if (pooling_type_ == PoolingType::AVG) {
AvgPooling(input,
input_shape,
output_shape.data(),
kernels_,
strides_,
dilations_,
pad_hw,
output);
} else {
MACE_NOT_IMPLEMENTED;
}
return MACE_SUCCESS;
}
};
#ifdef MACE_ENABLE_OPENCL
template <typename T>
struct PoolingFunctor<DeviceType::GPU, T> : PoolingFunctorBase {
PoolingFunctor(const PoolingType pooling_type,
const int *kernels,
const int *strides,
const Padding padding_type,
const std::vector<int> &paddings,
const int *dilations)
: PoolingFunctorBase(
pooling_type, kernels, strides, padding_type, paddings, dilations) {
}
MaceStatus operator()(const Tensor *input_tensor,
Tensor *output_tensor,
StatsFuture *future);
cl::Kernel kernel_;
uint32_t kwg_size_;
std::unique_ptr<BufferBase> kernel_error_;
std::vector<index_t> input_shape_;
};
#endif // MACE_ENABLE_OPENCL
} // namespace kernels
} // namespace mace
#endif // MACE_KERNELS_POOLING_H_
|
GB_binop__band_uint16.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__band_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__band_uint16)
// A*D function (colscale): GB (_AxD__band_uint16)
// D*A function (rowscale): GB (_DxB__band_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__band_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__band_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__band_uint16)
// C=scalar+B GB (_bind1st__band_uint16)
// C=scalar+B' GB (_bind1st_tran__band_uint16)
// C=A+scalar GB (_bind2nd__band_uint16)
// C=A'+scalar GB (_bind2nd_tran__band_uint16)
// C type: uint16_t
// A type: uint16_t
// B,b type: uint16_t
// BinaryOp: cij = (aij) & (bij)
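// Worked example (illustration): aij = 0x0F3C, bij = 0x00FF
// -> cij = 0x0F3C & 0x00FF = 0x003C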
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x) & (y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BAND || GxB_NO_UINT16 || GxB_NO_BAND_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__band_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__band_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__band_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__band_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__band_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = (x) & (bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__band_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = (aij) & (y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x) & (aij) ; \
}
GrB_Info GB (_bind1st_tran__band_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij) & (y) ; \
}
GrB_Info GB (_bind2nd_tran__band_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target-25.c | #include <stdlib.h>
#include <unistd.h>
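/* This test exercises dependence ordering between host tasks and target
   constructs: each "err" check verifies that a target / target enter data /
   target exit data region carrying a depend clause observes the values
   written by the host tasks it depends on, and the taskwait ensures the
   nowait enter-data mapping of w has completed before w is used. */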
int
main ()
{
int x = 0, y = 0, z = 0, s = 11, t = 12, u = 13, w = 7, err;
#pragma omp parallel
#pragma omp single
{
#pragma omp task depend(in: x)
{
usleep (5000);
x = 1;
}
#pragma omp task depend(in: x)
{
usleep (6000);
y = 2;
}
#pragma omp task depend(out: z)
{
usleep (7000);
z = 3;
}
#pragma omp target map(tofrom: x) map(from: err) map (to: y, z) depend(inout: x, z)
err = (x != 1 || y != 2 || z != 3);
if (err)
abort ();
#pragma omp task depend(in: x)
{
usleep (5000);
x = 4;
}
#pragma omp task depend(in: x)
{
usleep (4000);
y = 5;
}
#pragma omp task depend(in: z)
{
usleep (3000);
z = 6;
}
#pragma omp target enter data nowait map (to: w)
#pragma omp target enter data depend (inout: x, z) map (to: x, y, z)
#pragma omp target map (alloc: x, y, z) map(from: err)
{
err = (x != 4 || y != 5 || z != 6);
x = 7;
y = 8;
z = 9;
}
if (err)
abort ();
#pragma omp taskwait
#pragma omp target map (alloc: w) map(from: err)
{
err = w != 7;
w = 17;
}
if (err)
abort ();
#pragma omp task depend(in: x)
{
usleep (2000);
s = 14;
}
#pragma omp task depend(in: x)
{
usleep (3000);
t = 15;
}
#pragma omp task depend(in: z)
{
usleep (4000);
u = 16;
}
#pragma omp target exit data depend (inout: x, z) map (from: x, y, z, w)
if (x != 7 || y != 8 || z != 9 || s != 14 || t != 15 || u != 16 || w != 17)
abort ();
}
return 0;
}
|
pure_convection_edgebased.h | /*
==============================================================================
KratosPFEMApplication
A library based on:
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
- CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNERS.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
//
// Project Name: Kratos
// Last Modified by: $Author: rrossi $
// Date: $Date: 2009-01-13 15:39:56 $
// Revision: $Revision: 1.3 $
//
//
#if !defined(KRATOS_PURE_CONVECTION_EDGEBASED_SOLVER_H_INCLUDED)
#define KRATOS_PURE_CONVECTION_EDGEBASED_SOLVER_H_INCLUDED
#define SPLIT_OSS
// #define SYMM_PRESS
// System includes
#include <string>
#include <iostream>
#include <algorithm>
// #include <omp.h>
// External includes
// Project includes
#include "includes/define.h"
#include "includes/model_part.h"
#include "includes/node.h"
//#include "geometries/geometry.h"
#include "utilities/geometry_utilities.h"
#include "incompressible_fluid_application.h"
namespace Kratos
{
template<unsigned int TDim, class MatrixContainer, class TSparseSpace, class TLinearSolver>
class PureConvectionEdgeBased
{
public:
//name for the self defined structure
typedef EdgesStructureType<TDim> CSR_Tuple;
typedef std::vector<CSR_Tuple> EdgesVectorType;
//name for row start and column index vectors
typedef std::vector<unsigned int> IndicesVectorType;
//defining matrix type for test calculations
typedef std::vector< array_1d<double, TDim> > CalcVectorType;
//defining type for local storage of nodal values
typedef std::vector<double> ValuesVectorType;
//defining types for matrix operations
typedef typename TSparseSpace::MatrixType TSystemMatrixType;
typedef typename TSparseSpace::VectorType TSystemVectorType;
//constructor and destructor
PureConvectionEdgeBased(MatrixContainer& mr_matrix_container,
ModelPart& mr_model_part
)
: mr_matrix_container(mr_matrix_container),mr_model_part(mr_model_part)
{};
~PureConvectionEdgeBased(){};
//***********************************
//function to initialize fluid solver
void Initialize(
)
{
KRATOS_TRY
//get number of nodes
unsigned int n_nodes = mr_model_part.Nodes().size();
//unsigned int n_edges = mr_matrix_container.GetNumberEdges();
//size data vectors
mWork.resize(n_nodes);
mPi.resize(n_nodes);
mUn.resize(n_nodes);
mUn1.resize(n_nodes);
mphi_n.resize(n_nodes);
mphi_n1.resize(n_nodes);
mA.resize(n_nodes);
mHmin.resize(n_nodes);
mTau.resize(n_nodes);
//read variables from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mUn1, mr_model_part.Nodes());
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mUn, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(DISTANCE, mphi_n, mr_model_part.Nodes());
//set flag for first time step
mFirstStep = true;
ValuesVectorType& hmin_from_container = mr_matrix_container.GetHmin();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
mHmin[i_node] = hmin_from_container[i_node];
}
KRATOS_CATCH("")
}
//***************************************
//function to set adequate time step size
void ComputeTimeStep(double CFLNumber)
{
KRATOS_TRY
//local variable for time step size
double delta_t = 1e10;
//getting value of current velocity
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mUn1, mr_model_part.Nodes());
//loop over all nodes
unsigned int n_nodes = mUn1.size();
for (unsigned int i_node = 0; i_node < n_nodes; i_node++)
{
//use CFL condition to compute time step size
double delta_t_i = CFLNumber * 1.0 / (norm_2(mUn1[i_node])/mHmin[i_node] ) ;
//choose the overall minimum of delta_t_i
if (delta_t_i < delta_t)
delta_t = delta_t_i;
}
//perform MPI synchronization of the dt (the minimum should be kept)
//write time step size to Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
CurrentProcessInfo[DELTA_TIME] = delta_t;
KRATOS_CATCH("")
}
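// Note on the formula above (illustrative): per node the CFL condition
// gives delta_t_i = CFLNumber * h_min_i / |u_i|, and the global step is
// the minimum over all nodes; a node with zero velocity yields an
// unbounded quotient and therefore never lowers delta_t.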
//**********************************************************************************
//function to solve fluid equations - fractional step 1: compute fractional momentum
Vector Solve()
{
KRATOS_TRY
//PREREQUISITES
//variables for node based data handling
ModelPart::NodesContainerType& rNodes = mr_model_part.Nodes();
int n_nodes = rNodes.size();
//storage of nodal values in local variables
ValuesVectorType rhs;
rhs.resize(n_nodes);
//read variables from Kratos
mr_matrix_container.FillVectorFromDatabase(VELOCITY, mUn1, mr_model_part.Nodes());
mr_matrix_container.FillOldVectorFromDatabase(VELOCITY, mUn, mr_model_part.Nodes());
mr_matrix_container.FillScalarFromDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
mr_matrix_container.FillOldScalarFromDatabase(DISTANCE, mphi_n, mr_model_part.Nodes());
//read time step size from Kratos
ProcessInfo& CurrentProcessInfo = mr_model_part.GetProcessInfo();
double delta_t = CurrentProcessInfo[DELTA_TIME];
//compute advective velocity - area average of the current velocity
double coefficient = 1;
CalculateAdvectiveVelocity(mUn,mUn1,mA, coefficient);
//compute intrinsic time
double time_inv = 1.0/delta_t;
#pragma omp parallel for firstprivate(time_inv)
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& h_i = mHmin[i_node];
array_1d<double, TDim>& a_i = mA[i_node];
double vel_norm = norm_2(a_i);
mTau[i_node] = 1.0 / (2.0 * vel_norm/h_i + 0.01*time_inv );
}
mr_matrix_container.AssignVectorToVector(mphi_n, mWork); //mWork = mphi_n
//first step of Runge Kutta
// mr_matrix_container.AssignVectorToVector(mphi_n,mphi_n1); //mphi_n1 = mphi_n
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mphi_n1,mA,rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5*delta_t , mr_matrix_container.GetInvertedMass(), rhs);
//second step
mr_matrix_container.SetToZero(rhs);
CalculateRHS(mphi_n1,mA,rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, 0.5*delta_t , mr_matrix_container.GetInvertedMass(),rhs);
//third step
CalculateAdvectiveVelocity(mUn, mUn1,mA, coefficient);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mphi_n1,mA,rhs);
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/3.0 , mr_matrix_container.GetInvertedMass(), rhs);
mr_matrix_container.Add_Minv_value(mphi_n1, mphi_n, delta_t , mr_matrix_container.GetInvertedMass(), rhs);
//fourth step
CalculateAdvectiveVelocity(mUn, mUn1,mA, coefficient);
mr_matrix_container.SetToZero(rhs);
CalculateRHS( mphi_n1,mA,rhs );
mr_matrix_container.Add_Minv_value(mWork,mWork, delta_t/6.0 , mr_matrix_container.GetInvertedMass(), rhs);
//compute right-hand side
mr_matrix_container.AssignVectorToVector(mWork,mphi_n1);
mr_matrix_container.WriteScalarToDatabase(DISTANCE, mphi_n1, mr_model_part.Nodes());
// //compute ratio for iteration
Vector stop_criteria(TDim);
noalias(stop_criteria) = ZeroVector(TDim);
// stop_criteria[0] = 0.0;
// stop_criteria[1] = 0.0;
return stop_criteria;
KRATOS_CATCH("")
}
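// The four stages above form the classical RK4 scheme (a sketch, with
// M^-1 the inverted lumped mass matrix applied by Add_Minv_value and
// dt = delta_t):
//   k1 = M^-1 rhs(phi) : work += dt/6 k1 ; phi <- phi_n + dt/2 k1
//   k2 = M^-1 rhs(phi) : work += dt/3 k2 ; phi <- phi_n + dt/2 k2
//   k3 = M^-1 rhs(phi) : work += dt/3 k3 ; phi <- phi_n + dt   k3
//   k4 = M^-1 rhs(phi) : work += dt/6 k4 ; phi_n+1 = work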
//*********************************************************************
//function to calculate right-hand side of fractional momentum equation
void CalculateRHS(
const ValuesVectorType& mphi,
const CalcVectorType& convective_velocity,
ValuesVectorType& rhs)
{
KRATOS_TRY
int n_nodes = mphi.size();
//calculating the convective projection
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
double& pi_i = mPi[i_node];
const double& phi_i = mphi[i_node];
//set to zero the projection
pi_i = 0;
const array_1d<double, TDim>& a_i = convective_velocity[i_node];
//loop to all the edges surrounding node I
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
const double& phi_j = mphi[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
edge_ij.Add_ConvectiveContribution(pi_i,a_i,phi_i,a_j,phi_j);
}
//apply inverted mass matrix
const double m_inv = mr_matrix_container.GetInvertedMass()[i_node];
pi_i *= m_inv;
// KRATOS_WATCH(pi_i);
}
//perform MPI synchronization
//calculating the RHS
double stab_low;
double stab_high;
#pragma omp parallel for private(stab_low,stab_high)
for ( int i_node = 0; i_node < n_nodes; i_node++)
{
double& rhs_i = rhs[i_node];
const double& phi_i = mphi[i_node];
const array_1d<double, TDim>& a_i = convective_velocity[i_node];
const double& pi_i = mPi[i_node];
//double& h_i = mHmin[i_node];
//initializing with the external forces (e.g. gravity)
rhs_i = 0.0;
//loop to all the edges surrounding node I
for (unsigned int csr_index=mr_matrix_container.GetRowStartIndex()[i_node]; csr_index!=mr_matrix_container.GetRowStartIndex()[i_node+1]; csr_index++)
{
unsigned int j_neighbour = mr_matrix_container.GetColumnIndex()[csr_index];
//double& rhs_j = rhs[j_neighbour];
const double& phi_j = mphi[j_neighbour];
const array_1d<double, TDim>& a_j = convective_velocity[j_neighbour];
const double& pi_j = mPi[j_neighbour];
//double& h_j = mHmin[j_neighbour];
CSR_Tuple& edge_ij = mr_matrix_container.GetEdgeValues()[csr_index];
//convection operator
edge_ij.Sub_ConvectiveContribution(rhs_i,a_i,phi_i,a_j,phi_j);
//calculate stabilization part
edge_ij.CalculateConvectionStabilization_LOW( stab_low,a_i,phi_i,a_j,phi_j);
double edge_tau = mTau[i_node];
edge_ij.CalculateConvectionStabilization_HIGH( stab_high,a_i,pi_i,a_j,pi_j);
edge_ij.Sub_StabContribution( rhs_i, edge_tau, 1.0, stab_low, stab_high);
}
// KRATOS_WATCH(rhs_i);
}
KRATOS_CATCH("")
}
void CalculateAdvectiveVelocity(
const CalcVectorType& mUn,
const CalcVectorType& mUn1,
CalcVectorType& mA,
double coefficient)
{
int n_nodes = mUn1.size();
#pragma omp parallel for
for (int i_node = 0; i_node < n_nodes; i_node++)
{
//reference for advective velocity of node i
array_1d<double, TDim>& a_i = mA[i_node];
const array_1d<double, TDim>& Un_i = mUn[i_node];
const array_1d<double, TDim>& Un1_i = mUn1[i_node];
for (unsigned int k_comp = 0; k_comp < TDim; k_comp++)
a_i[k_comp] = coefficient * Un1_i[k_comp] + (1.0 - coefficient)* Un_i[k_comp];
}
}
//*******************************
//function to free dynamic memory
void Clear()
{
KRATOS_TRY
mWork.clear();
mPi.clear();
mUn.clear();
mUn1.clear();
mA.clear();
mphi_n.clear();
mphi_n1.clear();
mHmin.clear();
mTau.clear();
KRATOS_CATCH("")
}
private:
MatrixContainer& mr_matrix_container;
ModelPart& mr_model_part;
bool msmooth_convective_velocity;
bool minclude_shock_capturing;
//nodal values
//velocity vector U at time steps n and n+1
CalcVectorType mUn1,mUn;
//work vector for the Runge-Kutta update (mWork) and convective projection (mPi)
ValuesVectorType mWork, mPi;
ValuesVectorType mphi_n, mphi_n1; //variable to be convected
//advective velocity vector
CalcVectorType mA;
//minimum length of the edges surrounding each nodal point
ValuesVectorType mHmin;
//flag for first time step
bool mFirstStep;
//intrinsic time step size
ValuesVectorType mTau;
};
} //namespace Kratos
#endif //KRATOS_PURE_CONVECTION_EDGEBASED_SOLVER_H_INCLUDED defined
|
GB_unop__identity_uint8_fp64.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__identity_uint8_fp64
// op(A') function: GB_unop_tran__identity_uint8_fp64
// C type: uint8_t
// A type: double
// cast: uint8_t cij = GB_cast_to_uint8_t ((double) (aij))
// unaryop: cij = aij
#define GB_ATYPE \
double
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
double aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
double aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ; \
Cx [pC] = z ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_FP64)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__identity_uint8_fp64
(
uint8_t *Cx, // Cx and Ax may be aliased
const double *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (double), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
double aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
Cx [p] = z ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
double aij = Ax [p] ;
uint8_t z = GB_cast_to_uint8_t ((double) (aij)) ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_uint8_fp64
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
VolumetricAveragePooling.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/VolumetricAveragePooling.c"
#else
static inline void THNN_(VolumetricAveragePooling_shapeCheck)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH) {
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
int ndim = input->nDimension;
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
THArgCheck(kT > 0 && kW > 0 && kH > 0, 5,
"kernel size should be greater than zero, but got kT: %d kH: %d kW: %d",
kT, kH, kW);
THArgCheck(dT > 0 && dW > 0 && dH > 0, 8,
"stride should be greater than zero, but got dT: %d dH: %d dW: %d",
dT, dH, dW);
THNN_ARGCHECK(input->nDimension == 4 || input->nDimension == 5, 2, input,
"4D or 5D (batch mode) tensor expected for input, but got: %s");
THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH
&& input->size[dimt] >= kT, 2,
"input image (T: %d H: %d W: %d) smaller than "
"kernel size (kT: %d kH: %d kW: %d)",
input->size[dimt], input->size[dimh], input->size[dimw],
kT, kH, kW);
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
otime = (itime - kT) / dT + 1;
oheight = (iheight - kH) / dH + 1;
owidth = (iwidth - kW) / dW + 1;
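  /* e.g. (illustration): itime = 16, kT = 4, dT = 2
     -> otime = (16 - 4) / 2 + 1 = 7 */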
if (gradOutput != NULL) {
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimN, nslices);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimt, otime);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimh, oheight);
THNN_CHECK_DIM_SIZE(gradOutput, ndim, dimw, owidth);
}
}
static void THNN_(VolumetricAveragePooling_updateOutput_frame)(
real *input_p,
real *output_p,
int64_t nslices,
int64_t itime,
int64_t iwidth,
int64_t iheight,
int64_t otime,
int64_t owidth,
int64_t oheight,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH)
{
int64_t k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
/* loop over output */
int64_t i, j, ti;
for (ti = 0; ti < otime; ti++)
{
for (i = 0; i < oheight; i++)
{
for (j = 0; j < owidth; j++)
{
/* local pointers */
real *ip = input_p + k * itime * iwidth * iheight
+ ti * iwidth * iheight * dT + i * iwidth * dH + j * dW;
real *op = output_p + k * otime * owidth * oheight
+ ti * owidth * oheight + i * owidth + j;
/* compute local sum: */
real sum = 0.0;
int x, y, z;
for (z=0; z < kT; z++)
{
for (y = 0; y < kH; y++)
{
for (x = 0; x < kW; x++)
{
sum += *(ip + z * iwidth * iheight + y * iwidth + x);
}
}
}
            /* set output to local average */
*op = sum / (kT * kW * kH);
}
}
}
}
}
void THNN_(VolumetricAveragePooling_updateOutput)(
THNNState *state,
THTensor *input,
THTensor *output,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH)
{
int64_t nslices;
int64_t itime;
int64_t iheight;
int64_t iwidth;
int64_t otime;
int64_t oheight;
int64_t owidth;
real *input_data;
real *output_data;
THNN_(VolumetricAveragePooling_shapeCheck)(
state, input, NULL, kT, kW, kH,
dT, dW, dH);
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
otime = (itime - kT) / dT + 1;
oheight = (iheight - kH) / dH + 1;
owidth = (iwidth - kW) / dW + 1;
/* get contiguous input */
input = THTensor_(newContiguous)(input);
if (input->nDimension == 4) /* non-batch mode */
{
/* resize output */
THTensor_(resize4d)(output, nslices, otime, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data, output_data, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH
);
}
else /* batch mode */
{
int64_t p;
int64_t nBatch = input->size[0];
int64_t istride = nslices * itime * iwidth * iheight;
int64_t ostride = nslices * otime * owidth * oheight;
/* resize output */
THTensor_(resize5d)(output, nBatch, nslices, otime, oheight, owidth);
input_data = THTensor_(data)(input);
output_data = THTensor_(data)(output);
#pragma omp parallel for private(p)
for (p=0; p < nBatch; p++)
{
THNN_(VolumetricAveragePooling_updateOutput_frame)(
input_data + p * istride, output_data + p * ostride, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH
);
}
}
/* cleanup */
THTensor_(free)(input);
}
static void THNN_(VolumetricAveragePooling_updateGradInput_frame)(
real *gradInput_p,
real *gradOutput_p,
int64_t nslices,
int64_t itime,
int64_t iwidth,
int64_t iheight,
int64_t otime,
int64_t owidth,
int64_t oheight,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH)
{
int64_t k;
#pragma omp parallel for private(k)
for (k = 0; k < nslices; k++)
{
/* loop over output */
int64_t i, j, ti;
for (ti = 0; ti < otime; ti++)
{
for (i = 0; i < oheight; i++)
{
for (j = 0; j < owidth; j++)
{
/* local pointers */
real *ip = gradInput_p + k * itime * iwidth * iheight
+ ti * iwidth * iheight * dT + i * iwidth * dH + j * dW;
real *op = gradOutput_p + k * otime * owidth * oheight
+ ti * owidth * oheight + i * owidth + j;
/* scatter gradients out to footprint: */
real val = *op / (kT * kW * kH);
int x,y,z;
for (z=0; z < kT; z++)
{
for (y = 0; y < kH; y++)
{
for (x = 0; x < kW; x++)
{
*(ip + z * iwidth * iheight + y * iwidth + x) += val;
}
}
}
}
}
}
}
}
void THNN_(VolumetricAveragePooling_updateGradInput)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradInput,
int kT,
int kW,
int kH,
int dT,
int dW,
int dH)
{
  int64_t nslices;
  int64_t itime;
  int64_t iheight;
  int64_t iwidth;
  int64_t otime;
  int64_t oheight;
  int64_t owidth;
real *gradInput_data;
real *gradOutput_data;
int dimN = 0;
int dimt = 1;
int dimh = 2;
int dimw = 3;
THNN_(VolumetricAveragePooling_shapeCheck)(
state, input, gradOutput, kT, kW, kH,
dT, dW, dH);
/* get contiguous gradOutput */
gradOutput = THTensor_(newContiguous)(gradOutput);
/* resize */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
if (input->nDimension == 5)
{
dimN++;
dimt++;
dimh++;
dimw++;
}
/* sizes */
nslices = input->size[dimN];
itime = input->size[dimt];
iheight = input->size[dimh];
iwidth = input->size[dimw];
otime = gradOutput->size[dimt];
oheight = gradOutput->size[dimh];
owidth = gradOutput->size[dimw];
/* get raw pointers */
gradInput_data = THTensor_(data)(gradInput);
gradOutput_data = THTensor_(data)(gradOutput);
/* backprop */
if (input->nDimension == 4) /* non-batch mode*/
{
THNN_(VolumetricAveragePooling_updateGradInput_frame)(
gradInput_data, gradOutput_data, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH
);
}
else /* batch mode */
{
int64_t p;
int64_t nBatch = input->size[0];
int64_t istride = nslices * itime * iwidth * iheight;
int64_t ostride = nslices * otime * owidth * oheight;
#pragma omp parallel for private(p)
for (p = 0; p < nBatch; p++)
{
THNN_(VolumetricAveragePooling_updateGradInput_frame)(
gradInput_data + p * istride, gradOutput_data + p * ostride, nslices,
itime, iwidth, iheight,
otime, owidth, oheight,
kT, kW, kH,
dT, dW, dH
);
}
}
/* cleanup */
THTensor_(free)(gradOutput);
}
#endif
|
GB_unaryop__lnot_uint16_uint32.c | //------------------------------------------------------------------------------
// GB_unaryop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_iterator.h"
#include "GB_unaryop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop__lnot_uint16_uint32
// op(A') function: GB_tran__lnot_uint16_uint32
// C type: uint16_t
// A type: uint32_t
// cast: uint16_t cij = (uint16_t) aij
// unaryop: cij = !(aij != 0)
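// e.g. (illustration): aij = 7 -> cij = !(7 != 0) = 0 ; aij = 0 -> cij = 1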
#define GB_ATYPE \
uint32_t
#define GB_CTYPE \
uint16_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = !(x != 0) ;
// casting
#define GB_CASTING(z, aij) \
uint16_t z = (uint16_t) aij ;
// cij = op (cast (aij))
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GB_GETA (aij, Ax, pA) ; \
/* Cx [pC] = op (cast (aij)) */ \
GB_CASTING (z, aij) ; \
GB_OP (GB_CX (pC), z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LNOT || GxB_NO_UINT16 || GxB_NO_UINT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop__lnot_uint16_uint32
(
uint16_t *Cx, // Cx and Ax may be aliased
uint32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GB_CAST_OP (p, p) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_tran__lnot_uint16_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unaryop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
par_nongalerkin.c | /******************************************************************************
* Copyright (c) 1998 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_parcsr_ls.h"
#include "../HYPRE.h"
/* This file contains the routines for constructing non-Galerkin coarse grid
* operators, based on the original Galerkin coarse grid
*/
/* Take all of the indices from indices[start, start+1, start+2, ..., end]
* and take the corresponding entries in array and place them in-order in output.
* Assumptions:
* output is of length end-start+1
* indices never contains an index that goes out of bounds in array
* */
HYPRE_Int
hypre_GrabSubArray(HYPRE_Int * indices,
HYPRE_Int start,
HYPRE_Int end,
HYPRE_BigInt * array,
HYPRE_BigInt * output)
{
HYPRE_Int i, length;
length = end - start + 1;
for (i = 0; i < length; i++)
{ output[i] = array[ indices[start + i] ]; }
return 0;
}
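/* Example (illustration only): indices = {4, 2, 0}, start = 0, end = 2,
 * array = {10, 11, 12, 13, 14} -> output = {14, 12, 10}. */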
/* Compute the intersection of x and y, placing
* the intersection in z. Additionally, the array
* x_data is associated with x, i.e., the entries
* that we grab from x, we also grab from x_data.
* If x[k] is placed in z[m], then x_data[k] goes to
* output_x_data[m].
*
* Assumptions:
* z is of length min(x_length, y_length)
* x and y are sorted
* x_length and y_length are similar in size, otherwise,
* looping over the smaller array and doing binary search
* in the longer array is faster.
* */
HYPRE_Int
hypre_IntersectTwoArrays(HYPRE_Int *x,
HYPRE_Real *x_data,
HYPRE_Int x_length,
HYPRE_Int *y,
HYPRE_Int y_length,
HYPRE_Int *z,
HYPRE_Real *output_x_data,
HYPRE_Int *intersect_length)
{
HYPRE_Int x_index = 0;
HYPRE_Int y_index = 0;
*intersect_length = 0;
/* Compute Intersection, looping over each array */
while ( (x_index < x_length) && (y_index < y_length) )
{
if (x[x_index] > y[y_index])
{
y_index = y_index + 1;
}
else if (x[x_index] < y[y_index])
{
x_index = x_index + 1;
}
else
{
z[*intersect_length] = x[x_index];
output_x_data[*intersect_length] = x_data[x_index];
x_index = x_index + 1;
y_index = y_index + 1;
*intersect_length = *intersect_length + 1;
}
}
return 1;
}
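/* Example (illustration only): x = {1, 3, 5, 7}, y = {3, 4, 5} gives
 * z = {3, 5}, output_x_data = {x_data[1], x_data[2]},
 * *intersect_length = 2. */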
HYPRE_Int
hypre_IntersectTwoBigArrays(HYPRE_BigInt *x,
HYPRE_Real *x_data,
HYPRE_Int x_length,
HYPRE_BigInt *y,
HYPRE_Int y_length,
HYPRE_BigInt *z,
HYPRE_Real *output_x_data,
HYPRE_Int *intersect_length)
{
HYPRE_Int x_index = 0;
HYPRE_Int y_index = 0;
*intersect_length = 0;
/* Compute Intersection, looping over each array */
while ( (x_index < x_length) && (y_index < y_length) )
{
if (x[x_index] > y[y_index])
{
y_index = y_index + 1;
}
else if (x[x_index] < y[y_index])
{
x_index = x_index + 1;
}
else
{
z[*intersect_length] = x[x_index];
output_x_data[*intersect_length] = x_data[x_index];
x_index = x_index + 1;
y_index = y_index + 1;
*intersect_length = *intersect_length + 1;
}
}
return 1;
}
/* Copy CSR matrix A to CSR matrix B. The column indices are
* assumed to be sorted, and the sparsity pattern of B is a subset
* of the sparsity pattern of A.
*
* Assumptions:
* Column indices of A and B are sorted
* Sparsity pattern of B is a subset of A's
* A and B are the same size and have same data layout
**/
HYPRE_Int
hypre_SortedCopyParCSRData(hypre_ParCSRMatrix *A,
hypre_ParCSRMatrix *B)
{
/* Grab off A and B's data structures */
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_Real *A_offd_data = hypre_CSRMatrixData(A_offd);
hypre_CSRMatrix *B_diag = hypre_ParCSRMatrixDiag(B);
HYPRE_Int *B_diag_i = hypre_CSRMatrixI(B_diag);
HYPRE_Int *B_diag_j = hypre_CSRMatrixJ(B_diag);
HYPRE_Real *B_diag_data = hypre_CSRMatrixData(B_diag);
hypre_CSRMatrix *B_offd = hypre_ParCSRMatrixOffd(B);
HYPRE_Int *B_offd_i = hypre_CSRMatrixI(B_offd);
HYPRE_Int *B_offd_j = hypre_CSRMatrixJ(B_offd);
HYPRE_Real *B_offd_data = hypre_CSRMatrixData(B_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_Int *temp_int_array = NULL;
HYPRE_Int temp_int_array_length = 0;
HYPRE_Int i, length, offset_A, offset_B;
for (i = 0; i < num_variables; i++)
{
/* Deal with the first row entries, which may be diagonal elements */
if ( A_diag_j[A_diag_i[i]] == i)
{ offset_A = 1; }
else
{ offset_A = 0; }
if ( B_diag_j[B_diag_i[i]] == i)
{ offset_B = 1; }
else
{ offset_B = 0; }
if ( (offset_B == 1) && (offset_A == 1) )
{ B_diag_data[B_diag_i[i]] = A_diag_data[A_diag_i[i]]; }
/* This finds the intersection of the column indices, and
* also copies the matching data in A to the data array in B
**/
if ( (A_diag_i[i + 1] - A_diag_i[i] - offset_A) > temp_int_array_length )
{
hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
temp_int_array_length = (A_diag_i[i + 1] - A_diag_i[i] - offset_A);
temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
}
hypre_IntersectTwoArrays(&(A_diag_j[A_diag_i[i] + offset_A]),
&(A_diag_data[A_diag_i[i] + offset_A]),
A_diag_i[i + 1] - A_diag_i[i] - offset_A,
&(B_diag_j[B_diag_i[i] + offset_B]),
B_diag_i[i + 1] - B_diag_i[i] - offset_B,
temp_int_array,
&(B_diag_data[B_diag_i[i] + offset_B]),
&length);
if ( (A_offd_i[i + 1] - A_offd_i[i]) > temp_int_array_length )
{
hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST);
temp_int_array_length = (A_offd_i[i + 1] - A_offd_i[i]);
temp_int_array = hypre_CTAlloc(HYPRE_Int, temp_int_array_length, HYPRE_MEMORY_HOST);
}
hypre_IntersectTwoArrays(&(A_offd_j[A_offd_i[i]]),
&(A_offd_data[A_offd_i[i]]),
A_offd_i[i + 1] - A_offd_i[i],
&(B_offd_j[B_offd_i[i]]),
B_offd_i[i + 1] - B_offd_i[i],
temp_int_array,
&(B_offd_data[B_offd_i[i]]),
&length);
}
if (temp_int_array)
{ hypre_TFree(temp_int_array, HYPRE_MEMORY_HOST); }
return 1;
}
/*
* Equivalent to hypre_BoomerAMGCreateS, except, the data array of S
* is not Null and contains the data entries from A.
*/
HYPRE_Int
hypre_BoomerAMG_MyCreateS(hypre_ParCSRMatrix *A,
HYPRE_Real strength_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int *dof_func,
hypre_ParCSRMatrix **S_ptr)
{
MPI_Comm comm = hypre_ParCSRMatrixComm(A);
hypre_ParCSRCommPkg *comm_pkg = hypre_ParCSRMatrixCommPkg(A);
hypre_ParCSRCommHandle *comm_handle;
hypre_CSRMatrix *A_diag = hypre_ParCSRMatrixDiag(A);
HYPRE_Int *A_diag_i = hypre_CSRMatrixI(A_diag);
HYPRE_Real *A_diag_data = hypre_CSRMatrixData(A_diag);
hypre_CSRMatrix *A_offd = hypre_ParCSRMatrixOffd(A);
HYPRE_Int *A_offd_i = hypre_CSRMatrixI(A_offd);
HYPRE_Real *A_offd_data = NULL;
HYPRE_Int *A_diag_j = hypre_CSRMatrixJ(A_diag);
HYPRE_Int *A_offd_j = hypre_CSRMatrixJ(A_offd);
HYPRE_BigInt *row_starts = hypre_ParCSRMatrixRowStarts(A);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(A_diag);
HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(A);
HYPRE_Int num_nonzeros_diag;
HYPRE_Int num_nonzeros_offd = 0;
HYPRE_Int num_cols_offd = 0;
hypre_ParCSRMatrix *S;
hypre_CSRMatrix *S_diag;
HYPRE_Int *S_diag_i;
HYPRE_Int *S_diag_j;
HYPRE_Real *S_diag_data;
hypre_CSRMatrix *S_offd;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_Real *S_offd_data;
HYPRE_Real diag, row_scale, row_sum;
HYPRE_Int i, jA, jS;
HYPRE_Int ierr = 0;
HYPRE_Int *dof_func_offd;
HYPRE_Int num_sends;
HYPRE_Int *int_buf_data;
HYPRE_Int index, start, j;
/*--------------------------------------------------------------
* Compute a ParCSR strength matrix, S.
*
* For now, the "strength" of dependence/influence is defined in
* the following way: i depends on j if
* aij > strength_threshold * hypre_max (k != i) aik,   aii < 0
* or
* aij < strength_threshold * hypre_min (k != i) aik,   aii >= 0
* If i depends on j, then S_ij = aij; else S_ij = 0.
*
* NOTE: the entries are negative initially, corresponding
* to "unaccounted-for" dependence.
*----------------------------------------------------------------*/
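/* Numeric illustration (editor's sketch): for a row with diag = 4.0,
 * off-diag entries {-3.0, -0.5} and strength_threshold = 0.25, the code
 * below computes row_scale = hypre_min(0.0, -3.0, -0.5) = -3.0, so an entry
 * stays strong only if it is < 0.25 * (-3.0) = -0.75. Hence -3.0 stays in S
 * and -0.5 is marked weak (its column index is set to -1). */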
num_nonzeros_diag = A_diag_i[num_variables];
num_cols_offd = hypre_CSRMatrixNumCols(A_offd);
A_offd_i = hypre_CSRMatrixI(A_offd);
num_nonzeros_offd = A_offd_i[num_variables];
/* Initialize S */
S = hypre_ParCSRMatrixCreate(comm, global_num_vars, global_num_vars,
row_starts, row_starts,
num_cols_offd, num_nonzeros_diag, num_nonzeros_offd);
S_diag = hypre_ParCSRMatrixDiag(S);
hypre_CSRMatrixI(S_diag) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, HYPRE_MEMORY_HOST);
hypre_CSRMatrixJ(S_diag) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_diag, HYPRE_MEMORY_HOST);
hypre_CSRMatrixData(S_diag) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_diag, HYPRE_MEMORY_HOST);
S_offd = hypre_ParCSRMatrixOffd(S);
hypre_CSRMatrixI(S_offd) = hypre_CTAlloc(HYPRE_Int, num_variables + 1, HYPRE_MEMORY_HOST);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd_i = hypre_CSRMatrixI(S_offd);
hypre_CSRMatrixMemoryLocation(S_diag) = HYPRE_MEMORY_HOST;
hypre_CSRMatrixMemoryLocation(S_offd) = HYPRE_MEMORY_HOST;
dof_func_offd = NULL;
if (num_cols_offd)
{
A_offd_data = hypre_CSRMatrixData(A_offd);
hypre_CSRMatrixJ(S_offd) = hypre_CTAlloc(HYPRE_Int, num_nonzeros_offd, HYPRE_MEMORY_HOST);
hypre_CSRMatrixData(S_offd) = hypre_CTAlloc(HYPRE_Real, num_nonzeros_offd, HYPRE_MEMORY_HOST);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
hypre_ParCSRMatrixColMapOffd(S) = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd, HYPRE_MEMORY_HOST);
if (num_functions > 1)
{
dof_func_offd = hypre_CTAlloc(HYPRE_Int, num_cols_offd, HYPRE_MEMORY_HOST);
}
}
/*-------------------------------------------------------------------
* Get the dof_func data for the off-processor columns
*-------------------------------------------------------------------*/
if (!comm_pkg)
{
hypre_MatvecCommPkgCreate(A);
comm_pkg = hypre_ParCSRMatrixCommPkg(A);
}
num_sends = hypre_ParCSRCommPkgNumSends(comm_pkg);
if (num_functions > 1)
{
int_buf_data = hypre_CTAlloc(HYPRE_Int, hypre_ParCSRCommPkgSendMapStart(comm_pkg,
num_sends), HYPRE_MEMORY_HOST);
index = 0;
for (i = 0; i < num_sends; i++)
{
start = hypre_ParCSRCommPkgSendMapStart(comm_pkg, i);
for (j = start; j < hypre_ParCSRCommPkgSendMapStart(comm_pkg, i + 1); j++)
{
int_buf_data[index++] = dof_func[hypre_ParCSRCommPkgSendMapElmt(comm_pkg, j)];
}
}
comm_handle = hypre_ParCSRCommHandleCreate( 11, comm_pkg, int_buf_data,
dof_func_offd);
hypre_ParCSRCommHandleDestroy(comm_handle);
hypre_TFree(int_buf_data, HYPRE_MEMORY_HOST);
}
/* give S same nonzero structure as A */
hypre_ParCSRMatrixCopy(A, S, 1);
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,diag,row_scale,row_sum,jA) HYPRE_SMP_SCHEDULE
#endif
for (i = 0; i < num_variables; i++)
{
diag = A_diag_data[A_diag_i[i]];
/* compute scaling factor and row sum */
row_scale = 0.0;
row_sum = diag;
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
else
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
if (dof_func[i] == dof_func[A_diag_j[jA]])
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
if (dof_func[i] == dof_func_offd[A_offd_j[jA]])
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
row_scale = hypre_max(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
row_scale = hypre_max(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
else
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
row_scale = hypre_min(row_scale, A_diag_data[jA]);
row_sum += A_diag_data[jA];
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
row_scale = hypre_min(row_scale, A_offd_data[jA]);
row_sum += A_offd_data[jA];
}
}
}
/* compute row entries of S */
S_diag_j[A_diag_i[i]] = -1;
if ((fabs(row_sum) > fabs(diag)*max_row_sum) && (max_row_sum < 1.0))
{
/* make all dependencies weak */
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
S_diag_j[jA] = -1;
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
S_offd_j[jA] = -1;
}
}
else
{
if (num_functions > 1)
{
if (diag < 0)
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func[A_diag_j[jA]])
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale
|| dof_func[i] != dof_func_offd[A_offd_j[jA]])
{
S_offd_j[jA] = -1;
}
}
}
}
else
{
if (diag < 0)
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
if (A_diag_data[jA] <= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
if (A_offd_data[jA] <= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
else
{
for (jA = A_diag_i[i] + 1; jA < A_diag_i[i + 1]; jA++)
{
if (A_diag_data[jA] >= strength_threshold * row_scale)
{
S_diag_j[jA] = -1;
}
}
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
if (A_offd_data[jA] >= strength_threshold * row_scale)
{
S_offd_j[jA] = -1;
}
}
}
}
}
}
/*--------------------------------------------------------------
* "Compress" the strength matrix.
*
* NOTE: S has *NO DIAGONAL ELEMENT* on any row. Caveat Emptor!
*
* NOTE: This "compression" section of code must not be removed; the
* non-Galerkin routine depends on it.
*----------------------------------------------------------------*/
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_diag_i[i] = jS;
for (jA = A_diag_i[i]; jA < A_diag_i[i + 1]; jA++)
{
if (S_diag_j[jA] > -1)
{
S_diag_j[jS] = S_diag_j[jA];
S_diag_data[jS] = S_diag_data[jA];
jS++;
}
}
}
S_diag_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_diag) = jS;
/* RDF: not sure if able to thread this loop */
jS = 0;
for (i = 0; i < num_variables; i++)
{
S_offd_i[i] = jS;
for (jA = A_offd_i[i]; jA < A_offd_i[i + 1]; jA++)
{
if (S_offd_j[jA] > -1)
{
S_offd_j[jS] = S_offd_j[jA];
S_offd_data[jS] = S_offd_data[jA];
jS++;
}
}
}
S_offd_i[num_variables] = jS;
hypre_CSRMatrixNumNonzeros(S_offd) = jS;
hypre_ParCSRMatrixCommPkg(S) = NULL;
*S_ptr = S;
hypre_TFree(dof_func_offd, HYPRE_MEMORY_HOST);
return (ierr);
}
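/* Compression illustration (editor's sketch): after the strength loop a row
 * of S_diag_j might read {-1, 3, -1, 7} with data {d0, d1, d2, d3}; the
 * compression pass above packs it to cols {3, 7}, data {d1, d3}, and the
 * rebuilt S_diag_i row pointers reflect the shorter rows. The diagonal was
 * marked -1 up front, so S never stores diagonal entries. */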
/**
* Initialize the IJBuffer counters
**/
HYPRE_Int
hypre_NonGalerkinIJBufferInit( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_rowcounter,
HYPRE_Int *ijbuf_numcols )
{
HYPRE_Int ierr = 0;
(*ijbuf_cnt) = 0;
(*ijbuf_rowcounter) = 1; /*Always points to the next row*/
ijbuf_numcols[0] = 0;
return ierr;
}
/**
* Initialize the IJBuffer counters (HYPRE_BigInt variant of the above)
**/
HYPRE_Int
hypre_NonGalerkinIJBigBufferInit( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_rowcounter,
HYPRE_BigInt *ijbuf_numcols )
{
HYPRE_Int ierr = 0;
(*ijbuf_cnt) = 0;
(*ijbuf_rowcounter) = 1; /*Always points to the next row*/
ijbuf_numcols[0] = 0;
return ierr;
}
/**
* Update the buffer counters
**/
HYPRE_Int
hypre_NonGalerkinIJBufferNewRow( HYPRE_BigInt *ijbuf_rownums, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_numcols,
HYPRE_Int *ijbuf_rowcounter,
HYPRE_BigInt new_row )
{
HYPRE_Int ierr = 0;
/* First check to see if the previous row was empty, and if so, overwrite that row */
if ( ijbuf_numcols[(*ijbuf_rowcounter) - 1] == 0 )
{
ijbuf_rownums[(*ijbuf_rowcounter) - 1] = new_row;
}
else
{
/* Move to the next row */
ijbuf_rownums[(*ijbuf_rowcounter)] = new_row;
ijbuf_numcols[(*ijbuf_rowcounter)] = 0;
(*ijbuf_rowcounter)++;
}
return ierr;
}
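/* Behavior sketch (editor's note): calling NewRow(.., new_row) with
 * rowcounter = 2 and numcols = {3, 0} overwrites the empty second slot,
 * rownums[1] = new_row, and rowcounter stays 2; with numcols = {3, 2} it
 * instead opens a third slot, rownums[2] = new_row, numcols[2] = 0, and
 * rowcounter becomes 3. */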
/**
* Compress the current row in an IJ Buffer by removing duplicate entries
**/
HYPRE_Int
hypre_NonGalerkinIJBufferCompressRow( HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int ijbuf_rowcounter,
HYPRE_Real *ijbuf_data,
HYPRE_BigInt *ijbuf_cols,
HYPRE_BigInt *ijbuf_rownums,
HYPRE_Int *ijbuf_numcols )
{
HYPRE_Int ierr = 0;
HYPRE_Int nentries, i, nduplicate;
/* Compress the current row by removing any repeat entries,
* making sure to decrement ijbuf_cnt by nduplicate */
nentries = ijbuf_numcols[ ijbuf_rowcounter - 1 ];
nduplicate = 0;
hypre_BigQsort1(ijbuf_cols, ijbuf_data, (*ijbuf_cnt) - nentries, (*ijbuf_cnt) - 1 );
for (i = (*ijbuf_cnt) - nentries + 1; i <= (*ijbuf_cnt) - 1; i++)
{
if ( ijbuf_cols[i] == ijbuf_cols[i - 1] )
{
/* Shift duplicate entry down */
nduplicate++;
ijbuf_data[i - nduplicate] += ijbuf_data[i];
}
else if (nduplicate > 0)
{
ijbuf_data[i - nduplicate] = ijbuf_data[i];
ijbuf_cols[i - nduplicate] = ijbuf_cols[i];
}
}
(*ijbuf_cnt) -= nduplicate;
ijbuf_numcols[ ijbuf_rowcounter - 1 ] -= nduplicate;
return ierr;
}
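/* Duplicate-removal illustration (editor's sketch): with the current row
 * holding cols {5, 3, 5, 9} and data {1.0, 1.0, 2.0, 1.0}, the qsort yields
 * cols {3, 5, 5, 9}; the scan then folds the repeated column 5 into one
 * entry of value 3.0, giving cols {3, 5, 9}, data {1.0, 3.0, 1.0}, and both
 * ijbuf_cnt and this row's numcols drop by nduplicate = 1. */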
/**
* Compress the entire buffer, removing duplicate rows
**/
HYPRE_Int
hypre_NonGalerkinIJBufferCompress( HYPRE_Int ijbuf_size,
HYPRE_Int *ijbuf_cnt, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int *ijbuf_rowcounter,
HYPRE_Real **ijbuf_data,
HYPRE_BigInt **ijbuf_cols,
HYPRE_BigInt **ijbuf_rownums,
HYPRE_Int **ijbuf_numcols)
{
HYPRE_Int ierr = 0;
HYPRE_Int *indys = hypre_CTAlloc(HYPRE_Int, (*ijbuf_rowcounter),
HYPRE_MEMORY_HOST);
HYPRE_Int i, j, duplicate, cnt_new, rowcounter_new, prev_row;
HYPRE_Int row_loc;
HYPRE_BigInt row_start, row_stop, row;
HYPRE_Real *data_new;
HYPRE_BigInt *cols_new;
HYPRE_BigInt *rownums_new;
HYPRE_Int *numcols_new;
/* Do a sort on rownums, but store the original order in indys.
* Then see if there are any duplicate rows */
for (i = 0; i < (*ijbuf_rowcounter); i++)
{ indys[i] = i; }
hypre_BigQsortbi((*ijbuf_rownums), indys, 0, (*ijbuf_rowcounter) - 1);
duplicate = 0;
for (i = 1; i < (*ijbuf_rowcounter); i++)
{
if (indys[i] != (indys[i - 1] + 1))
{
duplicate = 1;
break;
}
}
/* Compress duplicate rows */
if (duplicate)
{
/* Accumulate numcols, so that it functions like a CSR row-pointer */
for (i = 1; i < (*ijbuf_rowcounter); i++)
{ (*ijbuf_numcols)[i] += (*ijbuf_numcols)[i - 1]; }
/* Initialize new buffer */
prev_row = -1;
rowcounter_new = 0;
cnt_new = 0;
data_new = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
cols_new = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
rownums_new = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
numcols_new = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
numcols_new[0] = 0;
/* Cycle through each row */
for (i = 0; i < (*ijbuf_rowcounter); i++)
{
/* Find which row this is in local and global numberings, and where
* this row's data starts and stops in the buffer*/
row_loc = indys[i];
row = (*ijbuf_rownums)[i];
if (row_loc > 0)
{
row_start = (*ijbuf_numcols)[row_loc - 1];
row_stop = (*ijbuf_numcols)[row_loc];
}
else
{
row_start = 0;
row_stop = (*ijbuf_numcols)[row_loc];
}
/* Is this a new row? If so, compress previous row, and add a new
* one. Noting that prev_row = -1 is a special value */
if (row != prev_row)
{
if (prev_row != -1)
{
/* Compress previous row */
hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
cols_new, rownums_new, numcols_new);
}
prev_row = row;
numcols_new[rowcounter_new] = 0;
rownums_new[rowcounter_new] = row;
rowcounter_new++;
}
/* Copy row into new buffer */
for (j = row_start; j < row_stop; j++)
{
data_new[cnt_new] = (*ijbuf_data)[j];
cols_new[cnt_new] = (*ijbuf_cols)[j];
numcols_new[rowcounter_new - 1]++;
cnt_new++;
}
}
/* Compress the final row */
if (i > 1)
{
hypre_NonGalerkinIJBufferCompressRow(&cnt_new, rowcounter_new, data_new,
cols_new, rownums_new, numcols_new);
}
*ijbuf_cnt = cnt_new;
*ijbuf_rowcounter = rowcounter_new;
/* Point to the new buffer */
hypre_TFree(*ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(*ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(*ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(*ijbuf_numcols, HYPRE_MEMORY_DEVICE);
(*ijbuf_data) = data_new;
(*ijbuf_cols) = cols_new;
(*ijbuf_rownums) = rownums_new;
(*ijbuf_numcols) = numcols_new;
}
hypre_TFree(indys, HYPRE_MEMORY_HOST);
return ierr;
}
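/* Duplicate-row illustration (editor's sketch): if rownums = {8, 4, 8} with
 * numcols = {2, 1, 1}, the qsort on rownums gives {4, 8, 8} and indys shows
 * the original positions are out of order, so the duplicate branch runs:
 * numcols is accumulated to {2, 3, 4} (CSR-style row pointer), the two
 * row-8 blocks are copied back-to-back into the new buffer, and
 * CompressRow merges any column repeated between them. */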
/**
* Do a buffered write to an IJ matrix. That is, write to the buffer
* until the buffer is full; when full, flush the buffer to the IJ matrix
* and reset the buffer counters.
* In effect, this buffers the operation
* A[row_to_write, col_to_write] += val_to_write
**/
HYPRE_Int
hypre_NonGalerkinIJBufferWrite( HYPRE_IJMatrix B, /* Unassembled matrix to add an entry to */
HYPRE_Int *ijbuf_cnt, /* current buffer size */
HYPRE_Int ijbuf_size, /* max buffer size */
HYPRE_Int *ijbuf_rowcounter, /* num of rows in rownums (i.e., size of rownums) */
/* This counter will increase as you call this function for multiple rows */
HYPRE_Real **ijbuf_data, /* Array of values, of size ijbuf_size */
HYPRE_BigInt **ijbuf_cols, /* Array of col indices, of size ijbuf_size */
HYPRE_BigInt **ijbuf_rownums, /* Row indices that, together with numcols, form a CSR-like structure */
HYPRE_Int **ijbuf_numcols, /* rownums[i] is the row num, and numcols holds the number of entries */
/* being added for that row. Note numcols is not cumulative like an actual CSR row pointer */
HYPRE_BigInt row_to_write, /* Entry to add to the buffer */
HYPRE_BigInt col_to_write, /* Ditto */
HYPRE_Real val_to_write ) /* Ditto */
{
HYPRE_Int ierr = 0;
if ( (*ijbuf_cnt) == 0 )
{
/* brand new buffer: increment buffer structures for the new row */
hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
}
else if ((*ijbuf_rownums)[ (*ijbuf_rowcounter) - 1 ] != row_to_write)
{
/* If this is a new row, compress the previous row */
hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
(*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
/* increment buffer structures for the new row */
hypre_NonGalerkinIJBufferNewRow( (*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter,
row_to_write);
}
/* Add new entry to buffer */
(*ijbuf_cols)[(*ijbuf_cnt)] = col_to_write;
(*ijbuf_data)[(*ijbuf_cnt)] = val_to_write;
(*ijbuf_numcols)[ (*ijbuf_rowcounter) - 1 ]++;
(*ijbuf_cnt)++;
/* Buffer is full, write to the matrix object */
if ( (*ijbuf_cnt) == (ijbuf_size - 1) )
{
/* If the last row is empty, decrement rowcounter */
if ( (*ijbuf_numcols)[ (*ijbuf_rowcounter) - 1 ] == 0)
{ (*ijbuf_rowcounter)--; }
/* Compress and Add Entries */
hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, (*ijbuf_rowcounter), (*ijbuf_data),
(*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, ijbuf_rowcounter, ijbuf_data,
ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
ierr += HYPRE_IJMatrixAddToValues(B, *ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums),
(*ijbuf_cols), (*ijbuf_data));
/* Reinitialize the buffer */
hypre_NonGalerkinIJBufferInit( ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_numcols));
hypre_NonGalerkinIJBufferNewRow((*ijbuf_rownums), (*ijbuf_numcols), ijbuf_rowcounter, row_to_write);
}
return ierr;
}
/**
* Empty the IJ Buffer with a final AddToValues.
**/
HYPRE_Int
hypre_NonGalerkinIJBufferEmpty( HYPRE_IJMatrix B, /* See NonGalerkinIJBufferWrite for parameter descriptions */
HYPRE_Int ijbuf_size,
HYPRE_Int *ijbuf_cnt,
HYPRE_Int ijbuf_rowcounter,
HYPRE_Real **ijbuf_data,
HYPRE_BigInt **ijbuf_cols,
HYPRE_BigInt **ijbuf_rownums,
HYPRE_Int **ijbuf_numcols)
{
HYPRE_Int ierr = 0;
if ( (*ijbuf_cnt) > 0)
{
/* Compress the last row and then write */
hypre_NonGalerkinIJBufferCompressRow(ijbuf_cnt, ijbuf_rowcounter, (*ijbuf_data),
(*ijbuf_cols), (*ijbuf_rownums), (*ijbuf_numcols));
hypre_NonGalerkinIJBufferCompress(ijbuf_size, ijbuf_cnt, &ijbuf_rowcounter, ijbuf_data,
ijbuf_cols, ijbuf_rownums, ijbuf_numcols);
ierr += HYPRE_IJMatrixAddToValues(B, ijbuf_rowcounter, (*ijbuf_numcols), (*ijbuf_rownums),
(*ijbuf_cols), (*ijbuf_data));
}
(*ijbuf_cnt) = 0;
return ierr;
}
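/* Typical calling pattern for the buffered interface (editor's sketch of
 * how the routines below use it; not an official hypre API example):
 *
 *    hypre_NonGalerkinIJBigBufferInit(&cnt, &rowcounter, cols);
 *    for each entry (row, col, val), grouped by row:
 *       hypre_NonGalerkinIJBufferWrite(B, &cnt, size, &rowcounter, &data,
 *                                      &cols, &rownums, &numcols,
 *                                      row, col, val);
 *    hypre_NonGalerkinIJBufferEmpty(B, size, &cnt, rowcounter, &data,
 *                                   &cols, &rownums, &numcols);
 *
 * Entries for one row should be added contiguously: switching rows triggers
 * a per-row compression, and a nearly full buffer triggers a full compress
 * plus a flush through HYPRE_IJMatrixAddToValues. */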
/*
* Construct sparsity pattern based on R_I A P, plus entries required by drop tolerance
*/
hypre_ParCSRMatrix *
hypre_NonGalerkinSparsityPattern(hypre_ParCSRMatrix *R_IAP,
hypre_ParCSRMatrix *RAP,
HYPRE_Int * CF_marker,
HYPRE_Real droptol,
HYPRE_Int sym_collapse,
HYPRE_Int collapse_beta )
{
/* MPI Communicator */
MPI_Comm comm = hypre_ParCSRMatrixComm(RAP);
/* Declare R_IAP */
hypre_CSRMatrix *R_IAP_diag = hypre_ParCSRMatrixDiag(R_IAP);
HYPRE_Int *R_IAP_diag_i = hypre_CSRMatrixI(R_IAP_diag);
HYPRE_Int *R_IAP_diag_j = hypre_CSRMatrixJ(R_IAP_diag);
hypre_CSRMatrix *R_IAP_offd = hypre_ParCSRMatrixOffd(R_IAP);
HYPRE_Int *R_IAP_offd_i = hypre_CSRMatrixI(R_IAP_offd);
HYPRE_Int *R_IAP_offd_j = hypre_CSRMatrixJ(R_IAP_offd);
HYPRE_BigInt *col_map_offd_R_IAP = hypre_ParCSRMatrixColMapOffd(R_IAP);
/* Declare RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
HYPRE_BigInt last_col_diag_RAP = first_col_diag_RAP + (HYPRE_BigInt)num_cols_diag_RAP - 1;
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
/* Declare A */
HYPRE_Int num_fine_variables = hypre_CSRMatrixNumRows(R_IAP_diag);
/* Declare IJ matrices */
HYPRE_IJMatrix Pattern;
hypre_ParCSRMatrix *Pattern_CSR = NULL;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
HYPRE_Int *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
HYPRE_Int *ijbuf_sym_numcols;
/* Other Declarations */
HYPRE_Int ierr = 0;
HYPRE_Real max_entry = 0.0;
HYPRE_Real max_entry_offd = 0.0;
HYPRE_Int * rownz = NULL;
HYPRE_Int i, j, Cpt;
HYPRE_BigInt row_start, row_end, global_row, global_col;
/* Other Setup */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
/*
* Initialize the IJ matrix, leveraging our rough knowledge of the
* nonzero structure of Pattern based on RAP
*
* ilower, iupper, jlower, jupper */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP, first_col_diag_RAP,
last_col_diag_RAP, &Pattern);
ierr += HYPRE_IJMatrixSetObjectType(Pattern, HYPRE_PARCSR);
rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
for (i = 0; i < num_variables; i++)
{ rownz[i] = 1.2 * (RAP_diag_i[i + 1] - RAP_diag_i[i]) + 1.2 * (RAP_offd_i[i + 1] - RAP_offd_i[i]); }
HYPRE_IJMatrixSetRowSizes(Pattern, rownz);
ierr += HYPRE_IJMatrixInitialize(Pattern);
hypre_TFree(rownz, HYPRE_MEMORY_HOST);
/*
* For efficiency, we do a buffered IJAddToValues.
* Here, we allocate the buffers and then initialize the buffer counters
*/
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if (sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Place entries in R_IAP into Pattern
*/
Cpt = -1; /* Cpt contains the fine grid index of the i-th Cpt */
for (i = 0; i < num_variables; i++)
{
global_row = i + first_col_diag_RAP;
/* Find the next Coarse Point in CF_marker */
for (j = Cpt + 1; j < num_fine_variables; j++)
{
if (CF_marker[j] == 1) /* Found Next C-point */
{
Cpt = j;
break;
}
}
/* Diag Portion */
row_start = R_IAP_diag_i[Cpt];
row_end = R_IAP_diag_i[Cpt + 1];
for (j = row_start; j < row_end; j++)
{
global_col = R_IAP_diag_j[j] + first_col_diag_RAP;
/* Buffer the single entry (global_row, global_col) += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0);
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0);
}
}
/* Offdiag Portion */
row_start = R_IAP_offd_i[Cpt];
row_end = R_IAP_offd_i[Cpt + 1];
for (j = row_start; j < row_end; j++)
{
global_col = col_map_offd_R_IAP[ R_IAP_offd_j[j] ];
/* Buffer the single entry (global_row, global_col) += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0);
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0);
}
}
}
/*
* Use drop-tolerance to compute new entries for sparsity pattern
*/
/*#ifdef HYPRE_USING_OPENMP
#pragma omp parallel for private(i,j,max_entry,max_entry_offd,global_col,global_row) HYPRE_SMP_SCHEDULE
#endif */
for (i = 0; i < num_variables; i++)
{
global_row = i + first_col_diag_RAP;
/* Compute the drop tolerance for this row, which is just
* abs(max of row i)*droptol */
max_entry = -1.0;
for (j = RAP_diag_i[i]; j < RAP_diag_i[i + 1]; j++)
{
if ( (RAP_diag_j[j] != i) && (max_entry < fabs(RAP_diag_data[j]) ) )
{ max_entry = fabs(RAP_diag_data[j]); }
}
for (j = RAP_offd_i[i]; j < RAP_offd_i[i + 1]; j++)
{
if ( max_entry < fabs(RAP_offd_data[j]) )
{ max_entry = fabs(RAP_offd_data[j]); }
}
max_entry *= droptol;
max_entry_offd = max_entry * collapse_beta;
/* Loop over diag portion, adding all entries that are "strong" */
for (j = RAP_diag_i[i]; j < RAP_diag_i[i + 1]; j++)
{
if ( fabs(RAP_diag_data[j]) > max_entry )
{
global_col = RAP_diag_j[j] + first_col_diag_RAP;
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues
* A[global_row, global_col] += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0 );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0 );
}
/*}*/
}
}
/* Loop over offd portion, adding all entries that are "strong" */
for (j = RAP_offd_i[i]; j < RAP_offd_i[i + 1]; j++)
{
if ( fabs(RAP_offd_data[j]) > max_entry_offd )
{
global_col = col_map_offd_RAP[ RAP_offd_j[j] ];
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues
* A[global_row, global_col] += 1.0 */
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_col, 1.0 );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( Pattern, &ijbuf_sym_cnt,
ijbuf_size, &ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums, &ijbuf_sym_numcols,
global_col, global_row, 1.0 );
}
/*}*/
}
}
}
/* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if (sym_collapse)
{
hypre_NonGalerkinIJBufferEmpty(Pattern, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
}
/* Finalize Construction of Pattern */
ierr += HYPRE_IJMatrixAssemble(Pattern);
ierr += HYPRE_IJMatrixGetObject( Pattern, (void**) &Pattern_CSR );
/* Deallocate */
ierr += HYPRE_IJMatrixSetObjectType(Pattern, -1);
ierr += HYPRE_IJMatrixDestroy(Pattern);
hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
if (sym_collapse)
{
hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
}
return Pattern_CSR;
}
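/* Drop-tolerance illustration (editor's sketch): with droptol = 0.01 and
 * collapse_beta = 10, a row of RAP whose largest off-diagonal magnitude is
 * 2.0 keeps diag entries with |value| > 0.02 and offd entries with
 * |value| > 0.2 in Pattern, on top of the R_IAP entries that are always
 * included; sym_collapse additionally inserts the transposed (col, row)
 * positions so the pattern stays structurally symmetric. */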
HYPRE_Int
hypre_BoomerAMGBuildNonGalerkinCoarseOperator( hypre_ParCSRMatrix **RAP_ptr,
hypre_ParCSRMatrix *AP,
HYPRE_Real strong_threshold,
HYPRE_Real max_row_sum,
HYPRE_Int num_functions,
HYPRE_Int * dof_func_value,
HYPRE_Int * CF_marker,
HYPRE_Real droptol, HYPRE_Int sym_collapse,
HYPRE_Real lump_percent, HYPRE_Int collapse_beta )
{
/* Initializations */
MPI_Comm comm = hypre_ParCSRMatrixComm(*RAP_ptr);
hypre_ParCSRMatrix *S = NULL;
hypre_ParCSRMatrix *RAP = *RAP_ptr;
HYPRE_Int i, j, k, row_start, row_end, value, num_cols_offd_Sext, num_procs;
HYPRE_Int S_ext_diag_size, S_ext_offd_size, last_col_diag_RAP, cnt_offd, cnt_diag, cnt;
HYPRE_Int col_indx_Pattern, current_Pattern_j, col_indx_RAP;
/* HYPRE_Real start_time = hypre_MPI_Wtime(); */
/* HYPRE_Real end_time; */
HYPRE_BigInt *temp = NULL;
HYPRE_Int ierr = 0;
char filename[256];
/* Lumping related variables */
HYPRE_IJMatrix ijmatrix;
HYPRE_BigInt * Pattern_offd_indices = NULL;
HYPRE_BigInt * S_offd_indices = NULL;
HYPRE_BigInt * offd_intersection = NULL;
HYPRE_Real * offd_intersection_data = NULL;
HYPRE_Int * diag_intersection = NULL;
HYPRE_Real * diag_intersection_data = NULL;
HYPRE_Int Pattern_offd_indices_len = 0;
HYPRE_Int Pattern_offd_indices_allocated_len = 0;
HYPRE_Int S_offd_indices_len = 0;
HYPRE_Int S_offd_indices_allocated_len = 0;
HYPRE_Int offd_intersection_len = 0;
HYPRE_Int offd_intersection_allocated_len = 0;
HYPRE_Int diag_intersection_len = 0;
HYPRE_Int diag_intersection_allocated_len = 0;
HYPRE_Real intersection_len = 0;
HYPRE_Int * Pattern_indices_ptr = NULL;
HYPRE_Int Pattern_diag_indices_len = 0;
HYPRE_Int global_row = 0;
HYPRE_Int has_row_ended = 0;
HYPRE_Real lump_value = 0.;
HYPRE_Real diagonal_lump_value = 0.;
HYPRE_Real neg_lump_value = 0.;
HYPRE_Real sum_strong_neigh = 0.;
HYPRE_Int * rownz = NULL;
/* offd and diag portions of RAP */
hypre_CSRMatrix *RAP_diag = hypre_ParCSRMatrixDiag(RAP);
HYPRE_Int *RAP_diag_i = hypre_CSRMatrixI(RAP_diag);
HYPRE_Real *RAP_diag_data = hypre_CSRMatrixData(RAP_diag);
HYPRE_Int *RAP_diag_j = hypre_CSRMatrixJ(RAP_diag);
HYPRE_BigInt first_col_diag_RAP = hypre_ParCSRMatrixFirstColDiag(RAP);
HYPRE_Int num_cols_diag_RAP = hypre_CSRMatrixNumCols(RAP_diag);
hypre_CSRMatrix *RAP_offd = hypre_ParCSRMatrixOffd(RAP);
HYPRE_Int *RAP_offd_i = hypre_CSRMatrixI(RAP_offd);
HYPRE_Real *RAP_offd_data = NULL;
HYPRE_Int *RAP_offd_j = hypre_CSRMatrixJ(RAP_offd);
HYPRE_BigInt *col_map_offd_RAP = hypre_ParCSRMatrixColMapOffd(RAP);
HYPRE_Int num_cols_RAP_offd = hypre_CSRMatrixNumCols(RAP_offd);
HYPRE_Int num_variables = hypre_CSRMatrixNumRows(RAP_diag);
HYPRE_BigInt global_num_vars = hypre_ParCSRMatrixGlobalNumRows(RAP);
/* offd and diag portions of S */
hypre_CSRMatrix *S_diag = NULL;
HYPRE_Int *S_diag_i = NULL;
HYPRE_Real *S_diag_data = NULL;
HYPRE_Int *S_diag_j = NULL;
hypre_CSRMatrix *S_offd = NULL;
HYPRE_Int *S_offd_i = NULL;
HYPRE_Real *S_offd_data = NULL;
HYPRE_Int *S_offd_j = NULL;
HYPRE_BigInt *col_map_offd_S = NULL;
HYPRE_Int num_cols_offd_S;
/* HYPRE_Int num_nonzeros_S_diag; */
/* off processor portions of S */
hypre_CSRMatrix *S_ext = NULL;
HYPRE_Int *S_ext_i = NULL;
HYPRE_Real *S_ext_data = NULL;
HYPRE_BigInt *S_ext_j = NULL;
HYPRE_Int *S_ext_diag_i = NULL;
HYPRE_Real *S_ext_diag_data = NULL;
HYPRE_Int *S_ext_diag_j = NULL;
HYPRE_Int *S_ext_offd_i = NULL;
HYPRE_Real *S_ext_offd_data = NULL;
HYPRE_Int *S_ext_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Sext = NULL;
/* HYPRE_Int num_nonzeros_S_ext_diag;
HYPRE_Int num_nonzeros_S_ext_offd;
HYPRE_Int num_rows_Sext = 0; */
HYPRE_Int row_indx_Sext = 0;
/* offd and diag portions of Pattern */
hypre_ParCSRMatrix *Pattern = NULL;
hypre_CSRMatrix *Pattern_diag = NULL;
HYPRE_Int *Pattern_diag_i = NULL;
HYPRE_Real *Pattern_diag_data = NULL;
HYPRE_Int *Pattern_diag_j = NULL;
hypre_CSRMatrix *Pattern_offd = NULL;
HYPRE_Int *Pattern_offd_i = NULL;
HYPRE_Real *Pattern_offd_data = NULL;
HYPRE_Int *Pattern_offd_j = NULL;
HYPRE_BigInt *col_map_offd_Pattern = NULL;
HYPRE_Int num_cols_Pattern_offd;
HYPRE_Int my_id;
/* Buffered IJAddToValues */
HYPRE_Int ijbuf_cnt, ijbuf_size, ijbuf_rowcounter;
HYPRE_Real *ijbuf_data;
HYPRE_BigInt *ijbuf_cols, *ijbuf_rownums;
HYPRE_Int *ijbuf_numcols;
/* Buffered IJAddToValues for Symmetric Entries */
HYPRE_Int ijbuf_sym_cnt, ijbuf_sym_rowcounter;
HYPRE_Real *ijbuf_sym_data;
HYPRE_BigInt *ijbuf_sym_cols, *ijbuf_sym_rownums;
HYPRE_Int *ijbuf_sym_numcols;
/* Further Initializations */
if (num_cols_RAP_offd)
{ RAP_offd_data = hypre_CSRMatrixData(RAP_offd); }
hypre_MPI_Comm_size(comm, &num_procs);
hypre_MPI_Comm_rank(comm, &my_id);
/* Compute Sparsity Pattern */
Pattern = hypre_NonGalerkinSparsityPattern(AP, RAP, CF_marker, droptol,
sym_collapse, collapse_beta);
Pattern_diag = hypre_ParCSRMatrixDiag(Pattern);
Pattern_diag_i = hypre_CSRMatrixI(Pattern_diag);
Pattern_diag_data = hypre_CSRMatrixData(Pattern_diag);
Pattern_diag_j = hypre_CSRMatrixJ(Pattern_diag);
Pattern_offd = hypre_ParCSRMatrixOffd(Pattern);
Pattern_offd_i = hypre_CSRMatrixI(Pattern_offd);
Pattern_offd_j = hypre_CSRMatrixJ(Pattern_offd);
col_map_offd_Pattern = hypre_ParCSRMatrixColMapOffd(Pattern);
num_cols_Pattern_offd = hypre_CSRMatrixNumCols(Pattern_offd);
if (num_cols_Pattern_offd)
{ Pattern_offd_data = hypre_CSRMatrixData(Pattern_offd); }
/**
* Fill in the entries of Pattern with entries from RAP
**/
/* First, sort column indices in RAP and Pattern */
for (i = 0; i < num_variables; i++)
{
/* The diag matrices store the diagonal as first element in each row.
* We maintain that for the case of Pattern and RAP, because the
* strength of connection routine relies on it and we need to ignore
* diagonal entries in Pattern later during set intersections.
* */
/* Sort diag portion of RAP */
row_start = RAP_diag_i[i];
if ( RAP_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = RAP_diag_i[i + 1];
hypre_qsort1(RAP_diag_j, RAP_diag_data, row_start, row_end - 1 );
/* Sort diag portion of Pattern */
row_start = Pattern_diag_i[i];
if ( Pattern_diag_j[row_start] == i)
{ row_start = row_start + 1; }
row_end = Pattern_diag_i[i + 1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end - 1 );
/* Sort offd portion of RAP */
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i + 1];
hypre_qsort1(RAP_offd_j, RAP_offd_data, row_start, row_end - 1 );
/* Sort offd portion of Pattern */
/* Be careful to map coarse dof i with CF_marker into Pattern */
row_start = Pattern_offd_i[i];
row_end = Pattern_offd_i[i + 1];
hypre_qsort1(Pattern_offd_j, Pattern_offd_data, row_start, row_end - 1 );
}
/* Create Strength matrix based on RAP or Pattern. If Pattern is used,
* then the SortedCopyParCSRData(...) function call must also be commented
* back in */
/* hypre_SortedCopyParCSRData(RAP, Pattern); */
if (0)
{
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum, */
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
num_functions, dof_func_value, &S);
}
else
{
/* Passing in "1, NULL" because dof_func_value is not needed
* when the number of functions is 1 */
/* hypre_BoomerAMG_MyCreateS(Pattern, strong_threshold, max_row_sum,*/
hypre_BoomerAMG_MyCreateS(RAP, strong_threshold, max_row_sum,
1, NULL, &S);
}
/* Grab diag and offd parts of S */
S_diag = hypre_ParCSRMatrixDiag(S);
S_diag_i = hypre_CSRMatrixI(S_diag);
S_diag_j = hypre_CSRMatrixJ(S_diag);
S_diag_data = hypre_CSRMatrixData(S_diag);
S_offd = hypre_ParCSRMatrixOffd(S);
S_offd_i = hypre_CSRMatrixI(S_offd);
S_offd_j = hypre_CSRMatrixJ(S_offd);
S_offd_data = hypre_CSRMatrixData(S_offd);
col_map_offd_S = hypre_ParCSRMatrixColMapOffd(S);
num_cols_offd_S = hypre_CSRMatrixNumCols(S_offd);
/* num_nonzeros_S_diag = S_diag_i[num_variables]; */
/* Grab part of S that is distance one away from the local rows
* This is needed later for the stencil collapsing. This section
* of the code mimics par_rap.c when it extracts Ps_ext.
* When moving from par_rap.c, the variable name changes were:
* A --> RAP
* P --> S
* Ps_ext --> S_ext
* P_ext_diag --> S_ext_diag
* P_ext_offd --> S_ext_offd
*
* The data layout of S_ext as returned by ExtractBExt gives you only global
* column indices, and must be converted to the local numbering. This code
* section constructs S_ext_diag and S_ext_offd, which are the distance 1
* couplings in S based on the sparsity structure in RAP.
* --> S_ext_diag corresponds to the same column slice that RAP_diag
* corresponds to. Thus, the column indexing is the same as in
* RAP_diag such that S_ext_diag_j[k] just needs to be offset by
* the RAP_diag first global dof offset.
* --> S_ext_offd column indexing is a little more complicated, and
* requires the computation below of col_map_S_ext_offd, which
* maps the local 0,1,2,... column indexing in S_ext_offd to global
* dof numbers. Note, that the num_cols_RAP_offd is NOT equal to
* num_cols_offd_S_ext
* --> The row indexing of S_ext_diag|offd is as follows. Use
* col_map_offd_RAP, where the first index corresponds to the
* first global row index in S_ext_diag|offd. Remember that ExtractBExt
* grabs the information from S required for locally computing
* (RAP*S)[proc_k row slice, :] */
if (num_procs > 1)
{
S_ext = hypre_ParCSRMatrixExtractBExt(S, RAP, 1);
S_ext_data = hypre_CSRMatrixData(S_ext);
S_ext_i = hypre_CSRMatrixI(S_ext);
S_ext_j = hypre_CSRMatrixBigJ(S_ext);
}
/* This uses the num_cols_RAP_offd to set S_ext_diag|offd_i, because S_ext
* is the off-processor information needed to compute RAP*S. That is,
* num_cols_RAP_offd represents the number of rows needed from S_ext for
* the multiplication */
S_ext_diag_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd + 1, HYPRE_MEMORY_HOST);
S_ext_offd_i = hypre_CTAlloc(HYPRE_Int, num_cols_RAP_offd + 1, HYPRE_MEMORY_HOST);
S_ext_diag_size = 0;
S_ext_offd_size = 0;
/* num_rows_Sext = num_cols_RAP_offd; */
last_col_diag_RAP = first_col_diag_RAP + num_cols_diag_RAP - 1;
/* Construct the S_ext_diag and S_ext_offd row-pointer arrays by counting elements.
* This creates offd and diag blocks of S_ext relative to the column slice owned
* by this processor. Splitting S_ext between offd and diag makes the binary
* searching and set intersecting later on (the bottleneck) faster. */
for (i = 0; i < num_cols_RAP_offd; i++)
{
for (j = S_ext_i[i]; j < S_ext_i[i + 1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
{
S_ext_offd_size++;
}
else
{
S_ext_diag_size++;
}
S_ext_diag_i[i + 1] = S_ext_diag_size;
S_ext_offd_i[i + 1] = S_ext_offd_size;
}
if (S_ext_diag_size)
{
S_ext_diag_j = hypre_CTAlloc(HYPRE_Int, S_ext_diag_size, HYPRE_MEMORY_HOST);
S_ext_diag_data = hypre_CTAlloc(HYPRE_Real, S_ext_diag_size, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
S_ext_offd_j = hypre_CTAlloc(HYPRE_Int, S_ext_offd_size, HYPRE_MEMORY_HOST);
S_ext_offd_data = hypre_CTAlloc(HYPRE_Real, S_ext_offd_size, HYPRE_MEMORY_HOST);
}
/* This copies over the column indices into the offd and diag parts.
* The diag portion has its local column indices shifted to start at 0.
* The offd portion requires more work to construct the col_map_offd array
* and a local column ordering. */
cnt_offd = 0;
cnt_diag = 0;
cnt = 0;
for (i = 0; i < num_cols_RAP_offd; i++)
{
for (j = S_ext_i[i]; j < S_ext_i[i + 1]; j++)
if (S_ext_j[j] < first_col_diag_RAP || S_ext_j[j] > last_col_diag_RAP)
{
S_ext_offd_data[cnt_offd] = S_ext_data[j];
//S_ext_offd_j[cnt_offd++] = S_ext_j[j];
S_ext_j[cnt_offd++] = S_ext_j[j];
}
else
{
S_ext_diag_data[cnt_diag] = S_ext_data[j];
S_ext_diag_j[cnt_diag++] = (HYPRE_Int)(S_ext_j[j] - first_col_diag_RAP);
}
}
/* This creates col_map_offd_Sext */
if (S_ext_offd_size || num_cols_offd_S)
{
temp = hypre_CTAlloc(HYPRE_BigInt, S_ext_offd_size + num_cols_offd_S, HYPRE_MEMORY_HOST);
for (i = 0; i < S_ext_offd_size; i++)
{
temp[i] = S_ext_j[i];
}
cnt = S_ext_offd_size;
for (i = 0; i < num_cols_offd_S; i++)
{
temp[cnt++] = col_map_offd_S[i];
}
}
if (cnt)
{
/* after this, the first so many entries of temp will hold the
* unique column indices in S_ext_offd_j unioned with the indices
* in col_map_offd_S */
hypre_BigQsort0(temp, 0, cnt - 1);
num_cols_offd_Sext = 1;
value = temp[0];
for (i = 1; i < cnt; i++)
{
if (temp[i] > value)
{
value = temp[i];
temp[num_cols_offd_Sext++] = value;
}
}
}
else
{
num_cols_offd_Sext = 0;
}
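/* col_map illustration (editor's sketch): if the off-processor big column
 * indices gathered into temp are {12, 7, 12, 7, 9}, the qsort/unique pass
 * above leaves temp = {7, 9, 12, ...} with num_cols_offd_Sext = 3; these
 * values become col_map_offd_Sext, and each S_ext_offd_j entry is later
 * replaced by its position in this map via hypre_BigBinarySearch. */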
/* num_nonzeros_S_ext_diag = cnt_diag;
num_nonzeros_S_ext_offd = S_ext_offd_size; */
if (num_cols_offd_Sext)
{
col_map_offd_Sext = hypre_CTAlloc(HYPRE_BigInt, num_cols_offd_Sext, HYPRE_MEMORY_HOST);
}
for (i = 0; i < num_cols_offd_Sext; i++)
{
col_map_offd_Sext[i] = temp[i];
}
if (S_ext_offd_size || num_cols_offd_S)
{
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
/* look for S_ext_offd_j[i] in col_map_offd_Sext, and set S_ext_offd_j[i]
* to the index of that column value in col_map_offd_Sext */
for (i = 0 ; i < S_ext_offd_size; i++)
S_ext_offd_j[i] = hypre_BigBinarySearch(col_map_offd_Sext,
S_ext_j[i],
num_cols_offd_Sext);
if (num_procs > 1)
{
hypre_CSRMatrixDestroy(S_ext);
S_ext = NULL;
}
/* Need to sort column indices in S and S_ext */
for (i = 0; i < num_variables; i++)
{
/* Re-Sort diag portion of Pattern, placing the diagonal entry in a
* sorted position */
row_start = Pattern_diag_i[i];
row_end = Pattern_diag_i[i + 1];
hypre_qsort1(Pattern_diag_j, Pattern_diag_data, row_start, row_end - 1 );
/* Sort diag portion of S, noting that S stores no diagonal entry */
/* Unlike hypre_BoomerAMGCreateS, the S from MyCreateS has a non-NULL data array */
row_start = S_diag_i[i];
row_end = S_diag_i[i + 1];
hypre_qsort1(S_diag_j, S_diag_data, row_start, row_end - 1 );
/* Sort offd portion of S, again carrying the data array along */
row_start = S_offd_i[i];
row_end = S_offd_i[i + 1];
hypre_qsort1(S_offd_j, S_offd_data, row_start, row_end - 1 );
}
/* Sort S_ext
* num_cols_RAP_offd equals num_rows for S_ext*/
for (i = 0; i < num_cols_RAP_offd; i++)
{
/* Sort diag portion of S_ext */
row_start = S_ext_diag_i[i];
row_end = S_ext_diag_i[i + 1];
hypre_qsort1(S_ext_diag_j, S_ext_diag_data, row_start, row_end - 1 );
/* Sort offd portion of S_ext */
row_start = S_ext_offd_i[i];
row_end = S_ext_offd_i[i + 1];
hypre_qsort1(S_ext_offd_j, S_ext_offd_data, row_start, row_end - 1 );
}
/*
* Now, for the fun stuff -- Computing the Non-Galerkin Operator
*/
/* Initialize the ijmatrix, leveraging our knowledge of the nonzero
* structure in Pattern */
ierr += HYPRE_IJMatrixCreate(comm, first_col_diag_RAP, last_col_diag_RAP,
first_col_diag_RAP, last_col_diag_RAP, &ijmatrix);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, HYPRE_PARCSR);
rownz = hypre_CTAlloc(HYPRE_Int, num_variables, HYPRE_MEMORY_HOST);
for (i = 0; i < num_variables; i++)
{ rownz[i] = 1.2 * (Pattern_diag_i[i + 1] - Pattern_diag_i[i]) + 1.2 * (Pattern_offd_i[i + 1] - Pattern_offd_i[i]); }
HYPRE_IJMatrixSetRowSizes(ijmatrix, rownz);
ierr += HYPRE_IJMatrixInitialize(ijmatrix);
hypre_TFree(rownz, HYPRE_MEMORY_HOST);
/*
* For efficiency, we do a buffered IJAddToValues.
* Here, we allocate the buffers and then initialize the buffer counters
*/
ijbuf_size = 1000;
ijbuf_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_cnt, &ijbuf_rowcounter, ijbuf_cols );
if (sym_collapse)
{
ijbuf_sym_data = hypre_CTAlloc(HYPRE_Real, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_cols = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_rownums = hypre_CTAlloc(HYPRE_BigInt, ijbuf_size, HYPRE_MEMORY_DEVICE);
ijbuf_sym_numcols = hypre_CTAlloc(HYPRE_Int, ijbuf_size, HYPRE_MEMORY_DEVICE);
hypre_NonGalerkinIJBigBufferInit( &ijbuf_sym_cnt, &ijbuf_sym_rowcounter, ijbuf_sym_cols );
}
/*
* Eliminate Entries In RAP_diag
* */
for (i = 0; i < num_variables; i++)
{
global_row = i + first_col_diag_RAP;
row_start = RAP_diag_i[i];
row_end = RAP_diag_i[i + 1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if ( row_start < row_end)
{
/* Grab pointer to current entry in Pattern_diag */
current_Pattern_j = Pattern_diag_i[i];
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping */
/* Ensure adequate length */
Pattern_offd_indices_len = Pattern_offd_i[i + 1] - Pattern_offd_i[i];
if (Pattern_offd_indices_allocated_len < Pattern_offd_indices_len)
{
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
Pattern_offd_indices = hypre_CTAlloc(HYPRE_BigInt, Pattern_offd_indices_len, HYPRE_MEMORY_HOST);
Pattern_offd_indices_allocated_len = Pattern_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of Pattern_offd_j */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i + 1] - 1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if ( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i] + 1]);
Pattern_diag_indices_len = Pattern_diag_i[i + 1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i + 1] - Pattern_diag_i[i];
}
}
for (j = row_start; j < row_end; j++)
{
col_indx_RAP = RAP_diag_j[j];
/* Ignore zero entries in RAP */
if ( RAP_diag_data[j] != 0.0)
{
/* Don't change the diagonal, just write it */
if (col_indx_RAP == i)
{
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, global_row] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, RAP_diag_data[j] );
/*}*/
}
/* The entry in RAP does not appear in Pattern, so LUMP it */
else if ( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* Lump entry (i, col_indx_RAP) in RAP */
/* Grab the indices for row col_indx_RAP of S_offd and diag. This will
* be for computing lumping locations */
S_offd_indices_len = S_offd_i[col_indx_RAP + 1] - S_offd_i[col_indx_RAP];
if (S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_offd_j */
hypre_GrabSubArray(S_offd_j, S_offd_i[col_indx_RAP], S_offd_i[col_indx_RAP + 1] - 1,
col_map_offd_S, S_offd_indices);
/* No need to grab info out of S_diag_j[...], here we just start from
* S_diag_i[col_indx_RAP] and end at index S_diag_i[col_indx_RAP+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if (offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
/* This intersection also tracks S_offd_data and assumes that
* S_offd_indices is the first argument here */
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_offd_data[ S_offd_i[col_indx_RAP] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. Note that S_diag_j does
* not have a diagonal entry, so no lumping occurs to the diagonal. */
cnt = hypre_max(Pattern_diag_indices_len,
S_diag_i[col_indx_RAP + 1] - S_diag_i[col_indx_RAP] );
if (diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
/* There is no diagonal entry in first position of S */
hypre_IntersectTwoArrays( &(S_diag_j[S_diag_i[col_indx_RAP]]),
&(S_diag_data[ S_diag_i[col_indx_RAP] ]),
S_diag_i[col_indx_RAP + 1] - S_diag_i[col_indx_RAP],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_diag_data[j] to each entry */
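/* Lumping illustration (editor's sketch): if the entry being eliminated is
 * RAP_diag_data[j] = 0.3 and the intersected strength values are
 * {1.0, 2.0}, then sum_strong_neigh becomes 0.3 / 3.0 = 0.1 and, with
 * lump_percent = 1, the two pattern entries receive 0.1 and 0.2, so the
 * row sum is preserved exactly. */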
intersection_len = diag_intersection_len + offd_intersection_len;
if (intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* col_indx_RAP in S, corresponding to the indices we are
* collapsing to in row i This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for (k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for (k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_diag_data[j] / sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for (k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k]) * sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k]) * sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k] + first_col_diag_RAP;
/*#ifdef HYPRE_USING_OPENMP
#pragma omp critical (IJAdd)
#endif
{*/
/* For efficiency, we do a buffered IJAddToValues.
* A[global_row, cnt] += RAP_diag_data[j] */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (lump_percent < 1.0)
{
/* Preserve row sum by updating diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
/* Update mirror entry */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
/* Update mirror entry diagonal */
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
/*}*/
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for (k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k]) * sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k]) * sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
global_row, diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5 * RAP_diag_data[j]; }
else
{ lump_value = RAP_diag_data[j]; }
cnt = col_indx_RAP + first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if (col_indx_RAP == col_indx_Pattern)
{
cnt = col_indx_RAP + first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
cnt, RAP_diag_data[j] );
/* Only go to the next entry in Pattern, if this is not the end of a row */
if ( current_Pattern_j < Pattern_diag_i[i + 1] - 1 )
{
current_Pattern_j += 1;
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if (col_indx_RAP > col_indx_Pattern)
{
for (; current_Pattern_j < Pattern_diag_i[i + 1]; current_Pattern_j++)
{
col_indx_Pattern = Pattern_diag_j[current_Pattern_j];
if (col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if (col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
/*
* Eliminate Entries In RAP_offd
* The structure of this for-loop is very similar to the RAP_diag for-loop,
* but not so similar that the two loops should be combined into a single function.
* */
if (num_cols_RAP_offd)
{
for (i = 0; i < num_variables; i++)
{
global_row = i + first_col_diag_RAP;
row_start = RAP_offd_i[i];
row_end = RAP_offd_i[i + 1];
has_row_ended = 0;
/* Only do work if row has nonzeros */
if ( row_start < row_end)
{
current_Pattern_j = Pattern_offd_i[i];
Pattern_offd_indices_len = Pattern_offd_i[i + 1] - Pattern_offd_i[i];
if ( (Pattern_offd_j != NULL) && (Pattern_offd_indices_len > 0) )
{ col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ]; }
else
{
/* if Pattern_offd_j is not allocated or this is a zero-length row,
then all entries need to be lumped.
This is analogous to the has_row_ended=1 situation. */
col_indx_Pattern = -1;
has_row_ended = 1;
}
/* Grab this row's indices out of Pattern offd and diag. This will
* be for computing index set intersections for lumping. The above
* loop over RAP_diag ensures adequate length of Pattern_offd_indices */
/* Ensure adequate length */
hypre_GrabSubArray(Pattern_offd_j,
Pattern_offd_i[i], Pattern_offd_i[i + 1] - 1,
col_map_offd_Pattern, Pattern_offd_indices);
/* No need to grab info out of Pattern_diag_j[...], here we just start from
* Pattern_diag_i[i] and end at index Pattern_diag_i[i+1] - 1. We do need to
* ignore the diagonal entry in Pattern, because we don't lump entries there */
if ( Pattern_diag_j[Pattern_diag_i[i]] == i )
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i] + 1]);
Pattern_diag_indices_len = Pattern_diag_i[i + 1] - Pattern_diag_i[i] - 1;
}
else
{
Pattern_indices_ptr = &( Pattern_diag_j[Pattern_diag_i[i]]);
Pattern_diag_indices_len = Pattern_diag_i[i + 1] - Pattern_diag_i[i];
}
}
for (j = row_start; j < row_end; j++)
{
/* Ignore zero entries in RAP */
if ( RAP_offd_data[j] != 0.0)
{
/* In general for all the offd_j arrays, we have to indirectly
* index with the col_map_offd array to get a global index */
col_indx_RAP = col_map_offd_RAP[ RAP_offd_j[j] ];
/* The entry in RAP does not appear in Pattern, so LUMP it */
if ( (col_indx_RAP < col_indx_Pattern) || has_row_ended)
{
/* The row_indx_Sext could be found with
row_indx_Sext = hypre_BinarySearch(col_map_offd_RAP, col_indx_RAP, num_cols_RAP_offd);
but we already know the answer: */
row_indx_Sext = RAP_offd_j[j];
/* Grab the indices for row row_indx_Sext from the offd and diag parts. This will
* be for computing lumping locations */
S_offd_indices_len = S_ext_offd_i[row_indx_Sext + 1] - S_ext_offd_i[row_indx_Sext];
if (S_offd_indices_allocated_len < S_offd_indices_len)
{
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
S_offd_indices = hypre_CTAlloc(HYPRE_BigInt, S_offd_indices_len, HYPRE_MEMORY_HOST);
S_offd_indices_allocated_len = S_offd_indices_len;
}
/* Grab sub array from col_map, corresponding to the slice of S_ext_offd_j */
hypre_GrabSubArray(S_ext_offd_j, S_ext_offd_i[row_indx_Sext], S_ext_offd_i[row_indx_Sext + 1] - 1,
col_map_offd_Sext, S_offd_indices);
/* No need to grab info out of S_ext_diag_j[...], here we just start from
* S_ext_diag_i[row_indx_Sext] and end at index S_ext_diag_i[row_indx_Sext+1] - 1 */
/* Intersect the diag and offd pieces, remembering that the
* diag array will need to have the offset +first_col_diag_RAP */
cnt = hypre_max(S_offd_indices_len, Pattern_offd_indices_len);
if (offd_intersection_allocated_len < cnt)
{
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
offd_intersection = hypre_CTAlloc(HYPRE_BigInt, cnt, HYPRE_MEMORY_HOST);
offd_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
offd_intersection_allocated_len = cnt;
}
hypre_IntersectTwoBigArrays(S_offd_indices,
&(S_ext_offd_data[ S_ext_offd_i[row_indx_Sext] ]),
S_offd_indices_len,
Pattern_offd_indices,
Pattern_offd_indices_len,
offd_intersection,
offd_intersection_data,
&offd_intersection_len);
/* Now, intersect the indices for the diag block. */
cnt = hypre_max(Pattern_diag_indices_len,
S_ext_diag_i[row_indx_Sext + 1] - S_ext_diag_i[row_indx_Sext] );
if (diag_intersection_allocated_len < cnt)
{
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
diag_intersection = hypre_CTAlloc(HYPRE_Int, cnt, HYPRE_MEMORY_HOST);
diag_intersection_data = hypre_CTAlloc(HYPRE_Real, cnt, HYPRE_MEMORY_HOST);
diag_intersection_allocated_len = cnt;
}
hypre_IntersectTwoArrays( &(S_ext_diag_j[S_ext_diag_i[row_indx_Sext]]),
&(S_ext_diag_data[ S_ext_diag_i[row_indx_Sext] ]),
S_ext_diag_i[row_indx_Sext + 1] - S_ext_diag_i[row_indx_Sext],
Pattern_indices_ptr,
Pattern_diag_indices_len,
diag_intersection,
diag_intersection_data,
&diag_intersection_len);
/* Loop over these intersections, and lump a constant fraction of
* RAP_offd_data[j] to each entry */
intersection_len = diag_intersection_len + offd_intersection_len;
if (intersection_len > 0)
{
/* Sum the strength-of-connection values from row
* row_indx_Sext in S, corresponding to the indices we are
* collapsing to in row i. This will give us our collapsing
* weights. */
sum_strong_neigh = 0.0;
for (k = 0; k < diag_intersection_len; k++)
{ sum_strong_neigh += fabs(diag_intersection_data[k]); }
for (k = 0; k < offd_intersection_len; k++)
{ sum_strong_neigh += fabs(offd_intersection_data[k]); }
sum_strong_neigh = RAP_offd_data[j] / sum_strong_neigh;
/* When lumping with the diag_intersection, must offset column index */
for (k = 0; k < diag_intersection_len; k++)
{
lump_value = lump_percent * fabs(diag_intersection_data[k]) * sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(diag_intersection_data[k]) * sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
cnt = diag_intersection[k] + first_col_diag_RAP;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, cnt, lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, global_row, lump_value);
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, cnt, cnt, neg_lump_value );
}
}
/* The offd_intersection has global column indices, i.e., the
* col_map arrays contain global indices */
for (k = 0; k < offd_intersection_len; k++)
{
lump_value = lump_percent * fabs(offd_intersection_data[k]) * sum_strong_neigh;
diagonal_lump_value = (1.0 - lump_percent) * fabs(offd_intersection_data[k]) * sum_strong_neigh;
neg_lump_value = -1.0 * lump_value;
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row,
offd_intersection[k], lump_value );
if (lump_percent < 1.0)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, global_row,
diagonal_lump_value );
}
/* Update mirror entries, if symmetric collapsing */
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
global_row, lump_value );
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size,
&ijbuf_sym_rowcounter, &ijbuf_sym_data,
&ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, offd_intersection[k],
offd_intersection[k], neg_lump_value );
}
}
}
/* If intersection is empty, do not eliminate entry */
else
{
/* Don't forget to update mirror entry if collapsing symmetrically */
if (sym_collapse)
{ lump_value = 0.5 * RAP_offd_data[j]; }
else
{ lump_value = RAP_offd_data[j]; }
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
lump_value );
if (sym_collapse)
{
hypre_NonGalerkinIJBufferWrite( ijmatrix,
&ijbuf_sym_cnt, ijbuf_size, &ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols, col_indx_RAP, global_row,
lump_value );
}
}
}
/* The entry in RAP appears in Pattern, so keep it */
else if (col_indx_RAP == col_indx_Pattern)
{
/* For the offd structure, col_indx_RAP is a global dof number */
hypre_NonGalerkinIJBufferWrite( ijmatrix, &ijbuf_cnt, ijbuf_size, &ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols, global_row, col_indx_RAP,
RAP_offd_data[j]);
/* Only go to the next entry in Pattern, if this is not the end of a row */
if ( current_Pattern_j < Pattern_offd_i[i + 1] - 1 )
{
current_Pattern_j += 1;
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
}
else
{ has_row_ended = 1;}
}
/* Increment col_indx_Pattern, and repeat this loop iter for current
* col_ind_RAP value */
else if (col_indx_RAP > col_indx_Pattern)
{
for (; current_Pattern_j < Pattern_offd_i[i + 1]; current_Pattern_j++)
{
col_indx_Pattern = col_map_offd_Pattern[ Pattern_offd_j[current_Pattern_j] ];
if (col_indx_RAP <= col_indx_Pattern)
{ break;}
}
/* If col_indx_RAP is still greater (i.e., we've reached a row end), then
* we need to lump everything else in this row */
if (col_indx_RAP > col_indx_Pattern)
{ has_row_ended = 1; }
/* Decrement j, in order to repeat this loop iteration for the current
* col_indx_RAP value */
j--;
}
}
}
}
}
/* For efficiency, we do a buffered IJAddToValues.
* This empties the buffer of any remaining values */
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_cnt, ijbuf_rowcounter,
&ijbuf_data, &ijbuf_cols, &ijbuf_rownums, &ijbuf_numcols);
if (sym_collapse)
hypre_NonGalerkinIJBufferEmpty(ijmatrix, ijbuf_size, &ijbuf_sym_cnt, ijbuf_sym_rowcounter,
&ijbuf_sym_data, &ijbuf_sym_cols, &ijbuf_sym_rownums,
&ijbuf_sym_numcols);
/* Assemble non-Galerkin matrix, and overwrite current RAP */
ierr += HYPRE_IJMatrixAssemble (ijmatrix);
ierr += HYPRE_IJMatrixGetObject( ijmatrix, (void**) RAP_ptr);
/* Optional diagnostic matrix printing */
if (0)
{
hypre_sprintf(filename, "Pattern_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(Pattern, 0, 0, filename);
hypre_sprintf(filename, "Strength_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(S, 0, 0, filename);
hypre_sprintf(filename, "RAP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(RAP, 0, 0, filename);
hypre_sprintf(filename, "RAPc_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(*RAP_ptr, 0, 0, filename);
hypre_sprintf(filename, "AP_%d.ij", global_num_vars);
hypre_ParCSRMatrixPrintIJ(AP, 0, 0, filename);
}
/* Free matrices and variables and arrays */
hypre_TFree(ijbuf_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_numcols, HYPRE_MEMORY_DEVICE);
if (sym_collapse)
{
hypre_TFree(ijbuf_sym_data, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_cols, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_rownums, HYPRE_MEMORY_DEVICE);
hypre_TFree(ijbuf_sym_numcols, HYPRE_MEMORY_DEVICE);
}
hypre_TFree(Pattern_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_i, HYPRE_MEMORY_HOST);
hypre_TFree(S_offd_indices, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(offd_intersection_data, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection, HYPRE_MEMORY_HOST);
hypre_TFree(diag_intersection_data, HYPRE_MEMORY_HOST);
if (S_ext_diag_size)
{
hypre_TFree(S_ext_diag_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_diag_data, HYPRE_MEMORY_HOST);
}
if (S_ext_offd_size)
{
hypre_TFree(S_ext_offd_j, HYPRE_MEMORY_HOST);
hypre_TFree(S_ext_offd_data, HYPRE_MEMORY_HOST);
}
if (num_cols_offd_Sext)
{ hypre_TFree(col_map_offd_Sext, HYPRE_MEMORY_HOST); }
ierr += hypre_ParCSRMatrixDestroy(Pattern);
ierr += hypre_ParCSRMatrixDestroy(RAP);
ierr += hypre_ParCSRMatrixDestroy(S);
ierr += HYPRE_IJMatrixSetObjectType(ijmatrix, -1);
ierr += HYPRE_IJMatrixDestroy(ijmatrix);
/*end_time = hypre_MPI_Wtime();
if(my_id == 0)
{ fprintf(stdout, "NonGalerkin Time: %1.2e\n", end_time-start_time); } */
return ierr;
}
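/*
 * Illustration (not hypre code; kept under #if 0 so it stays out of the
 * build): the lumping above distributes a dropped entry RAP_ij over the
 * pattern neighbors k of row i in proportion to the strength values |S_ik|,
 * with the (1 - lump_percent) remainder sent to the diagonal. A minimal
 * sketch of that weighting over hypothetical plain arrays for one row:
 */
#if 0
static void lump_entry_sketch(double rap_ij, double lump_percent,
                              const double *S_row, const int *nbrs, int n_nbrs,
                              double *row_vals, double *diag_val)
{
   double sum = 0.0, w;
   int k;
   for (k = 0; k < n_nbrs; k++) { sum += fabs(S_row[k]); }
   for (k = 0; k < n_nbrs; k++)
   {
      /* neighbor k receives a fraction proportional to |S_ik| */
      w = fabs(S_row[k]) * rap_ij / sum;
      row_vals[nbrs[k]] += lump_percent * w;
      /* the remainder goes to the diagonal, as in the loops above */
      *diag_val += (1.0 - lump_percent) * w;
   }
}
#endif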
|
GB_binop__ge_uint8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__ge_uint8
// A.*B function (eWiseMult): GB_AemultB__ge_uint8
// A*D function (colscale): GB_AxD__ge_uint8
// D*A function (rowscale): GB_DxB__ge_uint8
// C+=B function (dense accum): GB_Cdense_accumB__ge_uint8
// C+=b function (dense accum): GB_Cdense_accumb__ge_uint8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__ge_uint8
// C=scalar+B GB_bind1st__ge_uint8
// C=scalar+B' GB_bind1st_tran__ge_uint8
// C=A+scalar GB_bind2nd__ge_uint8
// C=A'+scalar GB_bind2nd_tran__ge_uint8
// C type: bool
// A type: uint8_t
// B,b type: uint8_t
// BinaryOp: cij = (aij >= bij)
#define GB_ATYPE \
uint8_t
#define GB_BTYPE \
uint8_t
#define GB_CTYPE \
bool
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
0
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
bool t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = (x >= y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_GE || GxB_NO_UINT8 || GxB_NO_GE_UINT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__ge_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__ge_uint8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
#include "GB_dense_subassign_23_template.c"
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__ge_uint8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if 0
{
// get the scalar b for C += b, of type uint8_t
uint8_t bwork = (*((uint8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__ge_uint8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__ge_uint8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *GB_RESTRICT Cx = (bool *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__ge_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__ge_uint8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__ge_uint8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
bool *Cx = (bool *) Cx_output ;
uint8_t x = (*((uint8_t *) x_input)) ;
uint8_t *Bx = (uint8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint8_t bij = Bx [p] ;
Cx [p] = (x >= bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__ge_uint8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
bool *Cx = (bool *) Cx_output ;
uint8_t *Ax = (uint8_t *) Ax_input ;
uint8_t y = (*((uint8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint8_t aij = Ax [p] ;
Cx [p] = (aij >= y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (x >= aij) ; \
}
GrB_Info GB_bind1st_tran__ge_uint8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t x = (*((const uint8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint8_t aij = Ax [pA] ; \
Cx [pC] = (aij >= y) ; \
}
GrB_Info GB_bind2nd_tran__ge_uint8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint8_t y = (*((const uint8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
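//------------------------------------------------------------------------------
// Illustration (not generated GraphBLAS code; kept under #if 0): GB_bind1st
// above applies z = f(x,bij) with the scalar x bound to the first argument.
// Stripped of the bitmap test and threading, the kernel reduces to this
// sketch (the function name here is hypothetical):
//------------------------------------------------------------------------------
#if 0
static void bind1st_ge_uint8_sketch (bool *Cx, uint8_t x, const uint8_t *Bx, int64_t n)
{
    for (int64_t p = 0 ; p < n ; p++)
    {
        Cx [p] = (x >= Bx [p]) ;    // cij = (x >= bij)
    }
}
#endif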
|
django_scrypt_fmt_plug.c | /* scrypt cracker patch for JtR. Hacked together during May of 2013 by Dhiru
* Kholia <dhiru at openwall.com>.
*
* This software is Copyright (c) 2013 Dhiru Kholia <dhiru at openwall.com> and
* it is hereby released to the general public under the following terms:
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_django_scrypt;
#elif FMT_REGISTERS_H
john_register_one(&fmt_django_scrypt);
#else
#include <string.h>
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "base64.h"
#include "escrypt/crypto_scrypt.h"
#ifdef _OPENMP
static int omp_t = 1;
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 1 // So slow a format, a multiplier is NOT needed
#endif
#endif
#include "memdbg.h"
#define FORMAT_LABEL "django-scrypt"
#define FORMAT_NAME ""
#define FORMAT_TAG "scrypt$"
#define TAG_LENGTH (sizeof(FORMAT_TAG)-1)
#ifdef __XOP__
#define ALGORITHM_NAME "Salsa20/8 128/128 XOP"
#elif defined(__AVX__)
#define ALGORITHM_NAME "Salsa20/8 128/128 AVX"
#elif defined(__SSE2__)
#define ALGORITHM_NAME "Salsa20/8 128/128 SSE2"
#else
#define ALGORITHM_NAME "Salsa20/8 32/" ARCH_BITS_STR
#endif
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 64
#define SALT_SIZE sizeof(struct custom_salt)
#define BINARY_ALIGN 4
#define SALT_ALIGN 4
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
/* notastrongpassword => scrypt$NBGmaGIXijJW$14$8$1$64$achPt01SbytSt+F3CcCFgEPr96+/j9iCTdejFdAARZ8mzfejrP64TJ5XBJa3gYwuCKOEGlw2E/lWCWS7LeS6CA== */
static struct fmt_tests scrypt_tests[] = {
/* https://pypi.python.org/pypi/django-scrypt/ format hashes */
{"scrypt$NBGmaGIXijJW$14$8$1$64$achPt01SbytSt+F3CcCFgEPr96+/j9iCTdejFdAARZ8mzfejrP64TJ5XBJa3gYwuCKOEGlw2E/lWCWS7LeS6CA==", "notastrongpassword"},
{"scrypt$Cj0PzdtT3qS2$14$8$1$64$qn4CDnM8CcIBNrpQXHo6ti8vSUoSXj7GBFy7k1bp5wPs8jKjh/gHZ+qM9uk6LbcVHm02yBaI5WCbDm/Shq/MXA==", "realmenuseJtR"},
{NULL}
};
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static struct custom_salt {
/* int type; */ // not used (a different type would require a new JtR format)
int N;
int r;
int p;
unsigned char salt[32];
} *cur_salt;
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_key));
crypt_out = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_out));
}
static void done(void)
{
MEM_FREE(crypt_out);
MEM_FREE(saved_key);
}
static int isDigits(char *p) {
while (*p && *p != '$') {
if (*p < '0' || *p > '9')
return 0;
++p;
}
return 1;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
char *cp, *cp2;
if (strncmp(ciphertext, FORMAT_TAG, TAG_LENGTH)) return 0;
cp = ciphertext + TAG_LENGTH;
cp2 = strchr(cp, '$');
if (!cp2) return 0;
if (cp2-cp > 32) return 0;
cp = &cp2[1];
if (isDigits(cp) == 0) return 0;
cp = strchr(cp, '$');
if (!cp) return 0;
++cp;
if (isDigits(cp) == 0) return 0;
cp = strchr(cp, '$');
if (!cp) return 0;
++cp;
if (isDigits(cp) == 0) return 0;
cp = strchr(cp, '$');
if (!cp) return 0;
++cp;
if (isDigits(cp) == 0) return 0;
cp = strchr(cp, '$');
if (!cp) return 0;
++cp;
if (strlen(cp) != 88) return 0;
return 1;
}
static void *get_salt(char *ciphertext)
{
char *ctcopy = strdup(ciphertext);
char *keeptr = ctcopy;
char *p;
/* ensure alignment */
static union {
struct custom_salt _cs;
ARCH_WORD_32 dummy;
} un;
static struct custom_salt *cs = &(un._cs);
ctcopy += TAG_LENGTH;
p = strtokm(ctcopy, "$");
strncpy((char*)cs->salt, p, 32);
p = strtokm(NULL, "$");
cs->N = atoi(p);
p = strtokm(NULL, "$");
cs->r = atoi(p);
p = strtokm(NULL, "$");
cs->p = atoi(p);
MEM_FREE(keeptr);
return (void *)cs;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE + 1];
ARCH_WORD dummy;
} buf;
unsigned char *out = buf.c;
char *p;
p = strrchr(ciphertext, '$') + 1;
base64_decode(p, strlen(p), (char*)out);
return out;
}
static int get_hash_0(int index) { return crypt_out[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { return crypt_out[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { return crypt_out[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { return crypt_out[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { return crypt_out[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { return crypt_out[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { return crypt_out[index][0] & PH_MASK_6; }
static void set_salt(void *salt)
{
cur_salt = (struct custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#ifdef _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index++)
#endif
{
if (crypto_scrypt((unsigned char*)saved_key[index], strlen((char*)saved_key[index]),
cur_salt->salt, strlen((char*)cur_salt->salt),
(1ULL) << cur_salt->N, cur_salt->r,
cur_salt->p, (unsigned char*)crypt_out[index],
BINARY_SIZE) == -1)
{
memset(crypt_out[index], 0, sizeof(crypt_out[index]));
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index = 0;
#ifdef _OPENMP
for (; index < count; index++)
#endif
if (!memcmp(binary, crypt_out[index], ARCH_SIZE))
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static void scrypt_set_key(char *key, int index)
{
int saved_len = strlen(key);
if (saved_len > PLAINTEXT_LENGTH)
saved_len = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, saved_len);
saved_key[index][saved_len] = 0;
}
static char *get_key(int index)
{
return saved_key[index];
}
static unsigned int tunable_cost_N(void *salt)
{
static struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->N;
}
static unsigned int tunable_cost_r(void *salt)
{
static struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->r;
}
static unsigned int tunable_cost_p(void *salt)
{
static struct custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->p;
}
struct fmt_main fmt_django_scrypt = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP,
{
"N",
"r",
"p"
},
{ FORMAT_TAG },
scrypt_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
valid,
fmt_default_split,
get_binary,
get_salt,
{
tunable_cost_N,
tunable_cost_r,
tunable_cost_p
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
scrypt_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
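/* Illustration (not part of the format; kept under #if 0): valid() and
 * get_salt() above parse hashes laid out as
 *     scrypt$<salt>$<N>$<r>$<p>$<keylen>$<base64 digest>
 * where N is log2 of the scrypt cost (crypt_all computes 1ULL << N). A
 * minimal sketch of pulling the tunable costs out of such a string with
 * sscanf (assumes <stdio.h>; the helper name is hypothetical): */
#if 0
static int parse_django_scrypt_costs(const char *hash, int *N, int *r, int *p)
{
	char salt[33];
	/* %32[^$] stops the salt at the next '$'; 4 conversions on success */
	return sscanf(hash, "scrypt$%32[^$]$%d$%d$%d", salt, N, r, p) == 4;
}
#endif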
|
deviceCode-parallel.c | /*
Copyright (c) 2020-2021 Hugo Melder and openTIDAL contributors
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
/* OTSession tests
*/
#include "../../Source/openTIDAL.h"
#include <stdio.h>
int
main (void)
{
struct OTSessionContainer *session;
enum OTTypes type = CONTENT_CONTAINER;
int i;
session = OTSessionInit ();
if (!session)
return -1;
if (!(OTSessionClientPair (session, "CLIENTID", "CLIENTSECRET") == 0))
return -1;
#pragma omp parallel for
for (i = 0; i < 10; i++)
{
void *handle;
struct OTContentContainer *content;
/* Creating and closing a session for one
* request is not efficient. This is only a test. */
handle = OTHttpThreadHandleCreate ();
content = OTServiceGetDeviceCode (session, handle);
if (content)
{
printf ("Response Not NULL\n");
if (content->status == SUCCESS)
{
struct OTJsonContainer *deviceCode = NULL;
deviceCode = OTJsonGetObjectItem (content->tree, "deviceCode");
printf ("DeviceCode: %s\n", OTJsonGetStringValue (deviceCode));
}
}
OTDeallocContainer (content, type);
OTHttpThreadHandleCleanup (handle);
}
OTSessionCleanup (session);
return 0;
}
|
GB_binop__land_int8.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__land_int8
// A.*B function (eWiseMult): GB_AemultB__land_int8
// A*D function (colscale): GB_AxD__land_int8
// D*A function (rowscale): GB_DxB__land_int8
// C+=B function (dense accum): GB_Cdense_accumB__land_int8
// C+=b function (dense accum): GB_Cdense_accumb__land_int8
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__land_int8
// C=scalar+B GB_bind1st__land_int8
// C=scalar+B' GB_bind1st_tran__land_int8
// C=A+scalar GB_bind2nd__land_int8
// C=A'+scalar GB_bind2nd_tran__land_int8
// C type: int8_t
// A type: int8_t
// B,b type: int8_t
// BinaryOp: cij = ((aij != 0) && (bij != 0))
#define GB_ATYPE \
int8_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int8_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int8_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int8_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = ((x != 0) && (y != 0)) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LAND || GxB_NO_INT8 || GxB_NO_LAND_INT8)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__land_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__land_int8
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__land_int8
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__land_int8
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__land_int8
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *GB_RESTRICT Cx = (int8_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__land_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__land_int8
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__land_int8
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t *Cx = (int8_t *) Cx_output ;
int8_t x = (*((int8_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = ((x != 0) && (bij != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__land_int8
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int8_t *Cx = (int8_t *) Cx_output ;
int8_t *Ax = (int8_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int8_t aij = Ax [p] ;
Cx [p] = ((aij != 0) && (y != 0)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((x != 0) && (aij != 0)) ; \
}
GrB_Info GB_bind1st_tran__land_int8
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t x = (*((const int8_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = ((aij != 0) && (y != 0)) ; \
}
GrB_Info GB_bind2nd_tran__land_int8
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
a1p.c | #define N 200000000
int a[N],b[N];
long long s=0;
main()
{
int i,j;
/* inicialitzacio, no en paral.lel */
#pragma omp parallel
{
#pragma omp for
for(i=0;i<N;i++)
{
a[i]=1;
b[i]=2;
}
#pragma omp for
for (i=0;i<N;i++)
b[i] += a[i];
printf("Valor i %d, de b[i] %d \n",i-1,b[i-1]);
}
for (i=0;i<N;i++)
s+=b[i];
printf("Valor %d, de b %d suma total: %ld\n",i-1,b[i-1],s);
}
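/* Illustration (kept under #if 0): the final summation above is serial; an
OpenMP reduction parallelizes it without a data race on the accumulator.
A minimal sketch: */
#if 0
long long parallel_sum(const int *v, int n)
{
long long sum = 0;
int i;
/* each thread keeps a private partial sum; OpenMP combines them at the end */
#pragma omp parallel for reduction(+:sum)
for (i = 0; i < n; i++)
sum += v[i];
return sum;
}
#endif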
|
pooling_layer.h | //Tencent is pleased to support the open source community by making FeatherCNN available.
//Copyright (C) 2018 THL A29 Limited, a Tencent company. All rights reserved.
//Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
//in compliance with the License. You may obtain a copy of the License at
//
//https://opensource.org/licenses/BSD-3-Clause
//
//Unless required by applicable law or agreed to in writing, software distributed
//under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
//CONDITIONS OF ANY KIND, either express or implied. See the License for the
//specific language governing permissions and limitations under the License.
#pragma once
#include "../feather_simple_generated.h"
#include "../layer.h"
#include <math.h>
#include <limits>
#define MAX(a,b) (((a)>(b))?(a):(b))
#define MIN(a,b) (((a)<(b))?(a):(b))
namespace feather
{
void ave_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
float total = 0.0;
for (size_t m = 0; m != kernel_h; ++m)
{
for (size_t n = 0; n != kernel_w; ++n)
{
size_t pos = m * ldin + n;
total += in[pos];
}
}
*out = total / kernel_h / kernel_w;
}
void max_pool_inner_kernel(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w)
{
float max = -std::numeric_limits<float>::max(); // allow negative inputs
for (size_t m = 0; m != kernel_h; ++m)
{
for (size_t n = 0; n != kernel_w; ++n)
{
size_t pos = m * ldin + n;
max = (in[pos] > max) ? in[pos] : max;
}
}
*out = max;
}
class PoolingLayer : public Layer
{
public:
PoolingLayer(const LayerParameter *layer_param, const RuntimeParameter<float>* rt_param)
: Layer(layer_param, rt_param),
stride_height(1),
stride_width(1)
{
const PoolingParameter *pooling_param = layer_param->pooling_param();
kernel_height = pooling_param->kernel_h();
kernel_width = pooling_param->kernel_w();
pad_height = pooling_param->pad_h();
pad_width = pooling_param->pad_w();
stride_height = pooling_param->stride_h();
stride_width = pooling_param->stride_w();
stride_height = (stride_height <= 0) ? 1 : stride_height;
stride_width = (stride_width <= 0) ? 1 : stride_width;
global_pooling = pooling_param->global_pooling();
this->method = pooling_param->pool();
switch(this->method)
{
case PoolingParameter_::PoolMethod_MAX_:
_pool_inner_kernel = max_pool_inner_kernel;
break;
case PoolingParameter_::PoolMethod_AVE:
_pool_inner_kernel = ave_pool_inner_kernel;
break;
default:
fprintf(stderr, "Unsupported pool method\n");
}
printf("kernel (%ld %ld) pad (%ld %ld) stride (%ld %ld) global_pooling %d\n",
kernel_height, kernel_width, pad_height, pad_width, stride_height, stride_width, global_pooling);
}
int Forward()
{
//fprintf(stderr, "pooling output (%d %d)\n", output_height, output_width);
//printf("input shape %ld %ld %ld kernel shape %ld %ld stride %ld %ld\n", input_channels, input_height, input_width, kernel_height, kernel_width, stride_height, stride_width);
const float *input = _bottom_blobs[_bottom[0]]->data();
float *output = _top_blobs[_top[0]]->data();
float *p = output;
int slot = input_channels*output_height;
#pragma omp parallel for schedule(static) num_threads(num_threads)
// for (int u=0;u<slot;u++)
// {
for (int i=0; i<input_channels; ++i)
{
for (int j=0; j<output_height; j ++)
{
// int i=slot/output_height, j=slot%output_height;
float *p = output + i*output_height*output_width + j*output_width;
for(int l=0; l<output_width; l++)
p[l] = (this->method != PoolingParameter_::PoolMethod_MAX_ ? 0 : -std::numeric_limits<float>::max());
int tmp_pos = j*(int)stride_height - (int)pad_height;
int x_min = MAX(tmp_pos, 0);
int x_max = MIN((int)(tmp_pos+kernel_height), (int) input_height);
for(int x=x_min; x<x_max; ++x)
{
int xpos = i * input_height * input_width + x*input_width;
for (int k = 0; k<output_width; k ++)
{
float total = (this->method != PoolingParameter_::PoolMethod_MAX_?0:-1*std::numeric_limits<float>::max());
int counter=0;
int local_pos = k*(int)stride_width - (int)pad_width;
int y_min = MAX(local_pos, 0);
int y_max = MIN((int)(local_pos + kernel_width), (int) input_width);
for (int y=y_min; y < y_max; ++y)
{
float value = input[xpos + y];
if(this->method != PoolingParameter_::PoolMethod_MAX_) total += value, counter++;
else total = total>value?total:value;
}
if(this->method != PoolingParameter_::PoolMethod_MAX_)
p[k] += total / (counter) / kernel_height;
else p[k] = (p[k]>total) ? p[k]:total;
}
}
}
}
/*
#if 0
f(0)
#else
if(this->method == PoolingParameter_::PoolMethod_MAX_)
#endif
{
float f_minimal = std::numeric_limits<float>::max();
f_minimal = -f_minimal;
//printf("minimal float %f\n", f_minimal);
//Init output
for(int i = 0; i < output_channels * output_height * output_width; ++i)
{
output[i] = f_minimal;
}
const size_t img_size = input_height * input_width;
#pragma omp parallel for num_threads(num_threads) collapse(3)
for (size_t i = 0; i < output_channels; ++i)
{
for (size_t j = 0; j < output_height; ++j)
{
for(size_t u = 0; u < kernel_height; ++u)
{
int row = j * stride_height + u - pad_height;
if(row < 0 || row >= input_height)
continue;
for (size_t k = 0; k < output_width; ++k)
{
float* out_ptr = output + i * output_height * output_width + j * output_width + k;
float max = *out_ptr;
for(size_t v = 0; v < kernel_width; ++v)
{
int col = k * stride_height + v - pad_width;
if(col < 0 || col >= input_width)
continue;
const float* in_ptr = input + i * img_size + row * input_width + col;
float data = *in_ptr;
max = (max > data) ? max : data;
}
*out_ptr = max;
}
}
}
}
}
else
{
for (size_t i = 0; i < output_channels; ++i)
{
for (size_t j = 0; j < output_height; ++j)
{
for (size_t k = 0; k < output_width; ++k)
{
#if 0
float total = 0.0;
for (size_t m = 0; m != kernel_height; ++m)
{
for (size_t n = 0; n != kernel_width; ++n)
{
size_t pos = i * input_height* input_width + (j + m)* input_width + k + n;
total += input[pos];
}
}
*p++ = total / (kernel_height * kernel_width);
#else
size_t border_h = input_height - j * stride_height + pad_height;
size_t border_w = input_width - k * stride_width + pad_width;
size_t kernel_h = (kernel_height < border_h) ? kernel_height : border_h;
size_t kernel_w = (kernel_width < border_w) ? kernel_width : border_w;
//printf("pool shape %ld %ld %ld %ld %ld %ld %d %d\n", kernel_h, kernel_w, output_height, output_width, border_h, border_w, j, k);
int row = j * stride_height - pad_height;
int col = k * stride_width - pad_width;
if(row < 0)
{
kernel_h = kernel_height + row;
row = 0;
}
if(col < 0)
{
kernel_w = kernel_width + col;
col = 0;
}
size_t pos = i * input_height * input_width + row * input_width + col;
_pool_inner_kernel(p, input + pos, input_width, kernel_h, kernel_w);
++p;
#endif
}
}
}
}
*/
return 0;
}
int GenerateTopBlobs()
{
//Only accept a single bottom blob.
const Blob<float> *bottom_blob = _bottom_blobs[_bottom[0]];
input_height = bottom_blob->height();
input_width = bottom_blob->width();
input_channels = bottom_blob->channels();
//printf("layer %s\n", _name.c_str());
//printf("input %lu %lu %lu\n", input_channels, input_height, input_width);
if (global_pooling)
{
kernel_height = input_height;
kernel_width = input_width;
output_height = 1;
output_width = 1;
output_channels = input_channels;
}
else
{
//General pooling.
output_channels = input_channels;
output_height = static_cast<int>(ceil(static_cast<float>(input_height + 2 * pad_height - kernel_height) / stride_height)) + 1;
output_width = static_cast<int>(ceil(static_cast<float>(input_width + 2 * pad_width - kernel_width) / stride_width)) + 1;
}
_top_blobs[_top[0]] = new Blob<float>(1, output_channels, output_height, output_width);
_top_blobs[_top[0]]->Alloc();
//_top_blobs[_top[0]]->PrintBlobInfo();
return 0;
}
private:
size_t input_height;
size_t input_width;
size_t input_channels;
size_t output_height;
size_t output_width;
size_t output_channels;
size_t pad_height;
size_t pad_width;
size_t kernel_height;
size_t kernel_width;
size_t stride_height;
size_t stride_width;
bool global_pooling;
PoolingParameter_::PoolMethod method;
void (*_pool_inner_kernel)(float* out, const float* in, const size_t ldin, const size_t kernel_h, const size_t kernel_w);
};
};
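// Illustration (not FeatherCNN code; kept under #if 0): GenerateTopBlobs()
// above sizes the output as ceil((input + 2*pad - kernel) / stride) + 1, the
// ceil convention used by Caffe-style pooling. Worked example: input 7,
// kernel 3, pad 0, stride 2 gives ceil(4/2) + 1 = 3 output positions. The
// same value computed with integer arithmetic (the helper is hypothetical):
#if 0
static inline size_t pool_out_dim(size_t in, size_t kernel, size_t pad, size_t stride)
{
    // (x + stride - 1) / stride is the integer ceiling of x / stride
    return (in + 2 * pad - kernel + stride - 1) / stride + 1;
}
#endif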
|
omp-parallel-single-nowait.c | #include <omp.h>
#include <stdio.h>
#define LEN 20
int main(void)
{
int num[LEN] = {0}, k=0;
#pragma omp parallel
#pragma omp single nowait
for (k=0; k<LEN; k++)
{
num[k] = omp_get_thread_num();
}
return 0;
}
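/* Illustration (kept under #if 0): "single" makes exactly one thread run the
 * loop above, so every num[k] holds that one thread's id; "nowait" removes
 * the implicit barrier at the end of the single block, letting the other
 * threads run ahead (they still join at the end of the parallel region).
 * A sketch that reports which thread did the work: */
#if 0
void show_single_worker(void)
{
    #pragma omp parallel
    #pragma omp single nowait
    printf("filled by thread %d of %d\n",
           omp_get_thread_num(), omp_get_num_threads());
}
#endif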
|
gemv_x_csr.c | #include "alphasparse/kernel.h"
#include "alphasparse/util.h"
#include "alphasparse/opt.h"
#ifdef S
#include <immintrin.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
#if defined (__AVX512__) && defined(S)
static float gemv_csr_kernel_doti_simd512_unroll4(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){
ALPHA_INT ns64 = (ns >> 6) << 6;
__m512 tmp0,tmp1,tmp2,tmp3;
__m512 vy0,vy1,vy2,vy3;
__m512 vx0,vx1,vx2,vx3;
__m512i vindex0,vindex1,vindex2,vindex3;
float tmp = 0;
tmp0 = _mm512_setzero();
tmp1 = _mm512_setzero();
tmp2 = _mm512_setzero();
tmp3 = _mm512_setzero();
ALPHA_INT i;
for(i = 0; i < ns64;i+=64){
vx0 = _mm512_loadu_ps(&x[i]);
vx1 = _mm512_loadu_ps(&x[i+16]);
vx2 = _mm512_loadu_ps(&x[i+32]);
vx3 = _mm512_loadu_ps(&x[i+48]);
vindex0 = _mm512_loadu_si512((void *)&indx[i]);
vindex1 = _mm512_loadu_si512((void *)&indx[i+16]);
vindex2 = _mm512_loadu_si512((void *)&indx[i+32]);
vindex3 = _mm512_loadu_si512((void *)&indx[i+48]);
vy0 = _mm512_i32gather_ps(vindex0,y,4);
vy1 = _mm512_i32gather_ps(vindex1,y,4);
vy2 = _mm512_i32gather_ps(vindex2,y,4);
vy3 = _mm512_i32gather_ps(vindex3,y,4);
tmp0 = _mm512_fmadd_ps(vx0,vy0,tmp0);
tmp1 = _mm512_fmadd_ps(vx1,vy1,tmp1);
tmp2 = _mm512_fmadd_ps(vx2,vy2,tmp2);
tmp3 = _mm512_fmadd_ps(vx3,vy3,tmp3);
}
for(; i< ns;++i){
tmp += x[i] * y[indx[i]];
}
tmp += _mm512_reduce_add_ps(tmp0) + _mm512_reduce_add_ps(tmp1) + _mm512_reduce_add_ps(tmp2) + _mm512_reduce_add_ps(tmp3);
return tmp;
}
#endif
//
//static float gemv_s_csr_kernel_doti_simd512(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){
// ALPHA_INT ns16 = (ns >> 4) << 4;
// __m512 tmp,vy,vx;
// __m512i vindex;
// float tmp0 = 0;
// tmp = _mm512_setzero();
// ALPHA_INT i;
// for(i = 0; i < ns16;i+=16){
// vx = _mm512_loadu_ps(&x[i]);
// vindex = _mm512_loadu_epi32(&indx[i]);
// vy = _mm512_i32gather_ps(vindex,y,4);
// tmp = _mm512_fmadd_ps(vx,vy,tmp);
// }
// for(; i< ns;++i){
// tmp0 += x[i] * y[indx[i]];
// }
// tmp0 += _mm512_reduce_add_ps(tmp);
// return tmp0;
//}
// static float gemv_s_csr_kernel_doti_unroll4(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){
// ALPHA_INT ns4 = ns & ~3;
// ALPHA_INT i;
// float tmp0 = 0.f;
// float tmp1 = 0.f;
// float tmp2 = 0.f;
// float tmp3 = 0.f;
// for(i = 0; i < ns4;i+=4){
// tmp0 += x[i] * y[indx[i]];
// tmp1 += x[i+1] * y[indx[i+1]];
// tmp2 += x[i+2] * y[indx[i+2]];
// tmp3 += x[i+3] * y[indx[i+3]];
// }
// for(; i< ns;++i){
// tmp0 += x[i] * y[indx[i]];
// }
// return ((tmp0 + tmp1) + (tmp2 + tmp3));
// }
// float gemv_s_csr_kernel_doti(const ALPHA_INT ns,const float* x,const ALPHA_INT* indx,const float* y){
// float tmp0 = 0.f;
// for(ALPHA_INT i = 0; i< ns;++i){
// tmp0 += x[i] * y[indx[i]];
// }
// return tmp0;
// }
// alphasparse_status_t gemv_s_csr(const float alpha,const spmat_csr_s_t* A,const float* x,const float beta,float* y)
// {
// ALPHA_INT m = A->rows;
// ALPHA_INT num_threads = alpha_get_thread_num();
// ALPHA_INT partition[num_threads + 1];
// balanced_partition_row_by_nnz(A->rows_end, m, num_threads, partition);
// #ifdef _OPENMP
// #pragma omp parallel num_threads(num_threads)
// #endif
// {
// ALPHA_INT tid = alpha_get_thread_id();
// ALPHA_INT local_m_s = partition[tid];
// ALPHA_INT local_m_e = partition[tid + 1];
// for (ALPHA_INT i = local_m_s; i < local_m_e; i++)
// {
// y[i] *= beta;
// ALPHA_INT pks = A->rows_start[i];
// ALPHA_INT pke = A->rows_end[i];
// ALPHA_INT pkl = pke - pks;
// // float tmp = gemv_s_csr_kernel_doti(pkl,&A->values[pks],&A->col_indx[pks],x);
// //float tmp = gemv_s_csr_kernel_doti_unroll4(pkl,&A->values[pks],&A->col_indx[pks],x);
// //float tmp = gemv_s_csr_kernel_doti_simd512(pkl,&A->values[pks],&A->col_indx[pks],x);
// float tmp = gemv_s_csr_kernel_doti_simd512_unroll4(pkl,&A->values[pks],&A->col_indx[pks],x);
// y[i] += alpha * tmp;
// }
// }
// return ALPHA_SPARSE_STATUS_SUCCESS;
// }
static ALPHA_Number gemv_kernel_doti_unroll4(const ALPHA_INT ns, const ALPHA_Number *x, const ALPHA_INT *indx, const ALPHA_Number *y)
{
ALPHA_INT ns4 = ((ns >> 2) << 2);
ALPHA_INT i;
ALPHA_Number tmp0, tmp1, tmp2, tmp3;
alpha_setzero(tmp0);
alpha_setzero(tmp1);
alpha_setzero(tmp2);
alpha_setzero(tmp3);
for (i = 0; i < ns4; i += 4)
{
alpha_madde(tmp0, x[i], y[indx[i]]);
alpha_madde(tmp1, x[i + 1], y[indx[i + 1]]);
alpha_madde(tmp2, x[i + 2], y[indx[i + 2]]);
alpha_madde(tmp3, x[i + 3], y[indx[i + 3]]);
}
for (; i < ns; ++i)
{
alpha_madde(tmp0, x[i], y[indx[i]]);
}
alpha_adde(tmp0, tmp1);
alpha_adde(tmp2, tmp3);
alpha_adde(tmp0, tmp2);
return tmp0;
}
static alphasparse_status_t
gemv_csr_unroll4(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y,
ALPHA_INT lrs,
ALPHA_INT lre)
{
for (ALPHA_INT i = lrs; i < lre; i++)
{
ALPHA_INT pks = A->rows_start[i];
ALPHA_INT pke = A->rows_end[i];
ALPHA_INT pkl = pke - pks;
#if defined (__AVX512__) && defined(S)
float tmp = gemv_csr_kernel_doti_simd512_unroll4(pkl,&A->values[pks],&A->col_indx[pks],x);
#else
ALPHA_Number tmp = gemv_kernel_doti_unroll4(pkl, &A->values[pks], &A->col_indx[pks], x);
#endif
// #else
// ALPHA_Number tmp = gemv_kernel_doti_unroll4(pkl, &A->values[pks], &A->col_indx[pks], x);
// #endif
alpha_mule(y[i], beta);
alpha_madde(y[i], alpha, tmp);
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
static alphasparse_status_t
gemv_csr_omp(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *A,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
ALPHA_INT m = A->rows;
ALPHA_INT num_threads = alpha_get_thread_num();
ALPHA_INT partition[num_threads + 1];
balanced_partition_row_by_nnz(A->rows_end, m, num_threads, partition);
#ifdef _OPENMP
#pragma omp parallel num_threads(num_threads)
#endif
{
ALPHA_INT tid = alpha_get_thread_id();
ALPHA_INT local_m_s = partition[tid];
ALPHA_INT local_m_e = partition[tid + 1];
gemv_csr_unroll4(alpha, A, x, beta, y, local_m_s, local_m_e);
}
return ALPHA_SPARSE_STATUS_SUCCESS;
}
alphasparse_status_t
ONAME(const ALPHA_Number alpha,
const ALPHA_SPMAT_CSR *mat,
const ALPHA_Number *x,
const ALPHA_Number beta,
ALPHA_Number *y)
{
return gemv_csr_omp(alpha, mat, x, beta, y);
}
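/*
 * Illustration (not alphasparse code; kept under #if 0): gemv_csr_omp() above
 * relies on balanced_partition_row_by_nnz() to split rows so each thread gets
 * roughly the same number of nonzeros rather than the same number of rows.
 * A minimal sketch of that idea; the real helper's exact behavior is an
 * assumption here (rows_end is taken as a cumulative nonzero count):
 */
#if 0
static void partition_by_nnz_sketch(const ALPHA_INT *rows_end, ALPHA_INT m,
                                    ALPHA_INT nthreads, ALPHA_INT *partition)
{
    ALPHA_INT total = rows_end[m - 1];
    ALPHA_INT t, r = 0;
    partition[0] = 0;
    for (t = 1; t < nthreads; t++)
    {
        ALPHA_INT target = (ALPHA_INT)(((long long)total * t) / nthreads);
        while (r < m && rows_end[r] < target)
            r++; /* first row whose cumulative nnz reaches the target */
        partition[t] = r;
    }
    partition[nthreads] = m;
}
#endif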
|
test_zgemm_nopack.c | /**
*
* @file
*
* PLASMA is a software package provided by:
* University of Tennessee, US,
* University of Manchester, UK.
*
* @precisions normal z -> s d c
*
**/
#include "test.h"
#include "flops.h"
#include "plasma.h"
#include "core_lapack.h"
#include <assert.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <math.h>
#include <omp.h>
#define COMPLEX
/***************************************************************************//**
*
* @brief Tests ZGEMM.
*
* @param[in,out] param - array of parameters
* @param[in] run - whether to run test
*
* Sets used flags in param indicating parameters that are used.
* If run is true, also runs test and stores output parameters.
******************************************************************************/
void test_zgemm_nopack(param_value_t param[], bool run)
{
//================================================================
// Mark which parameters are used.
//================================================================
param[PARAM_TRANSA ].used = true;
param[PARAM_TRANSB ].used = true;
param[PARAM_DIM ].used = PARAM_USE_M | PARAM_USE_N | PARAM_USE_K;
param[PARAM_ALPHA ].used = true;
param[PARAM_BETA ].used = true;
param[PARAM_PADA ].used = true;
param[PARAM_PADB ].used = true;
param[PARAM_PADC ].used = true;
param[PARAM_NB ].used = true;
if (! run)
return;
//================================================================
// Set parameters.
//================================================================
plasma_enum_t transa = plasma_trans_const(param[PARAM_TRANSA].c);
plasma_enum_t transb = plasma_trans_const(param[PARAM_TRANSB].c);
int m = param[PARAM_DIM].dim.m;
int n = param[PARAM_DIM].dim.n;
int k = param[PARAM_DIM].dim.k;
int Am, An;
int Bm, Bn;
int Cm, Cn;
if (transa == PlasmaNoTrans) {
Am = m;
An = k;
}
else {
Am = k;
An = m;
}
if (transb == PlasmaNoTrans) {
Bm = k;
Bn = n;
}
else {
Bm = n;
Bn = k;
}
Cm = m;
Cn = n;
int lda = imax(1, Am + param[PARAM_PADA].i);
int ldb = imax(1, Bm + param[PARAM_PADB].i);
int ldc = imax(1, Cm + param[PARAM_PADC].i);
int test = param[PARAM_TEST].c == 'y';
double eps = LAPACKE_dlamch('E');
#ifdef COMPLEX
plasma_complex64_t alpha = param[PARAM_ALPHA].z;
plasma_complex64_t beta = param[PARAM_BETA].z;
#else
double alpha = creal(param[PARAM_ALPHA].z);
double beta = creal(param[PARAM_BETA].z);
#endif
//================================================================
// Set tuning parameters.
//================================================================
plasma_set(PlasmaTuning, PlasmaDisabled);
plasma_set(PlasmaNb, param[PARAM_NB].i);
//================================================================
// Allocate and initialize arrays.
//================================================================
plasma_complex64_t *A =
(plasma_complex64_t*)malloc((size_t)lda*An*sizeof(plasma_complex64_t));
assert(A != NULL);
plasma_complex64_t *B =
(plasma_complex64_t*)malloc((size_t)ldb*Bn*sizeof(plasma_complex64_t));
assert(B != NULL);
plasma_complex64_t *C =
(plasma_complex64_t*)malloc((size_t)ldc*Cn*sizeof(plasma_complex64_t));
assert(C != NULL);
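    // zlarnv requires the last entry of the 4-element seed to be odd.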
int seed[] = {0, 0, 0, 1};
lapack_int retval;
retval = LAPACKE_zlarnv(1, seed, (size_t)lda*An, A);
assert(retval == 0);
retval = LAPACKE_zlarnv(1, seed, (size_t)ldb*Bn, B);
assert(retval == 0);
retval = LAPACKE_zlarnv(1, seed, (size_t)ldc*Cn, C);
assert(retval == 0);
plasma_complex64_t *Cref = NULL;
if (test) {
Cref = (plasma_complex64_t*)malloc(
(size_t)ldc*Cn*sizeof(plasma_complex64_t));
assert(Cref != NULL);
memcpy(Cref, C, (size_t)ldc*Cn*sizeof(plasma_complex64_t));
}
//================================================================
// Run and time PLASMA.
//================================================================
/* plasma_zgemm(
transa, transb,
m, n, k,
alpha, A, lda,
B, ldb,
beta, C, ldc);
*/
// Get PLASMA context.
plasma_context_t *plasma = plasma_context_self();
    // This test function is void, so error paths report via plasma_error()
    // and return without a status code.
    if (plasma == NULL) {
        plasma_error("PLASMA not initialized");
        return;
    }
    // Check input arguments.
    if ((transa != PlasmaNoTrans) &&
        (transa != PlasmaTrans) &&
        (transa != PlasmaConjTrans)) {
        plasma_error("illegal value of transa");
        return;
    }
    if ((transb != PlasmaNoTrans) &&
        (transb != PlasmaTrans) &&
        (transb != PlasmaConjTrans)) {
        plasma_error("illegal value of transb");
        return;
    }
    if (m < 0) {
        plasma_error("illegal value of m");
        return;
    }
    if (n < 0) {
        plasma_error("illegal value of n");
        return;
    }
    if (k < 0) {
        plasma_error("illegal value of k");
        return;
    }
int am, an;
int bm, bn;
if (transa == PlasmaNoTrans) {
am = m;
an = k;
}
else {
am = k;
an = m;
}
if (transb == PlasmaNoTrans) {
bm = k;
bn = n;
}
else {
bm = n;
bn = k;
}
    if (lda < imax(1, am)) {
        plasma_error("illegal value of lda");
        return;
    }
    if (ldb < imax(1, bm)) {
        plasma_error("illegal value of ldb");
        return;
    }
    if (ldc < imax(1, m)) {
        plasma_error("illegal value of ldc");
        return;
    }
    // quick return
    if (m == 0 || n == 0 || ((alpha == 0.0 || k == 0) && beta == 1.0))
        return;
// Tune parameters.
if (plasma->tuning)
plasma_tune_gemm(plasma, PlasmaComplexDouble, m, n, k);
// Set tiling parameters.
int nb = plasma->nb;
// Create tile matrices.
plasma_desc_t AA;
plasma_desc_t BB;
plasma_desc_t CC;
int retval1;
retval1 = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
am, an, 0, 0, am, an, &AA);
    if (retval1 != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        return;
    }
retval1 = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
bm, bn, 0, 0, bm, bn, &BB);
    if (retval1 != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AA);
        return;
    }
retval1 = plasma_desc_general_create(PlasmaComplexDouble, nb, nb,
m, n, 0, 0, m, n, &CC);
    if (retval1 != PlasmaSuccess) {
        plasma_error("plasma_desc_general_create() failed");
        plasma_desc_destroy(&AA);
        plasma_desc_destroy(&BB);
        return;
    }
// Initialize sequence.
plasma_sequence_t sequence;
retval1 = plasma_sequence_init(&sequence);
// Initialize request.
plasma_request_t request;
retval1 = plasma_request_init(&request);
// asynchronous block
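    // PLASMA's async interface only enqueues OpenMP tasks; each call is
    // therefore wrapped in a parallel region whose master thread submits the
    // tasks while the remaining team threads execute them.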
#pragma omp parallel
#pragma omp master
{
// Translate to tile layout.
plasma_omp_zge2desc(A, lda, AA, &sequence, &request);
plasma_omp_zge2desc(B, ldb, BB, &sequence, &request);
plasma_omp_zge2desc(C, ldc, CC, &sequence, &request);
}
plasma_time_t start = omp_get_wtime();
#pragma omp parallel
#pragma omp master
{
// Call the tile async function.
plasma_omp_zgemm(transa, transb,
alpha, AA,
BB,
beta, CC,
&sequence, &request);
// Translate back to LAPACK layout.
plasma_omp_zdesc2ge(CC, C, ldc, &sequence, &request);
}
// implicit synchronization
plasma_time_t stop = omp_get_wtime();
plasma_time_t time = stop-start;
param[PARAM_TIME].d = time;
param[PARAM_GFLOPS].d = flops_zgemm(m, n, k) / time / 1e9;
// Free matrices in tile layout.
plasma_desc_destroy(&AA);
plasma_desc_destroy(&BB);
plasma_desc_destroy(&CC);
//================================================================
// Test results by comparing to a reference implementation.
//================================================================
if (test) {
// |R - R_ref|_p < gamma_{k+2} * |alpha| * |A|_p * |B|_p +
// gamma_2 * |beta| * |C|_p
// holds component-wise or with |.|_p as 1, inf, or Frobenius norm.
// gamma_k = k*eps / (1 - k*eps), but we use
// gamma_k = sqrt(k)*eps as a statistical average case.
// Using 3*eps covers complex arithmetic.
// See Higham, Accuracy and Stability of Numerical Algorithms, ch 2-3.
double work[1];
double Anorm = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Am, An, A, lda, work);
double Bnorm = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Bm, Bn, B, ldb, work);
double Cnorm = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Cm, Cn, Cref, ldc, work);
cblas_zgemm(
CblasColMajor,
(CBLAS_TRANSPOSE)transa, (CBLAS_TRANSPOSE)transb,
m, n, k,
CBLAS_SADDR(alpha), A, lda,
B, ldb,
CBLAS_SADDR(beta), Cref, ldc);
plasma_complex64_t zmone = -1.0;
cblas_zaxpy((size_t)ldc*Cn, CBLAS_SADDR(zmone), Cref, 1, C, 1);
double error = LAPACKE_zlange_work(
LAPACK_COL_MAJOR, 'F', Cm, Cn, C, ldc, work);
double normalize = sqrt((double)k+2) * cabs(alpha) * Anorm * Bnorm
+ 2 * cabs(beta) * Cnorm;
if (normalize != 0)
error /= normalize;
param[PARAM_ERROR].d = error;
param[PARAM_SUCCESS].i = error < 3*eps;
}
//================================================================
// Free arrays.
//================================================================
free(A);
free(B);
free(C);
if (test)
free(Cref);
}
|
acado_integrator.c | /*
* This file was auto-generated using the ACADO Toolkit.
*
* While ACADO Toolkit is free software released under the terms of
* the GNU Lesser General Public License (LGPL), the generated code
* as such remains the property of the user who used ACADO Toolkit
* to generate this code. In particular, user dependent data of the code
* do not inherit the GNU LGPL license. On the other hand, parts of the
* generated code that are a direct copy of source code from the
* ACADO Toolkit or the software tools it is based on, remain, as derived
* work, automatically covered by the LGPL license.
*
* ACADO Toolkit is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
*
*/
#include "acado_common.h"
real_t rk_dim9_swap;
/** Column vector of size: 9 */
real_t rk_dim9_bPerm[ 9 ];
/** Column vector of size: 75 */
real_t auxVar[ 75 ];
real_t rk_ttt;
/** Row vector of size: 21 */
real_t rk_xxx[ 21 ];
/** Column vector of size: 9 */
real_t rk_kkk[ 9 ];
/** Matrix of size: 9 x 9 (row major format) */
real_t rk_A[ 81 ];
/** Column vector of size: 9 */
real_t rk_b[ 9 ];
/** Row vector of size: 9 */
int rk_dim9_perm[ 9 ];
/** Column vector of size: 9 */
real_t rk_rhsTemp[ 9 ];
/** Row vector of size: 108 */
real_t rk_diffsTemp2[ 108 ];
/** Column vector of size: 9 */
real_t rk_diffK[ 9 ];
/** Matrix of size: 9 x 12 (row major format) */
real_t rk_diffsPrev2[ 108 ];
/** Matrix of size: 9 x 12 (row major format) */
real_t rk_diffsNew2[ 108 ];
#pragma omp threadprivate( auxVar, rk_ttt, rk_xxx, rk_kkk, rk_diffK, rk_rhsTemp, rk_dim9_perm, rk_A, rk_b, rk_diffsPrev2, rk_diffsNew2, rk_diffsTemp2, rk_dim9_swap, rk_dim9_bPerm )
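/* The integrator workspace above is thread-private so that several
   integration calls (e.g. one per shooting interval) can run concurrently. */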
void acado_rhs(const real_t* in, real_t* out)
{
const real_t* xd = in;
const real_t* u = in + 9;
const real_t* od = in + 12;
/* Vector of auxiliary variables; number of elements: 31. */
real_t* a = auxVar;
/* Compute intermediate quantities: */
a[0] = (cos(xd[3]));
a[1] = (cos(xd[5]));
a[2] = (sin(xd[4]));
a[3] = (sin(xd[3]));
a[4] = (sin(xd[5]));
a[5] = (sin(xd[4]));
a[6] = (cos(xd[4]));
a[7] = (cos(xd[5]));
a[8] = (cos(xd[4]));
a[9] = (sin(xd[5]));
a[10] = (((((a[5]*od[4])*u[2])*xd[2])+((((a[6]*a[7])*od[4])*u[2])*xd[0]))-((((a[8]*od[4])*a[9])*u[2])*xd[1]));
a[11] = (cos(xd[3]));
a[12] = (sin(xd[4]));
a[13] = (sin(xd[5]));
a[14] = (cos(xd[5]));
a[15] = (sin(xd[3]));
a[16] = (cos(xd[3]));
a[17] = (sin(xd[5]));
a[18] = (cos(xd[5]));
a[19] = (sin(xd[4]));
a[20] = (sin(xd[3]));
a[21] = (cos(xd[3]));
a[22] = (cos(xd[5]));
a[23] = (sin(xd[4]));
a[24] = (sin(xd[3]));
a[25] = (sin(xd[5]));
a[26] = (cos(xd[4]));
a[27] = (sin(xd[3]));
a[28] = (((((((a[16]*a[17])-((a[18]*a[19])*a[20]))*od[5])*u[2])*xd[0])-(((((a[21]*a[22])+((a[23]*a[24])*a[25]))*od[5])*u[2])*xd[1]))-((((a[26]*od[5])*a[27])*u[2])*xd[2]));
a[29] = (cos(xd[4]));
a[30] = (cos(xd[3]));
/* Compute outputs: */
out[0] = ((((((a[0]*a[1])*a[2])+(a[3]*a[4]))*u[2])-a[10])+od[6]);
out[1] = ((((((a[11]*a[12])*a[13])-(a[14]*a[15]))*u[2])-a[28])+od[7]);
out[2] = (((real_t)(-9.8065999999999995e+00)+((a[29]*a[30])*u[2]))+od[8]);
out[3] = (((od[1]*u[0])-xd[3])/od[0]);
out[4] = (((od[3]*u[1])-xd[4])/od[2]);
out[5] = (real_t)(0.0000000000000000e+00);
out[6] = xd[0];
out[7] = xd[1];
out[8] = xd[2];
}
void acado_diffs(const real_t* in, real_t* out)
{
const real_t* xd = in;
const real_t* u = in + 9;
const real_t* od = in + 12;
/* Vector of auxiliary variables; number of elements: 75. */
real_t* a = auxVar;
/* Compute intermediate quantities: */
a[0] = (cos(xd[4]));
a[1] = (cos(xd[5]));
a[2] = (((a[0]*a[1])*od[4])*u[2]);
a[3] = (cos(xd[4]));
a[4] = (sin(xd[5]));
a[5] = ((real_t)(0.0000000000000000e+00)-(((a[3]*od[4])*a[4])*u[2]));
a[6] = (sin(xd[4]));
a[7] = ((a[6]*od[4])*u[2]);
a[8] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3])));
a[9] = (cos(xd[5]));
a[10] = (sin(xd[4]));
a[11] = (cos(xd[3]));
a[12] = (sin(xd[5]));
a[13] = (cos(xd[3]));
a[14] = (cos(xd[4]));
a[15] = (cos(xd[4]));
a[16] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4])));
a[17] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4])));
a[18] = (((((a[15]*od[4])*u[2])*xd[2])+((((a[16]*a[1])*od[4])*u[2])*xd[0]))-((((a[17]*od[4])*a[4])*u[2])*xd[1]));
a[19] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5])));
a[20] = (sin(xd[3]));
a[21] = (cos(xd[5]));
a[22] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5])));
a[23] = (cos(xd[5]));
a[24] = (((((a[0]*a[22])*od[4])*u[2])*xd[0])-((((a[3]*od[4])*a[23])*u[2])*xd[1]));
a[25] = ((((a[6]*od[4])*xd[2])+(((a[0]*a[1])*od[4])*xd[0]))-(((a[3]*od[4])*a[4])*xd[1]));
a[26] = (cos(xd[3]));
a[27] = (sin(xd[5]));
a[28] = (cos(xd[5]));
a[29] = (sin(xd[4]));
a[30] = (sin(xd[3]));
a[31] = ((((a[26]*a[27])-((a[28]*a[29])*a[30]))*od[5])*u[2]);
a[32] = (cos(xd[3]));
a[33] = (cos(xd[5]));
a[34] = (sin(xd[4]));
a[35] = (sin(xd[3]));
a[36] = (sin(xd[5]));
a[37] = ((real_t)(0.0000000000000000e+00)-((((a[32]*a[33])+((a[34]*a[35])*a[36]))*od[5])*u[2]));
a[38] = (cos(xd[4]));
a[39] = (sin(xd[3]));
a[40] = ((real_t)(0.0000000000000000e+00)-(((a[38]*od[5])*a[39])*u[2]));
a[41] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3])));
a[42] = (sin(xd[4]));
a[43] = (sin(xd[5]));
a[44] = (cos(xd[5]));
a[45] = (cos(xd[3]));
a[46] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3])));
a[47] = (cos(xd[3]));
a[48] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3])));
a[49] = (cos(xd[3]));
a[50] = (cos(xd[3]));
a[51] = (((((((a[46]*a[27])-((a[28]*a[29])*a[47]))*od[5])*u[2])*xd[0])-(((((a[48]*a[33])+((a[34]*a[49])*a[36]))*od[5])*u[2])*xd[1]))-((((a[38]*od[5])*a[50])*u[2])*xd[2]));
a[52] = (cos(xd[3]));
a[53] = (cos(xd[4]));
a[54] = (cos(xd[4]));
a[55] = (cos(xd[4]));
a[56] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4])));
a[57] = (((((((real_t)(0.0000000000000000e+00)-((a[28]*a[54])*a[30]))*od[5])*u[2])*xd[0])-(((((a[55]*a[35])*a[36])*od[5])*u[2])*xd[1]))-((((a[56]*od[5])*a[39])*u[2])*xd[2]));
a[58] = (cos(xd[5]));
a[59] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5])));
a[60] = (sin(xd[3]));
a[61] = (cos(xd[5]));
a[62] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5])));
a[63] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[5])));
a[64] = (cos(xd[5]));
a[65] = ((((((a[26]*a[61])-((a[62]*a[29])*a[30]))*od[5])*u[2])*xd[0])-(((((a[32]*a[63])+((a[34]*a[35])*a[64]))*od[5])*u[2])*xd[1]));
a[66] = ((((((a[26]*a[27])-((a[28]*a[29])*a[30]))*od[5])*xd[0])-((((a[32]*a[33])+((a[34]*a[35])*a[36]))*od[5])*xd[1]))-(((a[38]*od[5])*a[39])*xd[2]));
a[67] = (cos(xd[4]));
a[68] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[3])));
a[69] = ((real_t)(-1.0000000000000000e+00)*(sin(xd[4])));
a[70] = (cos(xd[3]));
a[71] = ((real_t)(1.0000000000000000e+00)/od[0]);
a[72] = ((real_t)(1.0000000000000000e+00)/od[0]);
a[73] = ((real_t)(1.0000000000000000e+00)/od[2]);
a[74] = ((real_t)(1.0000000000000000e+00)/od[2]);
/* Compute outputs: */
out[0] = ((real_t)(0.0000000000000000e+00)-a[2]);
out[1] = ((real_t)(0.0000000000000000e+00)-a[5]);
out[2] = ((real_t)(0.0000000000000000e+00)-a[7]);
out[3] = ((((a[8]*a[9])*a[10])+(a[11]*a[12]))*u[2]);
out[4] = ((((a[13]*a[9])*a[14])*u[2])-a[18]);
out[5] = (((((a[13]*a[19])*a[10])+(a[20]*a[21]))*u[2])-a[24]);
out[6] = (real_t)(0.0000000000000000e+00);
out[7] = (real_t)(0.0000000000000000e+00);
out[8] = (real_t)(0.0000000000000000e+00);
out[9] = (real_t)(0.0000000000000000e+00);
out[10] = (real_t)(0.0000000000000000e+00);
out[11] = ((((a[13]*a[9])*a[10])+(a[20]*a[12]))-a[25]);
out[12] = ((real_t)(0.0000000000000000e+00)-a[31]);
out[13] = ((real_t)(0.0000000000000000e+00)-a[37]);
out[14] = ((real_t)(0.0000000000000000e+00)-a[40]);
out[15] = (((((a[41]*a[42])*a[43])-(a[44]*a[45]))*u[2])-a[51]);
out[16] = ((((a[52]*a[53])*a[43])*u[2])-a[57]);
out[17] = (((((a[52]*a[42])*a[58])-(a[59]*a[60]))*u[2])-a[65]);
out[18] = (real_t)(0.0000000000000000e+00);
out[19] = (real_t)(0.0000000000000000e+00);
out[20] = (real_t)(0.0000000000000000e+00);
out[21] = (real_t)(0.0000000000000000e+00);
out[22] = (real_t)(0.0000000000000000e+00);
out[23] = ((((a[52]*a[42])*a[43])-(a[44]*a[60]))-a[66]);
out[24] = (real_t)(0.0000000000000000e+00);
out[25] = (real_t)(0.0000000000000000e+00);
out[26] = (real_t)(0.0000000000000000e+00);
out[27] = ((a[67]*a[68])*u[2]);
out[28] = ((a[69]*a[70])*u[2]);
out[29] = (real_t)(0.0000000000000000e+00);
out[30] = (real_t)(0.0000000000000000e+00);
out[31] = (real_t)(0.0000000000000000e+00);
out[32] = (real_t)(0.0000000000000000e+00);
out[33] = (real_t)(0.0000000000000000e+00);
out[34] = (real_t)(0.0000000000000000e+00);
out[35] = (a[67]*a[70]);
out[36] = (real_t)(0.0000000000000000e+00);
out[37] = (real_t)(0.0000000000000000e+00);
out[38] = (real_t)(0.0000000000000000e+00);
out[39] = (((real_t)(0.0000000000000000e+00)-(real_t)(1.0000000000000000e+00))*a[71]);
out[40] = (real_t)(0.0000000000000000e+00);
out[41] = (real_t)(0.0000000000000000e+00);
out[42] = (real_t)(0.0000000000000000e+00);
out[43] = (real_t)(0.0000000000000000e+00);
out[44] = (real_t)(0.0000000000000000e+00);
out[45] = (od[1]*a[72]);
out[46] = (real_t)(0.0000000000000000e+00);
out[47] = (real_t)(0.0000000000000000e+00);
out[48] = (real_t)(0.0000000000000000e+00);
out[49] = (real_t)(0.0000000000000000e+00);
out[50] = (real_t)(0.0000000000000000e+00);
out[51] = (real_t)(0.0000000000000000e+00);
out[52] = (((real_t)(0.0000000000000000e+00)-(real_t)(1.0000000000000000e+00))*a[73]);
out[53] = (real_t)(0.0000000000000000e+00);
out[54] = (real_t)(0.0000000000000000e+00);
out[55] = (real_t)(0.0000000000000000e+00);
out[56] = (real_t)(0.0000000000000000e+00);
out[57] = (real_t)(0.0000000000000000e+00);
out[58] = (od[3]*a[74]);
out[59] = (real_t)(0.0000000000000000e+00);
out[60] = (real_t)(0.0000000000000000e+00);
out[61] = (real_t)(0.0000000000000000e+00);
out[62] = (real_t)(0.0000000000000000e+00);
out[63] = (real_t)(0.0000000000000000e+00);
out[64] = (real_t)(0.0000000000000000e+00);
out[65] = (real_t)(0.0000000000000000e+00);
out[66] = (real_t)(0.0000000000000000e+00);
out[67] = (real_t)(0.0000000000000000e+00);
out[68] = (real_t)(0.0000000000000000e+00);
out[69] = (real_t)(0.0000000000000000e+00);
out[70] = (real_t)(0.0000000000000000e+00);
out[71] = (real_t)(0.0000000000000000e+00);
out[72] = (real_t)(1.0000000000000000e+00);
out[73] = (real_t)(0.0000000000000000e+00);
out[74] = (real_t)(0.0000000000000000e+00);
out[75] = (real_t)(0.0000000000000000e+00);
out[76] = (real_t)(0.0000000000000000e+00);
out[77] = (real_t)(0.0000000000000000e+00);
out[78] = (real_t)(0.0000000000000000e+00);
out[79] = (real_t)(0.0000000000000000e+00);
out[80] = (real_t)(0.0000000000000000e+00);
out[81] = (real_t)(0.0000000000000000e+00);
out[82] = (real_t)(0.0000000000000000e+00);
out[83] = (real_t)(0.0000000000000000e+00);
out[84] = (real_t)(0.0000000000000000e+00);
out[85] = (real_t)(1.0000000000000000e+00);
out[86] = (real_t)(0.0000000000000000e+00);
out[87] = (real_t)(0.0000000000000000e+00);
out[88] = (real_t)(0.0000000000000000e+00);
out[89] = (real_t)(0.0000000000000000e+00);
out[90] = (real_t)(0.0000000000000000e+00);
out[91] = (real_t)(0.0000000000000000e+00);
out[92] = (real_t)(0.0000000000000000e+00);
out[93] = (real_t)(0.0000000000000000e+00);
out[94] = (real_t)(0.0000000000000000e+00);
out[95] = (real_t)(0.0000000000000000e+00);
out[96] = (real_t)(0.0000000000000000e+00);
out[97] = (real_t)(0.0000000000000000e+00);
out[98] = (real_t)(1.0000000000000000e+00);
out[99] = (real_t)(0.0000000000000000e+00);
out[100] = (real_t)(0.0000000000000000e+00);
out[101] = (real_t)(0.0000000000000000e+00);
out[102] = (real_t)(0.0000000000000000e+00);
out[103] = (real_t)(0.0000000000000000e+00);
out[104] = (real_t)(0.0000000000000000e+00);
out[105] = (real_t)(0.0000000000000000e+00);
out[106] = (real_t)(0.0000000000000000e+00);
out[107] = (real_t)(0.0000000000000000e+00);
}
void acado_solve_dim9_triangular( real_t* const A, real_t* const b )
{
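/* Back substitution: A holds the factorized 9x9 matrix in row-major order;
   b is overwritten with the solution of the upper-triangular system. */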
b[8] = b[8]/A[80];
b[7] -= + A[71]*b[8];
b[7] = b[7]/A[70];
b[6] -= + A[62]*b[8];
b[6] -= + A[61]*b[7];
b[6] = b[6]/A[60];
b[5] -= + A[53]*b[8];
b[5] -= + A[52]*b[7];
b[5] -= + A[51]*b[6];
b[5] = b[5]/A[50];
b[4] -= + A[44]*b[8];
b[4] -= + A[43]*b[7];
b[4] -= + A[42]*b[6];
b[4] -= + A[41]*b[5];
b[4] = b[4]/A[40];
b[3] -= + A[35]*b[8];
b[3] -= + A[34]*b[7];
b[3] -= + A[33]*b[6];
b[3] -= + A[32]*b[5];
b[3] -= + A[31]*b[4];
b[3] = b[3]/A[30];
b[2] -= + A[26]*b[8];
b[2] -= + A[25]*b[7];
b[2] -= + A[24]*b[6];
b[2] -= + A[23]*b[5];
b[2] -= + A[22]*b[4];
b[2] -= + A[21]*b[3];
b[2] = b[2]/A[20];
b[1] -= + A[17]*b[8];
b[1] -= + A[16]*b[7];
b[1] -= + A[15]*b[6];
b[1] -= + A[14]*b[5];
b[1] -= + A[13]*b[4];
b[1] -= + A[12]*b[3];
b[1] -= + A[11]*b[2];
b[1] = b[1]/A[10];
b[0] -= + A[8]*b[8];
b[0] -= + A[7]*b[7];
b[0] -= + A[6]*b[6];
b[0] -= + A[5]*b[5];
b[0] -= + A[4]*b[4];
b[0] -= + A[3]*b[3];
b[0] -= + A[2]*b[2];
b[0] -= + A[1]*b[1];
b[0] = b[0]/A[0];
}
real_t acado_solve_dim9_system( real_t* const A, real_t* const b, int* const rk_perm )
{
real_t det;
int i;
int j;
int k;
int indexMax;
int intSwap;
real_t valueMax;
real_t temp;
for (i = 0; i < 9; ++i)
{
rk_perm[i] = i;
}
det = 1.0000000000000000e+00;
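/* Gaussian elimination with partial (row) pivoting; |det| is accumulated
   from the pivots and returned so the caller can detect a (nearly)
   singular iteration matrix. */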
for( i=0; i < (8); i++ ) {
indexMax = i;
valueMax = fabs(A[i*9+i]);
for( j=(i+1); j < 9; j++ ) {
temp = fabs(A[j*9+i]);
if( temp > valueMax ) {
indexMax = j;
valueMax = temp;
}
}
if( indexMax > i ) {
for (k = 0; k < 9; ++k)
{
rk_dim9_swap = A[i*9+k];
A[i*9+k] = A[indexMax*9+k];
A[indexMax*9+k] = rk_dim9_swap;
}
rk_dim9_swap = b[i];
b[i] = b[indexMax];
b[indexMax] = rk_dim9_swap;
intSwap = rk_perm[i];
rk_perm[i] = rk_perm[indexMax];
rk_perm[indexMax] = intSwap;
}
det *= A[i*9+i];
for( j=i+1; j < 9; j++ ) {
A[j*9+i] = -A[j*9+i]/A[i*9+i];
for( k=i+1; k < 9; k++ ) {
A[j*9+k] += A[j*9+i] * A[i*9+k];
}
b[j] += A[j*9+i] * b[i];
}
}
det *= A[80];
det = fabs(det);
acado_solve_dim9_triangular( A, b );
return det;
}
void acado_solve_dim9_system_reuse( real_t* const A, real_t* const b, int* const rk_perm )
{
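/* Reuse a previous factorization: permute b with the stored pivot order,
   apply the saved elimination multipliers (forward substitution), then
   back-substitute. */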
rk_dim9_bPerm[0] = b[rk_perm[0]];
rk_dim9_bPerm[1] = b[rk_perm[1]];
rk_dim9_bPerm[2] = b[rk_perm[2]];
rk_dim9_bPerm[3] = b[rk_perm[3]];
rk_dim9_bPerm[4] = b[rk_perm[4]];
rk_dim9_bPerm[5] = b[rk_perm[5]];
rk_dim9_bPerm[6] = b[rk_perm[6]];
rk_dim9_bPerm[7] = b[rk_perm[7]];
rk_dim9_bPerm[8] = b[rk_perm[8]];
rk_dim9_bPerm[1] += A[9]*rk_dim9_bPerm[0];
rk_dim9_bPerm[2] += A[18]*rk_dim9_bPerm[0];
rk_dim9_bPerm[2] += A[19]*rk_dim9_bPerm[1];
rk_dim9_bPerm[3] += A[27]*rk_dim9_bPerm[0];
rk_dim9_bPerm[3] += A[28]*rk_dim9_bPerm[1];
rk_dim9_bPerm[3] += A[29]*rk_dim9_bPerm[2];
rk_dim9_bPerm[4] += A[36]*rk_dim9_bPerm[0];
rk_dim9_bPerm[4] += A[37]*rk_dim9_bPerm[1];
rk_dim9_bPerm[4] += A[38]*rk_dim9_bPerm[2];
rk_dim9_bPerm[4] += A[39]*rk_dim9_bPerm[3];
rk_dim9_bPerm[5] += A[45]*rk_dim9_bPerm[0];
rk_dim9_bPerm[5] += A[46]*rk_dim9_bPerm[1];
rk_dim9_bPerm[5] += A[47]*rk_dim9_bPerm[2];
rk_dim9_bPerm[5] += A[48]*rk_dim9_bPerm[3];
rk_dim9_bPerm[5] += A[49]*rk_dim9_bPerm[4];
rk_dim9_bPerm[6] += A[54]*rk_dim9_bPerm[0];
rk_dim9_bPerm[6] += A[55]*rk_dim9_bPerm[1];
rk_dim9_bPerm[6] += A[56]*rk_dim9_bPerm[2];
rk_dim9_bPerm[6] += A[57]*rk_dim9_bPerm[3];
rk_dim9_bPerm[6] += A[58]*rk_dim9_bPerm[4];
rk_dim9_bPerm[6] += A[59]*rk_dim9_bPerm[5];
rk_dim9_bPerm[7] += A[63]*rk_dim9_bPerm[0];
rk_dim9_bPerm[7] += A[64]*rk_dim9_bPerm[1];
rk_dim9_bPerm[7] += A[65]*rk_dim9_bPerm[2];
rk_dim9_bPerm[7] += A[66]*rk_dim9_bPerm[3];
rk_dim9_bPerm[7] += A[67]*rk_dim9_bPerm[4];
rk_dim9_bPerm[7] += A[68]*rk_dim9_bPerm[5];
rk_dim9_bPerm[7] += A[69]*rk_dim9_bPerm[6];
rk_dim9_bPerm[8] += A[72]*rk_dim9_bPerm[0];
rk_dim9_bPerm[8] += A[73]*rk_dim9_bPerm[1];
rk_dim9_bPerm[8] += A[74]*rk_dim9_bPerm[2];
rk_dim9_bPerm[8] += A[75]*rk_dim9_bPerm[3];
rk_dim9_bPerm[8] += A[76]*rk_dim9_bPerm[4];
rk_dim9_bPerm[8] += A[77]*rk_dim9_bPerm[5];
rk_dim9_bPerm[8] += A[78]*rk_dim9_bPerm[6];
rk_dim9_bPerm[8] += A[79]*rk_dim9_bPerm[7];
acado_solve_dim9_triangular( A, rk_dim9_bPerm );
b[0] = rk_dim9_bPerm[0];
b[1] = rk_dim9_bPerm[1];
b[2] = rk_dim9_bPerm[2];
b[3] = rk_dim9_bPerm[3];
b[4] = rk_dim9_bPerm[4];
b[5] = rk_dim9_bPerm[5];
b[6] = rk_dim9_bPerm[6];
b[7] = rk_dim9_bPerm[7];
b[8] = rk_dim9_bPerm[8];
}
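/* acado_Ah_mat stores A*h for the single-stage implicit Runge-Kutta scheme;
   with h = 0.05 the entry 0.025 corresponds to A = 1/2, i.e. the implicit
   midpoint rule. */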
/** Column vector of size: 1 */
static const real_t acado_Ah_mat[ 1 ] =
{ 2.5000000000000001e-02 };
/* Fixed step size: 0.05 */
int acado_integrate( real_t* const rk_eta, int resetIntegrator )
{
int error;
int i;
int j;
int k;
int run;
int run1;
int tmp_index1;
int tmp_index2;
real_t det;
rk_ttt = 0.0000000000000000e+00;
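/* The three control inputs and nine online-data values are constant over
   the step and are copied once into rk_xxx[9..20] below. */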
rk_xxx[9] = rk_eta[117];
rk_xxx[10] = rk_eta[118];
rk_xxx[11] = rk_eta[119];
rk_xxx[12] = rk_eta[120];
rk_xxx[13] = rk_eta[121];
rk_xxx[14] = rk_eta[122];
rk_xxx[15] = rk_eta[123];
rk_xxx[16] = rk_eta[124];
rk_xxx[17] = rk_eta[125];
rk_xxx[18] = rk_eta[126];
rk_xxx[19] = rk_eta[127];
rk_xxx[20] = rk_eta[128];
for (run = 0; run < 2; ++run)
{
if( run > 0 ) {
for (i = 0; i < 9; ++i)
{
rk_diffsPrev2[i * 12] = rk_eta[i * 9 + 9];
rk_diffsPrev2[i * 12 + 1] = rk_eta[i * 9 + 10];
rk_diffsPrev2[i * 12 + 2] = rk_eta[i * 9 + 11];
rk_diffsPrev2[i * 12 + 3] = rk_eta[i * 9 + 12];
rk_diffsPrev2[i * 12 + 4] = rk_eta[i * 9 + 13];
rk_diffsPrev2[i * 12 + 5] = rk_eta[i * 9 + 14];
rk_diffsPrev2[i * 12 + 6] = rk_eta[i * 9 + 15];
rk_diffsPrev2[i * 12 + 7] = rk_eta[i * 9 + 16];
rk_diffsPrev2[i * 12 + 8] = rk_eta[i * 9 + 17];
rk_diffsPrev2[i * 12 + 9] = rk_eta[i * 3 + 90];
rk_diffsPrev2[i * 12 + 10] = rk_eta[i * 3 + 91];
rk_diffsPrev2[i * 12 + 11] = rk_eta[i * 3 + 92];
}
}
if( resetIntegrator ) {
for (i = 0; i < 1; ++i)
{
for (run1 = 0; run1 < 1; ++run1)
{
for (j = 0; j < 9; ++j)
{
rk_xxx[j] = rk_eta[j];
tmp_index1 = j;
rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1];
}
acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 108 ]) );
for (j = 0; j < 9; ++j)
{
tmp_index1 = (run1 * 9) + (j);
rk_A[tmp_index1 * 9] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12)];
rk_A[tmp_index1 * 9 + 1] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 1)];
rk_A[tmp_index1 * 9 + 2] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 2)];
rk_A[tmp_index1 * 9 + 3] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 3)];
rk_A[tmp_index1 * 9 + 4] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 4)];
rk_A[tmp_index1 * 9 + 5] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 5)];
rk_A[tmp_index1 * 9 + 6] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 6)];
rk_A[tmp_index1 * 9 + 7] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 7)];
rk_A[tmp_index1 * 9 + 8] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 8)];
if( 0 == run1 ) rk_A[(tmp_index1 * 9) + (j)] -= 1.0000000000000000e+00;
}
acado_rhs( rk_xxx, rk_rhsTemp );
rk_b[run1 * 9] = rk_kkk[run1] - rk_rhsTemp[0];
rk_b[run1 * 9 + 1] = rk_kkk[run1 + 1] - rk_rhsTemp[1];
rk_b[run1 * 9 + 2] = rk_kkk[run1 + 2] - rk_rhsTemp[2];
rk_b[run1 * 9 + 3] = rk_kkk[run1 + 3] - rk_rhsTemp[3];
rk_b[run1 * 9 + 4] = rk_kkk[run1 + 4] - rk_rhsTemp[4];
rk_b[run1 * 9 + 5] = rk_kkk[run1 + 5] - rk_rhsTemp[5];
rk_b[run1 * 9 + 6] = rk_kkk[run1 + 6] - rk_rhsTemp[6];
rk_b[run1 * 9 + 7] = rk_kkk[run1 + 7] - rk_rhsTemp[7];
rk_b[run1 * 9 + 8] = rk_kkk[run1 + 8] - rk_rhsTemp[8];
}
det = acado_solve_dim9_system( rk_A, rk_b, rk_dim9_perm );
for (j = 0; j < 1; ++j)
{
rk_kkk[j] += rk_b[j * 9];
rk_kkk[j + 1] += rk_b[j * 9 + 1];
rk_kkk[j + 2] += rk_b[j * 9 + 2];
rk_kkk[j + 3] += rk_b[j * 9 + 3];
rk_kkk[j + 4] += rk_b[j * 9 + 4];
rk_kkk[j + 5] += rk_b[j * 9 + 5];
rk_kkk[j + 6] += rk_b[j * 9 + 6];
rk_kkk[j + 7] += rk_b[j * 9 + 7];
rk_kkk[j + 8] += rk_b[j * 9 + 8];
}
}
}
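/* Two further Newton iterations on the stage equations, reusing the LU
   factorization of the iteration matrix computed above. */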
for (i = 0; i < 2; ++i)
{
for (run1 = 0; run1 < 1; ++run1)
{
for (j = 0; j < 9; ++j)
{
rk_xxx[j] = rk_eta[j];
tmp_index1 = j;
rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1];
}
acado_rhs( rk_xxx, rk_rhsTemp );
rk_b[run1 * 9] = rk_kkk[run1] - rk_rhsTemp[0];
rk_b[run1 * 9 + 1] = rk_kkk[run1 + 1] - rk_rhsTemp[1];
rk_b[run1 * 9 + 2] = rk_kkk[run1 + 2] - rk_rhsTemp[2];
rk_b[run1 * 9 + 3] = rk_kkk[run1 + 3] - rk_rhsTemp[3];
rk_b[run1 * 9 + 4] = rk_kkk[run1 + 4] - rk_rhsTemp[4];
rk_b[run1 * 9 + 5] = rk_kkk[run1 + 5] - rk_rhsTemp[5];
rk_b[run1 * 9 + 6] = rk_kkk[run1 + 6] - rk_rhsTemp[6];
rk_b[run1 * 9 + 7] = rk_kkk[run1 + 7] - rk_rhsTemp[7];
rk_b[run1 * 9 + 8] = rk_kkk[run1 + 8] - rk_rhsTemp[8];
}
acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm );
for (j = 0; j < 1; ++j)
{
rk_kkk[j] += rk_b[j * 9];
rk_kkk[j + 1] += rk_b[j * 9 + 1];
rk_kkk[j + 2] += rk_b[j * 9 + 2];
rk_kkk[j + 3] += rk_b[j * 9 + 3];
rk_kkk[j + 4] += rk_b[j * 9 + 4];
rk_kkk[j + 5] += rk_b[j * 9 + 5];
rk_kkk[j + 6] += rk_b[j * 9 + 6];
rk_kkk[j + 7] += rk_b[j * 9 + 7];
rk_kkk[j + 8] += rk_b[j * 9 + 8];
}
}
for (run1 = 0; run1 < 1; ++run1)
{
for (j = 0; j < 9; ++j)
{
rk_xxx[j] = rk_eta[j];
tmp_index1 = j;
rk_xxx[j] += + acado_Ah_mat[run1]*rk_kkk[tmp_index1];
}
acado_diffs( rk_xxx, &(rk_diffsTemp2[ run1 * 108 ]) );
for (j = 0; j < 9; ++j)
{
tmp_index1 = (run1 * 9) + (j);
rk_A[tmp_index1 * 9] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12)];
rk_A[tmp_index1 * 9 + 1] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 1)];
rk_A[tmp_index1 * 9 + 2] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 2)];
rk_A[tmp_index1 * 9 + 3] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 3)];
rk_A[tmp_index1 * 9 + 4] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 4)];
rk_A[tmp_index1 * 9 + 5] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 5)];
rk_A[tmp_index1 * 9 + 6] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 6)];
rk_A[tmp_index1 * 9 + 7] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 7)];
rk_A[tmp_index1 * 9 + 8] = + acado_Ah_mat[run1]*rk_diffsTemp2[(run1 * 108) + (j * 12 + 8)];
if( 0 == run1 ) rk_A[(tmp_index1 * 9) + (j)] -= 1.0000000000000000e+00;
}
}
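/* Forward sensitivities: one linear solve per state gives the derivatives of
   the stage values; only the first right-hand side refactorizes the matrix. */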
for (run1 = 0; run1 < 9; ++run1)
{
for (i = 0; i < 1; ++i)
{
rk_b[i * 9] = - rk_diffsTemp2[(i * 108) + (run1)];
rk_b[i * 9 + 1] = - rk_diffsTemp2[(i * 108) + (run1 + 12)];
rk_b[i * 9 + 2] = - rk_diffsTemp2[(i * 108) + (run1 + 24)];
rk_b[i * 9 + 3] = - rk_diffsTemp2[(i * 108) + (run1 + 36)];
rk_b[i * 9 + 4] = - rk_diffsTemp2[(i * 108) + (run1 + 48)];
rk_b[i * 9 + 5] = - rk_diffsTemp2[(i * 108) + (run1 + 60)];
rk_b[i * 9 + 6] = - rk_diffsTemp2[(i * 108) + (run1 + 72)];
rk_b[i * 9 + 7] = - rk_diffsTemp2[(i * 108) + (run1 + 84)];
rk_b[i * 9 + 8] = - rk_diffsTemp2[(i * 108) + (run1 + 96)];
}
if( 0 == run1 ) {
det = acado_solve_dim9_system( rk_A, rk_b, rk_dim9_perm );
}
else {
acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm );
}
for (i = 0; i < 1; ++i)
{
rk_diffK[i] = rk_b[i * 9];
rk_diffK[i + 1] = rk_b[i * 9 + 1];
rk_diffK[i + 2] = rk_b[i * 9 + 2];
rk_diffK[i + 3] = rk_b[i * 9 + 3];
rk_diffK[i + 4] = rk_b[i * 9 + 4];
rk_diffK[i + 5] = rk_b[i * 9 + 5];
rk_diffK[i + 6] = rk_b[i * 9 + 6];
rk_diffK[i + 7] = rk_b[i * 9 + 7];
rk_diffK[i + 8] = rk_b[i * 9 + 8];
}
for (i = 0; i < 9; ++i)
{
rk_diffsNew2[(i * 12) + (run1)] = (i == run1-0);
rk_diffsNew2[(i * 12) + (run1)] += + rk_diffK[i]*(real_t)5.0000000000000003e-02;
}
}
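/* The same solves, now for the sensitivities with respect to the three
   control inputs. */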
for (run1 = 0; run1 < 3; ++run1)
{
for (i = 0; i < 1; ++i)
{
for (j = 0; j < 9; ++j)
{
tmp_index1 = (i * 9) + (j);
tmp_index2 = (run1) + (j * 12);
rk_b[tmp_index1] = - rk_diffsTemp2[(i * 108) + (tmp_index2 + 9)];
}
}
acado_solve_dim9_system_reuse( rk_A, rk_b, rk_dim9_perm );
for (i = 0; i < 1; ++i)
{
rk_diffK[i] = rk_b[i * 9];
rk_diffK[i + 1] = rk_b[i * 9 + 1];
rk_diffK[i + 2] = rk_b[i * 9 + 2];
rk_diffK[i + 3] = rk_b[i * 9 + 3];
rk_diffK[i + 4] = rk_b[i * 9 + 4];
rk_diffK[i + 5] = rk_b[i * 9 + 5];
rk_diffK[i + 6] = rk_b[i * 9 + 6];
rk_diffK[i + 7] = rk_b[i * 9 + 7];
rk_diffK[i + 8] = rk_b[i * 9 + 8];
}
for (i = 0; i < 9; ++i)
{
rk_diffsNew2[(i * 12) + (run1 + 9)] = + rk_diffK[i]*(real_t)5.0000000000000003e-02;
}
}
rk_eta[0] += + rk_kkk[0]*(real_t)5.0000000000000003e-02;
rk_eta[1] += + rk_kkk[1]*(real_t)5.0000000000000003e-02;
rk_eta[2] += + rk_kkk[2]*(real_t)5.0000000000000003e-02;
rk_eta[3] += + rk_kkk[3]*(real_t)5.0000000000000003e-02;
rk_eta[4] += + rk_kkk[4]*(real_t)5.0000000000000003e-02;
rk_eta[5] += + rk_kkk[5]*(real_t)5.0000000000000003e-02;
rk_eta[6] += + rk_kkk[6]*(real_t)5.0000000000000003e-02;
rk_eta[7] += + rk_kkk[7]*(real_t)5.0000000000000003e-02;
rk_eta[8] += + rk_kkk[8]*(real_t)5.0000000000000003e-02;
if( run == 0 ) {
for (i = 0; i < 9; ++i)
{
for (j = 0; j < 9; ++j)
{
tmp_index2 = (j) + (i * 9);
rk_eta[tmp_index2 + 9] = rk_diffsNew2[(i * 12) + (j)];
}
for (j = 0; j < 3; ++j)
{
tmp_index2 = (j) + (i * 3);
rk_eta[tmp_index2 + 90] = rk_diffsNew2[(i * 12) + (j + 9)];
}
}
}
else {
for (i = 0; i < 9; ++i)
{
for (j = 0; j < 9; ++j)
{
tmp_index2 = (j) + (i * 9);
rk_eta[tmp_index2 + 9] = + rk_diffsNew2[i * 12]*rk_diffsPrev2[j];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 1]*rk_diffsPrev2[j + 12];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 2]*rk_diffsPrev2[j + 24];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 3]*rk_diffsPrev2[j + 36];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 4]*rk_diffsPrev2[j + 48];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 5]*rk_diffsPrev2[j + 60];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 6]*rk_diffsPrev2[j + 72];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 7]*rk_diffsPrev2[j + 84];
rk_eta[tmp_index2 + 9] += + rk_diffsNew2[i * 12 + 8]*rk_diffsPrev2[j + 96];
}
for (j = 0; j < 3; ++j)
{
tmp_index2 = (j) + (i * 3);
rk_eta[tmp_index2 + 90] = rk_diffsNew2[(i * 12) + (j + 9)];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12]*rk_diffsPrev2[j + 9];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 1]*rk_diffsPrev2[j + 21];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 2]*rk_diffsPrev2[j + 33];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 3]*rk_diffsPrev2[j + 45];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 4]*rk_diffsPrev2[j + 57];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 5]*rk_diffsPrev2[j + 69];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 6]*rk_diffsPrev2[j + 81];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 7]*rk_diffsPrev2[j + 93];
rk_eta[tmp_index2 + 90] += + rk_diffsNew2[i * 12 + 8]*rk_diffsPrev2[j + 105];
}
}
}
resetIntegrator = 0;
rk_ttt += 5.0000000000000000e-01;
}
if( det < 1e-12 ) {
error = 2;
} else if( det < 1e-6 ) {
error = 1;
} else {
error = 0;
}
return error;
}
|
relu_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2020, OPEN AI LAB
* Author: bhu@openailab.com
*/
#include <math.h>
#include "sys_port.h"
#include "module.h"
#include "tengine_ir.h"
#include "../../cpu_node_ops.h"
#include "tengine_op.h"
#include "relu_param.h"
static int ref_relu_fp32(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
int num_thread)
{
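    /* Tensors are laid out as NCHW: each (n, q) channel plane of h*w values
       is independent, so the channel loop can be parallelized with OpenMP. */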
int batch = input_tensor->dims[0];
int channels = input_tensor->dims[1];
int h = input_tensor->dims[2];
int w = input_tensor->dims[3];
int size = h * w;
int c_step = h * w;
int batch_step = channels * c_step;
float* input_data = input_tensor->data;
float* out_data = output_tensor->data;
if (negative_slope == 0)
{
for (int n = 0; n < batch; n++)
{
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = input_data + batch_step * n + c_step * q;
float* dst = out_data + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = 0;
else
dst[i] = src[i];
}
}
}
}
else
{
for (int n = 0; n < batch; n++)
{
#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = input_data + batch_step * n + c_step * q;
float* dst = out_data + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = src[i] * negative_slope;
else
dst[i] = src[i];
}
}
}
}
return 0;
}
static int ref_relu_uint8(struct ir_tensor* input_tensor, struct ir_tensor* output_tensor, float negative_slope,
int num_thread)
{
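    /* uint8 path: dequantize the whole tensor to fp32, apply the ReLU, then
       requantize with rounding and saturation to [0, 255]. */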
int batch = input_tensor->dims[0];
int channels = input_tensor->dims[1];
int h = input_tensor->dims[2];
int w = input_tensor->dims[3];
int size = h * w;
int c_step = h * w;
int batch_step = channels * c_step;
int total_size = batch * batch_step;
/* dequant */
uint8_t* input_uint8 = input_tensor->data;
uint8_t* output_uint8 = output_tensor->data;
float input_scale = input_tensor->scale;
float output_scale = output_tensor->scale;
int32_t input_zero = input_tensor->zero_point;
int32_t output_zero = output_tensor->zero_point;
    float* data_fp32 = (float*)sys_malloc(total_size * sizeof(float));
    if (data_fp32 == NULL)
        return -1;
    for (int i = 0; i < total_size; i++)
    {
        data_fp32[i] = ((float)input_uint8[i] - (float)input_zero) * input_scale;
    }
/* process */
if (negative_slope == 0)
{
for (int n = 0; n < batch; n++)
{
//#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = data_fp32 + batch_step * n + c_step * q;
float* dst = data_fp32 + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = 0;
else
dst[i] = src[i];
}
}
}
}
else
{
for (int n = 0; n < batch; n++)
{
//#pragma omp parallel for num_threads(num_thread)
for (int q = 0; q < channels; q++)
{
float* src = data_fp32 + batch_step * n + c_step * q;
float* dst = data_fp32 + batch_step * n + c_step * q;
for (int i = 0; i < size; i++)
{
if (src[i] < 0)
dst[i] = src[i] * negative_slope;
else
dst[i] = src[i];
}
}
}
}
/* quant */
    for (int i = 0; i < total_size; i++)
{
int udata = round(data_fp32[i] / output_scale + output_zero);
if (udata > 255)
udata = 255;
else if (udata < 0)
udata = 0;
output_uint8[i] = udata;
}
sys_free(data_fp32);
return 0;
}
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* ir_node = exec_node->ir_node;
struct ir_graph* ir_graph = ir_node->graph;
struct ir_tensor* input_tensor;
struct ir_tensor* output_tensor;
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[0]);
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct relu_param* relu_param = ( struct relu_param* )ir_node->op.param_mem;
int ret = 0;
if (input_tensor->data_type == TENGINE_DT_FP32)
ret = ref_relu_fp32(input_tensor, output_tensor, relu_param->negative_slope, exec_graph->num_thread);
else
ret = ref_relu_uint8(input_tensor, output_tensor, relu_param->negative_slope, exec_graph->num_thread);
return ret;
}
static int reshape(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct ir_node* node = exec_node->ir_node;
struct ir_graph* ir_graph = node->graph;
struct ir_tensor* input = get_ir_graph_tensor(ir_graph, node->input_tensors[0]);
struct ir_tensor* output = get_ir_graph_tensor(ir_graph, node->output_tensors[0]);
int ret = set_ir_tensor_shape(output, input->dims, input->dim_num);
return ret;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct ir_node* exec_node)
{
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = NULL,
.run = run,
.reshape = reshape,
.postrun = NULL,
.init_node = init_node,
.release_node = release_node,
.score = score};
static int reg_relu_hcl_ops(void* arg)
{
return register_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
static int unreg_relu_hcl_ops(void* arg)
{
return unregister_builtin_node_ops(OP_RELU, &hcl_node_ops);
}
AUTO_REGISTER_OPS(reg_relu_hcl_ops);
AUTO_UNREGISTER_OPS(unreg_relu_hcl_ops);
|
GB_subassign_03.c | //------------------------------------------------------------------------------
// GB_subassign_03: C(I,J) += scalar ; using S
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// Method 03: C(I,J) += scalar ; using S
// M: NULL
// Mask_comp: false
// C_replace: false
// accum: present
// A: scalar
// S: constructed
// C is not bitmap: use GB_bitmap_assign instead
#include "GB_subassign_methods.h"
GrB_Info GB_subassign_03
(
GrB_Matrix C,
// input:
const GrB_Index *I,
const int64_t ni,
const int64_t nI,
const int Ikind,
const int64_t Icolon [3],
const GrB_Index *J,
const int64_t nj,
const int64_t nJ,
const int Jkind,
const int64_t Jcolon [3],
const GrB_BinaryOp accum,
const void *scalar,
const GrB_Type atype,
GB_Context Context
)
{
//--------------------------------------------------------------------------
// check inputs
//--------------------------------------------------------------------------
ASSERT (!GB_IS_BITMAP (C)) ;
//--------------------------------------------------------------------------
// S = C(I,J)
//--------------------------------------------------------------------------
GB_EMPTY_TASKLIST ;
GB_OK (GB_subassign_symbolic (&S, C, I, ni, J, nj, true, Context)) ;
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GB_GET_C ; // C must not be bitmap
const int64_t *GB_RESTRICT Ch = C->h ;
const int64_t *GB_RESTRICT Cp = C->p ;
const bool C_is_hyper = (Ch != NULL) ;
const int64_t Cnvec = C->nvec ;
GB_GET_S ;
GB_GET_ACCUM_SCALAR ;
//--------------------------------------------------------------------------
// Method 03: C(I,J) += scalar ; using S
//--------------------------------------------------------------------------
// Time: Optimal; must visit all IxJ, so Omega(|I|*|J|) is required.
// Entries in S are found and the accum operator is applied to the
// corresponding entry in C with the scalar.
// Method 01 and Method 03 are very similar.
//--------------------------------------------------------------------------
// Parallel: all IxJ (Methods 01, 03, 13, 15, 17, 19)
//--------------------------------------------------------------------------
GB_SUBASSIGN_IXJ_SLICE ;
//--------------------------------------------------------------------------
// phase 1: create zombies, update entries, and count pending tuples
//--------------------------------------------------------------------------
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(+:nzombies)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE1 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC) += scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
if (!found)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, the scalar is present
// [. A 1]: action: ( insert )
task_pending++ ;
}
else
{
// ----[C A 1] or [X A 1]-----------------------------------
// both S (i,j) and A (i,j) present
// [C A 1]: action: ( =C+A ): apply accum
// [X A 1]: action: ( undelete ): zombie lives
GB_C_S_LOOKUP ;
GB_withaccum_C_A_1_scalar ;
GB_NEXT (S) ;
}
}
}
GB_PHASE1_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// phase 2: insert pending tuples
//--------------------------------------------------------------------------
GB_PENDING_CUMSUM ;
#pragma omp parallel for num_threads(nthreads) schedule(dynamic,1) \
reduction(&&:pending_sorted)
for (taskid = 0 ; taskid < ntasks ; taskid++)
{
//----------------------------------------------------------------------
// get the task descriptor
//----------------------------------------------------------------------
GB_GET_IXJ_TASK_DESCRIPTOR_PHASE2 (iA_start, iA_end) ;
//----------------------------------------------------------------------
// compute all vectors in this task
//----------------------------------------------------------------------
for (int64_t j = kfirst ; j <= klast ; j++)
{
//------------------------------------------------------------------
// get jC, the corresponding vector of C
//------------------------------------------------------------------
int64_t jC = GB_ijlist (J, j, Jkind, Jcolon) ;
//------------------------------------------------------------------
// get S(iA_start:end,j)
//------------------------------------------------------------------
GB_GET_VECTOR_FOR_IXJ (S, iA_start) ;
//------------------------------------------------------------------
// C(I(iA_start,iA_end-1),jC) += scalar
//------------------------------------------------------------------
for (int64_t iA = iA_start ; iA < iA_end ; iA++)
{
bool found = (pS < pS_end) && (GBI (Si, pS, Svlen) == iA) ;
if (!found)
{
// ----[. A 1]----------------------------------------------
// S (i,j) is not present, the scalar is present
// [. A 1]: action: ( insert )
int64_t iC = GB_ijlist (I, iA, Ikind, Icolon) ;
GB_PENDING_INSERT (scalar) ;
}
else
{
// both S (i,j) and A (i,j) present
GB_NEXT (S) ;
}
}
}
GB_PHASE2_TASK_WRAPUP ;
}
//--------------------------------------------------------------------------
// finalize the matrix and return result
//--------------------------------------------------------------------------
GB_SUBASSIGN_WRAPUP ;
}
|
simd-12.c | /* { dg-do run } */
/* { dg-additional-options "-msse2" { target sse2_runtime } } */
/* { dg-additional-options "-mavx" { target avx_runtime } } */
int
main ()
{
int k = 0, i, s = 0;
#pragma omp parallel
#pragma omp for simd linear(k : 3) reduction(+: s) schedule (static, 16)
for (i = 0; i < 128; i++)
{
k = k + 3;
s = s + k;
}
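  /* k takes the values 3, 6, ..., 384, so s = 3 * (1 + 2 + ... + 128)
     = 3 * 128 * 129 / 2.  */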
if (s != 128 * 129 / 2 * 3) __builtin_abort ();
return 0;
}
|
CGOpenMPRuntime.h | //===----- CGOpenMPRuntime.h - Interface to OpenMP Runtimes -----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This provides a class for OpenMP runtime code generation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#define LLVM_CLANG_LIB_CODEGEN_CGOPENMPRUNTIME_H
#include "CGValue.h"
#include "clang/AST/DeclOpenMP.h"
#include "clang/AST/GlobalDecl.h"
#include "clang/AST/Type.h"
#include "clang/Basic/OpenMPKinds.h"
#include "clang/Basic/SourceLocation.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSet.h"
#include "llvm/Frontend/OpenMP/OMPConstants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/ValueHandle.h"
namespace llvm {
class ArrayType;
class Constant;
class FunctionType;
class GlobalVariable;
class StructType;
class Type;
class Value;
} // namespace llvm
namespace clang {
class Expr;
class OMPDependClause;
class OMPExecutableDirective;
class OMPLoopDirective;
class VarDecl;
class OMPDeclareReductionDecl;
class IdentifierInfo;
namespace CodeGen {
class Address;
class CodeGenFunction;
class CodeGenModule;
/// A basic class for pre|post-action for advanced codegen sequence for OpenMP
/// region.
class PrePostActionTy {
public:
explicit PrePostActionTy() {}
virtual void Enter(CodeGenFunction &CGF) {}
virtual void Exit(CodeGenFunction &CGF) {}
virtual ~PrePostActionTy() {}
};
/// Class provides a way to call the simple version of codegen for an OpenMP
/// region, or an advanced one with possible pre|post-actions in codegen.
class RegionCodeGenTy final {
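  // The callable is stored type-erased as an intptr_t; CallbackFn casts it
  // back to its concrete type and invokes it with the CodeGenFunction and
  // the optional pre/post action.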
intptr_t CodeGen;
typedef void (*CodeGenTy)(intptr_t, CodeGenFunction &, PrePostActionTy &);
CodeGenTy Callback;
mutable PrePostActionTy *PrePostAction;
RegionCodeGenTy() = delete;
RegionCodeGenTy &operator=(const RegionCodeGenTy &) = delete;
template <typename Callable>
static void CallbackFn(intptr_t CodeGen, CodeGenFunction &CGF,
PrePostActionTy &Action) {
return (*reinterpret_cast<Callable *>(CodeGen))(CGF, Action);
}
public:
template <typename Callable>
RegionCodeGenTy(
Callable &&CodeGen,
typename std::enable_if<
!std::is_same<typename std::remove_reference<Callable>::type,
RegionCodeGenTy>::value>::type * = nullptr)
: CodeGen(reinterpret_cast<intptr_t>(&CodeGen)),
Callback(CallbackFn<typename std::remove_reference<Callable>::type>),
PrePostAction(nullptr) {}
void setAction(PrePostActionTy &Action) const { PrePostAction = &Action; }
void operator()(CodeGenFunction &CGF) const;
};
struct OMPTaskDataTy final {
SmallVector<const Expr *, 4> PrivateVars;
SmallVector<const Expr *, 4> PrivateCopies;
SmallVector<const Expr *, 4> FirstprivateVars;
SmallVector<const Expr *, 4> FirstprivateCopies;
SmallVector<const Expr *, 4> FirstprivateInits;
SmallVector<const Expr *, 4> LastprivateVars;
SmallVector<const Expr *, 4> LastprivateCopies;
SmallVector<const Expr *, 4> ReductionVars;
SmallVector<const Expr *, 4> ReductionCopies;
SmallVector<const Expr *, 4> ReductionOps;
SmallVector<std::pair<OpenMPDependClauseKind, const Expr *>, 4> Dependences;
llvm::PointerIntPair<llvm::Value *, 1, bool> Final;
llvm::PointerIntPair<llvm::Value *, 1, bool> Schedule;
llvm::PointerIntPair<llvm::Value *, 1, bool> Priority;
llvm::Value *Reductions = nullptr;
unsigned NumberOfParts = 0;
bool Tied = true;
bool Nogroup = false;
};
/// Class intended to support codegen of all kind of the reduction clauses.
class ReductionCodeGen {
private:
/// Data required for codegen of reduction clauses.
struct ReductionData {
/// Reference to the original shared item.
const Expr *Ref = nullptr;
/// Helper expression for generation of private copy.
const Expr *Private = nullptr;
/// Helper expression for generation reduction operation.
const Expr *ReductionOp = nullptr;
ReductionData(const Expr *Ref, const Expr *Private, const Expr *ReductionOp)
: Ref(Ref), Private(Private), ReductionOp(ReductionOp) {}
};
/// List of reduction-based clauses.
SmallVector<ReductionData, 4> ClausesData;
/// List of addresses of original shared variables/expressions.
SmallVector<std::pair<LValue, LValue>, 4> SharedAddresses;
/// Sizes of the reduction items in chars.
SmallVector<std::pair<llvm::Value *, llvm::Value *>, 4> Sizes;
/// Base declarations for the reduction items.
SmallVector<const VarDecl *, 4> BaseDecls;
/// Emits lvalue for shared expression.
LValue emitSharedLValue(CodeGenFunction &CGF, const Expr *E);
/// Emits upper bound for shared expression (if array section).
LValue emitSharedLValueUB(CodeGenFunction &CGF, const Expr *E);
/// Performs aggregate initialization.
/// \param N Number of reduction item in the common list.
/// \param PrivateAddr Address of the corresponding private item.
/// \param SharedLVal Address of the original shared variable.
/// \param DRD Declare reduction construct used for reduction item.
void emitAggregateInitialization(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr, LValue SharedLVal,
const OMPDeclareReductionDecl *DRD);
public:
ReductionCodeGen(ArrayRef<const Expr *> Shareds,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> ReductionOps);
/// Emits lvalue for a reduction item.
/// \param N Number of the reduction item.
void emitSharedLValue(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
void emitAggregateType(CodeGenFunction &CGF, unsigned N);
/// Emits the code for the variable-modified type, if required.
/// \param N Number of the reduction item.
/// \param Size Size of the type in chars.
void emitAggregateType(CodeGenFunction &CGF, unsigned N, llvm::Value *Size);
/// Performs initialization of the private copy for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
/// \param DefaultInit Default initialization sequence that should be
/// performed if no reduction specific initialization is found.
/// \param SharedLVal Address of the original shared variable.
void
emitInitialization(CodeGenFunction &CGF, unsigned N, Address PrivateAddr,
LValue SharedLVal,
llvm::function_ref<bool(CodeGenFunction &)> DefaultInit);
/// Returns true if the private copy requires cleanups.
bool needCleanups(unsigned N);
/// Emits cleanup code for the reduction item.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
void emitCleanups(CodeGenFunction &CGF, unsigned N, Address PrivateAddr);
/// Adjusts \p PrivatedAddr for using instead of the original variable
/// address in normal operations.
/// \param N Number of the reduction item.
/// \param PrivateAddr Address of the corresponding private item.
Address adjustPrivateAddress(CodeGenFunction &CGF, unsigned N,
Address PrivateAddr);
/// Returns LValue for the reduction item.
LValue getSharedLValue(unsigned N) const { return SharedAddresses[N].first; }
/// Returns the size of the reduction item (in chars and total number of
/// elements in the item), or nullptr, if the size is a constant.
std::pair<llvm::Value *, llvm::Value *> getSizes(unsigned N) const {
return Sizes[N];
}
/// Returns the base declaration of the reduction item.
const VarDecl *getBaseDecl(unsigned N) const { return BaseDecls[N]; }
/// Returns the base declaration of the reduction item.
const Expr *getRefExpr(unsigned N) const { return ClausesData[N].Ref; }
/// Returns true if the initialization of the reduction item uses initializer
/// from declare reduction construct.
bool usesReductionInitializer(unsigned N) const;
};
class CGOpenMPRuntime {
public:
  /// Allows disabling the automatic handling of functions used in target
  /// regions as if they were marked `omp declare target`.
class DisableAutoDeclareTargetRAII {
CodeGenModule &CGM;
bool SavedShouldMarkAsGlobal;
public:
DisableAutoDeclareTargetRAII(CodeGenModule &CGM);
~DisableAutoDeclareTargetRAII();
};
/// Manages list of nontemporal decls for the specified directive.
class NontemporalDeclsRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
NontemporalDeclsRAII(CodeGenModule &CGM, const OMPLoopDirective &S);
~NontemporalDeclsRAII();
};
  /// Maps the expression for the lastprivate variable to the global copy used
  /// to store the new value, because the original variables are not mapped in
  /// inner parallel regions. Only private copies are captured, but we also
  /// need to store the private copy at a shared address.
  /// Also stores the expression for the private loop counter and its
  /// threadprivate name.
struct LastprivateConditionalData {
llvm::SmallDenseMap<CanonicalDeclPtr<const Decl>, SmallString<16>>
DeclToUniqeName;
LValue IVLVal;
SmallString<16> IVName;
/// True if original lvalue for loop counter can be used in codegen (simd
/// region or simd only mode) and no need to create threadprivate
/// references.
bool UseOriginalIV = false;
};
/// Manages list of lastprivate conditional decls for the specified directive.
class LastprivateConditionalRAII {
CodeGenModule &CGM;
const bool NeedToPush;
public:
LastprivateConditionalRAII(CodeGenFunction &CGF,
const OMPExecutableDirective &S, LValue IVLVal);
~LastprivateConditionalRAII();
};
protected:
CodeGenModule &CGM;
StringRef FirstSeparator, Separator;
/// Constructor allowing to redefine the name separator for the variables.
explicit CGOpenMPRuntime(CodeGenModule &CGM, StringRef FirstSeparator,
StringRef Separator);
/// Creates offloading entry for the provided entry ID \a ID,
/// address \a Addr, size \a Size, and flags \a Flags.
virtual void createOffloadEntry(llvm::Constant *ID, llvm::Constant *Addr,
uint64_t Size, int32_t Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Helper to emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Lambda codegen specific to an accelerator device.
/// An outlined function may not be an entry if, e.g., the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunctionHelper(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emits object of ident_t type with info for source location.
/// \param Flags Flags for OpenMP location.
///
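/// For reference, ident_t is laid out roughly as follows (see kmp.h):
/// \code
/// typedef struct ident {
///   kmp_int32 reserved_1;
///   kmp_int32 flags;     // OpenMP location flags.
///   kmp_int32 reserved_2;
///   kmp_int32 reserved_3;
///   char const *psource; // String describing the source location.
/// } ident_t;
/// \endcode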
llvm::Value *emitUpdateLocation(CodeGenFunction &CGF, SourceLocation Loc,
unsigned Flags = 0);
/// Returns pointer to ident_t type.
llvm::Type *getIdentTyPointerTy();
/// Gets thread id value for the current thread.
///
llvm::Value *getThreadID(CodeGenFunction &CGF, SourceLocation Loc);
/// Get the function name of an outlined region.
/// The name can be customized depending on the target.
virtual StringRef getOutlinedHelperName() const { return ".omp_outlined."; }
/// Emits \p Callee function call with arguments \p Args with location \p Loc.
void emitCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee Callee,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits address of the word in a memory where current thread id is
/// stored.
virtual Address emitThreadIDAddress(CodeGenFunction &CGF, SourceLocation Loc);
void setLocThreadIdInsertPt(CodeGenFunction &CGF,
bool AtCurrentPoint = false);
void clearLocThreadIdInsertPt(CodeGenFunction &CGF);
/// Check if the default location must be constant.
/// Default is false to support OMPT/OMPD.
virtual bool isDefaultLocationConstant() const { return false; }
/// Returns additional flags that can be stored in reserved_2 field of the
/// default location.
virtual unsigned getDefaultLocationReserved2Flags() const { return 0; }
/// Tries to emit declare variant function for \p OldGD from \p NewGD.
/// \param OrigAddr LLVM IR value for \p OldGD.
/// \param IsForDefinition true, if requested emission for the definition of
/// \p OldGD.
/// \returns true if it was able to emit a definition function for \p OldGD,
/// which points to \p NewGD.
virtual bool tryEmitDeclareVariant(const GlobalDecl &NewGD,
const GlobalDecl &OldGD,
llvm::GlobalValue *OrigAddr,
bool IsForDefinition);
/// Returns default flags for the barriers depending on the directive, for
/// which this barrier is going to be emitted.
static unsigned getDefaultFlagsForBarriers(OpenMPDirectiveKind Kind);
/// Get the LLVM type for the critical name.
llvm::ArrayType *getKmpCriticalNameTy() const { return KmpCriticalNameTy; }
/// Returns corresponding lock object for the specified critical region
/// name. If the lock object does not exist it is created, otherwise the
/// reference to the existing copy is returned.
/// \param CriticalName Name of the critical region.
///
llvm::Value *getCriticalRegionLock(StringRef CriticalName);
private:
/// Default const ident_t object used for initialization of all other
/// ident_t objects.
llvm::Constant *DefaultOpenMPPSource = nullptr;
using FlagsTy = std::pair<unsigned, unsigned>;
/// Map of flags and corresponding default locations.
using OpenMPDefaultLocMapTy = llvm::DenseMap<FlagsTy, llvm::Value *>;
OpenMPDefaultLocMapTy OpenMPDefaultLocMap;
Address getOrCreateDefaultLocation(unsigned Flags);
QualType IdentQTy;
llvm::StructType *IdentTy = nullptr;
/// Map for SourceLocation and OpenMP runtime library debug locations.
typedef llvm::DenseMap<unsigned, llvm::Value *> OpenMPDebugLocMapTy;
OpenMPDebugLocMapTy OpenMPDebugLocMap;
/// The type for a microtask which gets passed to __kmpc_fork_call().
/// Original representation is:
/// typedef void (kmpc_micro)(kmp_int32 global_tid, kmp_int32 bound_tid,...);
llvm::FunctionType *Kmpc_MicroTy = nullptr;
/// Stores debug location and ThreadID for the function.
struct DebugLocThreadIdTy {
llvm::Value *DebugLoc;
llvm::Value *ThreadID;
/// Insert point for the service instructions.
llvm::AssertingVH<llvm::Instruction> ServiceInsertPt = nullptr;
};
/// Map of local debug location, ThreadId and functions.
typedef llvm::DenseMap<llvm::Function *, DebugLocThreadIdTy>
OpenMPLocThreadIDMapTy;
OpenMPLocThreadIDMapTy OpenMPLocThreadIDMap;
/// Map of UDRs and corresponding combiner/initializer.
typedef llvm::DenseMap<const OMPDeclareReductionDecl *,
std::pair<llvm::Function *, llvm::Function *>>
UDRMapTy;
UDRMapTy UDRMap;
/// Map of functions and locally defined UDRs.
typedef llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareReductionDecl *, 4>>
FunctionUDRMapTy;
FunctionUDRMapTy FunctionUDRMap;
/// Map from the user-defined mapper declaration to its corresponding
/// functions.
llvm::DenseMap<const OMPDeclareMapperDecl *, llvm::Function *> UDMMap;
/// Map of functions and their local user-defined mappers.
using FunctionUDMMapTy =
llvm::DenseMap<llvm::Function *,
SmallVector<const OMPDeclareMapperDecl *, 4>>;
FunctionUDMMapTy FunctionUDMMap;
/// Type kmp_critical_name, originally defined as typedef kmp_int32
/// kmp_critical_name[8];
llvm::ArrayType *KmpCriticalNameTy;
/// An ordered map of auto-generated variables to their unique names.
/// It stores variables with the following names: 1) ".gomp_critical_user_" +
/// <critical_section_name> + ".var" for "omp critical" directives; 2)
/// <mangled_name_for_global_var> + ".cache." for caches of threadprivate
/// variables.
llvm::StringMap<llvm::AssertingVH<llvm::Constant>, llvm::BumpPtrAllocator>
InternalVars;
/// Type typedef kmp_int32 (* kmp_routine_entry_t)(kmp_int32, void *);
llvm::Type *KmpRoutineEntryPtrTy = nullptr;
QualType KmpRoutineEntryPtrQTy;
/// Type typedef struct kmp_task {
/// void * shareds; /**< pointer to block of pointers to
/// shared vars */
/// kmp_routine_entry_t routine; /**< pointer to routine to call for
/// executing task */
/// kmp_int32 part_id; /**< part id for the task */
/// kmp_routine_entry_t destructors; /* pointer to function to invoke
/// destructors of firstprivate C++ objects */
/// } kmp_task_t;
QualType KmpTaskTQTy;
/// Saved kmp_task_t for task directive.
QualType SavedKmpTaskTQTy;
/// Saved kmp_task_t for taskloop-based directive.
QualType SavedKmpTaskloopTQTy;
/// Type typedef struct kmp_depend_info {
/// kmp_intptr_t base_addr;
/// size_t len;
/// struct {
/// bool in:1;
/// bool out:1;
/// } flags;
/// } kmp_depend_info_t;
QualType KmpDependInfoTy;
/// struct kmp_dim { // loop bounds info casted to kmp_int64
/// kmp_int64 lo; // lower
/// kmp_int64 up; // upper
/// kmp_int64 st; // stride
/// };
QualType KmpDimTy;
/// Type struct __tgt_offload_entry{
/// void *addr; // Pointer to the offload entry info.
/// // (function or global)
/// char *name; // Name of the function or global.
/// size_t size; // Size of the entry info (0 if it is a function).
/// };
QualType TgtOffloadEntryQTy;
/// struct __tgt_device_image{
/// void *ImageStart; // Pointer to the target code start.
/// void *ImageEnd; // Pointer to the target code end.
/// // We also add the host entries to the device image, as it may be useful
/// // for the target runtime to have access to that information.
/// __tgt_offload_entry *EntriesBegin; // Begin of the table with all
/// // the entries.
/// __tgt_offload_entry *EntriesEnd; // End of the table with all the
/// // entries (non inclusive).
/// };
QualType TgtDeviceImageQTy;
/// struct __tgt_bin_desc{
/// int32_t NumDevices; // Number of devices supported.
/// __tgt_device_image *DeviceImages; // Arrays of device images
/// // (one per device).
/// __tgt_offload_entry *EntriesBegin; // Begin of the table with all the
/// // entries.
/// __tgt_offload_entry *EntriesEnd; // End of the table with all the
/// // entries (non inclusive).
/// };
QualType TgtBinaryDescriptorQTy;
/// Entity that registers the offloading constants that were emitted so
/// far.
class OffloadEntriesInfoManagerTy {
CodeGenModule &CGM;
/// Number of entries registered so far.
unsigned OffloadingEntriesNum = 0;
public:
/// Base class of the entries info.
class OffloadEntryInfo {
public:
/// Kind of a given entry.
enum OffloadingEntryInfoKinds : unsigned {
/// Entry is a target region.
OffloadingEntryInfoTargetRegion = 0,
/// Entry is a declare target variable.
OffloadingEntryInfoDeviceGlobalVar = 1,
/// Invalid entry info.
OffloadingEntryInfoInvalid = ~0u
};
protected:
OffloadEntryInfo() = delete;
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind) : Kind(Kind) {}
explicit OffloadEntryInfo(OffloadingEntryInfoKinds Kind, unsigned Order,
uint32_t Flags)
: Flags(Flags), Order(Order), Kind(Kind) {}
~OffloadEntryInfo() = default;
public:
bool isValid() const { return Order != ~0u; }
unsigned getOrder() const { return Order; }
OffloadingEntryInfoKinds getKind() const { return Kind; }
uint32_t getFlags() const { return Flags; }
void setFlags(uint32_t NewFlags) { Flags = NewFlags; }
llvm::Constant *getAddress() const {
return cast_or_null<llvm::Constant>(Addr);
}
void setAddress(llvm::Constant *V) {
assert(!Addr.pointsToAliveValue() && "Address has been set before!");
Addr = V;
}
static bool classof(const OffloadEntryInfo *Info) { return true; }
private:
/// Address of the entity that has to be mapped for offloading.
llvm::WeakTrackingVH Addr;
/// Flags associated with the device global.
uint32_t Flags = 0u;
/// Order this entry was emitted.
unsigned Order = ~0u;
OffloadingEntryInfoKinds Kind = OffloadingEntryInfoInvalid;
};
/// Return true if there are no entries defined.
bool empty() const;
/// Return number of entries defined so far.
unsigned size() const { return OffloadingEntriesNum; }
OffloadEntriesInfoManagerTy(CodeGenModule &CGM) : CGM(CGM) {}
//
// Target region entries related.
//
/// Kind of the target registry entry.
enum OMPTargetRegionEntryKind : uint32_t {
/// Mark the entry as target region.
OMPTargetRegionEntryTargetRegion = 0x0,
/// Mark the entry as a global constructor.
OMPTargetRegionEntryCtor = 0x02,
/// Mark the entry as a global destructor.
OMPTargetRegionEntryDtor = 0x04,
};
/// Target region entries info.
class OffloadEntryInfoTargetRegion final : public OffloadEntryInfo {
/// Address that can be used as the ID of the entry.
llvm::Constant *ID = nullptr;
public:
OffloadEntryInfoTargetRegion()
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion) {}
explicit OffloadEntryInfoTargetRegion(unsigned Order,
llvm::Constant *Addr,
llvm::Constant *ID,
OMPTargetRegionEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoTargetRegion, Order, Flags),
ID(ID) {
setAddress(Addr);
}
llvm::Constant *getID() const { return ID; }
void setID(llvm::Constant *V) {
assert(!ID && "ID has been set before!");
ID = V;
}
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoTargetRegion;
}
};
/// Initialize target region entry.
void initializeTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
unsigned Order);
/// Register target region entry.
void registerTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum,
llvm::Constant *Addr, llvm::Constant *ID,
OMPTargetRegionEntryKind Flags);
/// Return true if a target region entry with the provided information
/// exists.
bool hasTargetRegionEntryInfo(unsigned DeviceID, unsigned FileID,
StringRef ParentName, unsigned LineNum) const;
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(unsigned, unsigned, StringRef, unsigned,
const OffloadEntryInfoTargetRegion &)>
OffloadTargetRegionEntryInfoActTy;
void actOnTargetRegionEntriesInfo(
const OffloadTargetRegionEntryInfoActTy &Action);
//
// Device global variable entries related.
//
/// Kind of the global variable entry.
enum OMPTargetGlobalVarEntryKind : uint32_t {
/// Mark the entry as a 'declare target to' variable.
OMPTargetGlobalVarEntryTo = 0x0,
/// Mark the entry as a 'declare target link' variable.
OMPTargetGlobalVarEntryLink = 0x1,
};
/// Device global variable entries info.
class OffloadEntryInfoDeviceGlobalVar final : public OffloadEntryInfo {
/// Size of the global variable in chars.
CharUnits VarSize;
llvm::GlobalValue::LinkageTypes Linkage;
public:
OffloadEntryInfoDeviceGlobalVar()
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar) {}
explicit OffloadEntryInfoDeviceGlobalVar(unsigned Order,
OMPTargetGlobalVarEntryKind Flags)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags) {}
explicit OffloadEntryInfoDeviceGlobalVar(
unsigned Order, llvm::Constant *Addr, CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage)
: OffloadEntryInfo(OffloadingEntryInfoDeviceGlobalVar, Order, Flags),
VarSize(VarSize), Linkage(Linkage) {
setAddress(Addr);
}
CharUnits getVarSize() const { return VarSize; }
void setVarSize(CharUnits Size) { VarSize = Size; }
llvm::GlobalValue::LinkageTypes getLinkage() const { return Linkage; }
void setLinkage(llvm::GlobalValue::LinkageTypes LT) { Linkage = LT; }
static bool classof(const OffloadEntryInfo *Info) {
return Info->getKind() == OffloadingEntryInfoDeviceGlobalVar;
}
};
/// Initialize device global variable entry.
void initializeDeviceGlobalVarEntryInfo(StringRef Name,
OMPTargetGlobalVarEntryKind Flags,
unsigned Order);
/// Register device global variable entry.
void
registerDeviceGlobalVarEntryInfo(StringRef VarName, llvm::Constant *Addr,
CharUnits VarSize,
OMPTargetGlobalVarEntryKind Flags,
llvm::GlobalValue::LinkageTypes Linkage);
/// Checks if the variable with the given name has been registered already.
bool hasDeviceGlobalVarEntryInfo(StringRef VarName) const {
return OffloadEntriesDeviceGlobalVar.count(VarName) > 0;
}
/// Applies action \a Action on all registered entries.
typedef llvm::function_ref<void(StringRef,
const OffloadEntryInfoDeviceGlobalVar &)>
OffloadDeviceGlobalVarEntryInfoActTy;
void actOnDeviceGlobalVarEntriesInfo(
const OffloadDeviceGlobalVarEntryInfoActTy &Action);
private:
/// Storage for target region entries. The storage is to be indexed by
/// device ID, file ID, parent function name and line number.
typedef llvm::DenseMap<unsigned, OffloadEntryInfoTargetRegion>
OffloadEntriesTargetRegionPerLine;
typedef llvm::StringMap<OffloadEntriesTargetRegionPerLine>
OffloadEntriesTargetRegionPerParentName;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerParentName>
OffloadEntriesTargetRegionPerFile;
typedef llvm::DenseMap<unsigned, OffloadEntriesTargetRegionPerFile>
OffloadEntriesTargetRegionPerDevice;
typedef OffloadEntriesTargetRegionPerDevice OffloadEntriesTargetRegionTy;
OffloadEntriesTargetRegionTy OffloadEntriesTargetRegion;
/// Storage for device global variable entries. The storage is to be
/// indexed by mangled name.
typedef llvm::StringMap<OffloadEntryInfoDeviceGlobalVar>
OffloadEntriesDeviceGlobalVarTy;
OffloadEntriesDeviceGlobalVarTy OffloadEntriesDeviceGlobalVar;
};
OffloadEntriesInfoManagerTy OffloadEntriesInfoManager;
bool ShouldMarkAsGlobal = true;
/// List of the emitted declarations.
llvm::DenseSet<CanonicalDeclPtr<const Decl>> AlreadyEmittedTargetDecls;
/// List of the global variables with their addresses that should not be
/// emitted for the target.
llvm::StringMap<llvm::WeakTrackingVH> EmittedNonTargetVariables;
/// List of variables that can become declare target implicitly and, thus,
/// must be emitted.
llvm::SmallDenseSet<const VarDecl *> DeferredGlobalVariables;
/// Mapping of the original functions to their variants and original global
/// decl.
llvm::MapVector<CanonicalDeclPtr<const FunctionDecl>,
std::pair<GlobalDecl, GlobalDecl>>
DeferredVariantFunction;
using NontemporalDeclsSet = llvm::SmallDenseSet<CanonicalDeclPtr<const Decl>>;
/// Stack for list of declarations in current context marked as nontemporal.
/// The set is the union of all current stack elements.
llvm::SmallVector<NontemporalDeclsSet, 4> NontemporalDeclsStack;
/// Stack for list of addresses of declarations in current context marked as
/// lastprivate conditional. The set is the union of all current stack
/// elements.
llvm::SmallVector<LastprivateConditionalData, 4> LastprivateConditionalStack;
/// Flag for keeping track of whether a requires unified_shared_memory
/// directive is present.
bool HasRequiresUnifiedSharedMemory = false;
/// Flag for keeping track of whether a target region has been emitted.
bool HasEmittedTargetRegion = false;
/// Flag for keeping track of whether a declare target region has been
/// emitted.
bool HasEmittedDeclareTargetRegion = false;
/// Loads all the offload entries information from the host IR
/// metadata.
void loadOffloadInfoMetadata();
/// Returns __tgt_offload_entry type.
QualType getTgtOffloadEntryQTy();
/// Returns __tgt_device_image type.
QualType getTgtDeviceImageQTy();
/// Returns __tgt_bin_desc type.
QualType getTgtBinaryDescriptorQTy();
/// Start scanning from statement \a S and emit all target regions
/// found along the way.
/// \param S Starting statement.
/// \param ParentName Name of the function declaration that is being scanned.
void scanForTargetRegionsFunctions(const Stmt *S, StringRef ParentName);
/// Build type kmp_routine_entry_t (if not built yet).
void emitKmpRoutineEntryT(QualType KmpInt32Ty);
/// Returns pointer to kmpc_micro type.
llvm::Type *getKmpc_MicroPointerTy();
/// Returns specified OpenMP runtime function.
/// \param Function OpenMP runtime function.
/// \return Specified function.
llvm::FunctionCallee createRuntimeFunction(unsigned Function);
/// Returns __kmpc_for_static_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createForStaticInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_init_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchInitFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_next_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchNextFunction(unsigned IVSize,
bool IVSigned);
/// Returns __kmpc_dispatch_fini_* runtime function for the specified
/// size \a IVSize and sign \a IVSigned.
llvm::FunctionCallee createDispatchFiniFunction(unsigned IVSize,
bool IVSigned);
/// If the specified mangled name is not in the module, create and
/// return a threadprivate cache object. This object is a pointer's worth of
/// storage that's reserved for use by the OpenMP runtime.
/// \param VD Threadprivate variable.
/// \return Cache variable for the specified threadprivate.
llvm::Constant *getOrCreateThreadPrivateCache(const VarDecl *VD);
/// Gets (if a variable with the given name already exists) or creates an
/// internal global variable with the specified Name. The created variable has
/// CommonLinkage by default and is initialized with a null value.
/// \param Ty Type of the global variable. If the variable already exists, the
/// type must be the same.
/// \param Name Name of the variable.
llvm::Constant *getOrCreateInternalVariable(llvm::Type *Ty,
const llvm::Twine &Name,
unsigned AddressSpace = 0);
/// Set of threadprivate variables with the generated initializer.
llvm::StringSet<> ThreadPrivateWithDefinition;
/// Set of declare target variables with the generated initializer.
llvm::StringSet<> DeclareTargetWithDefinition;
/// Emits initialization code for the threadprivate variables.
/// \param VDAddr Address of the global variable \a VD.
/// \param Ctor Pointer to a global init function for \a VD.
/// \param CopyCtor Pointer to a global copy function for \a VD.
/// \param Dtor Pointer to a global destructor function for \a VD.
/// \param Loc Location of threadprivate declaration.
void emitThreadPrivateVarInit(CodeGenFunction &CGF, Address VDAddr,
llvm::Value *Ctor, llvm::Value *CopyCtor,
llvm::Value *Dtor, SourceLocation Loc);
/// Emit the array initialization or deletion portion for user-defined mapper
/// code generation.
void emitUDMapperArrayInitOrDel(CodeGenFunction &MapperCGF,
llvm::Value *Handle, llvm::Value *BasePtr,
llvm::Value *Ptr, llvm::Value *Size,
llvm::Value *MapType, CharUnits ElementSize,
llvm::BasicBlock *ExitBB, bool IsInit);
struct TaskResultTy {
llvm::Value *NewTask = nullptr;
llvm::Function *TaskEntry = nullptr;
llvm::Value *NewTaskNewTaskTTy = nullptr;
LValue TDBase;
const RecordDecl *KmpTaskTQTyRD = nullptr;
llvm::Value *TaskDupFn = nullptr;
};
/// Emit task region for the task directive. The task region is emitted in
/// several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
TaskResultTy emitTaskInit(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const OMPTaskDataTy &Data);
/// Returns default address space for the constant firstprivates, 0 by
/// default.
virtual unsigned getDefaultFirstprivateAddressSpace() const { return 0; }
/// Emit code that pushes the trip count of loops associated with constructs
/// 'target teams distribute' and 'teams distribute parallel for'.
/// \param SizeEmitter Emits the int64 value for the number of iterations of
/// the associated loop.
void emitTargetNumIterationsCall(
CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Value *DeviceID,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
public:
explicit CGOpenMPRuntime(CodeGenModule &CGM)
: CGOpenMPRuntime(CGM, ".", ".") {}
virtual ~CGOpenMPRuntime() {}
virtual void clear();
/// Emits code for OpenMP 'if' clause using specified \a CodeGen
/// function. Here is the logic:
/// if (Cond) {
/// ThenGen();
/// } else {
/// ElseGen();
/// }
void emitIfClause(CodeGenFunction &CGF, const Expr *Cond,
const RegionCodeGenTy &ThenGen,
const RegionCodeGenTy &ElseGen);
/// Checks if the \p Body is the \a CompoundStmt and returns its child
/// statement iff there is only one that is not evaluatable at compile
/// time.
static const Stmt *getSingleCompoundChild(ASTContext &Ctx, const Stmt *Body);
/// Builds a name from the given \p Parts, joined with the platform-specific
/// name separator.
std::string getName(ArrayRef<StringRef> Parts) const;
/// Emit code for the specified user defined reduction construct.
virtual void emitUserDefinedReduction(CodeGenFunction *CGF,
const OMPDeclareReductionDecl *D);
/// Get combiner/initializer for the specified user-defined reduction, if any.
virtual std::pair<llvm::Function *, llvm::Function *>
getUserDefinedReduction(const OMPDeclareReductionDecl *D);
/// Emit the function for the user defined mapper construct.
void emitUserDefinedMapper(const OMPDeclareMapperDecl *D,
CodeGenFunction *CGF = nullptr);
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitParallelOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
virtual llvm::Function *emitTeamsOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen);
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
virtual llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts);
/// Cleans up references to the objects in the finished function.
///
virtual void functionFinished(CodeGenFunction &CGF);
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
virtual void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond);
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
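/// A sketch of the expected generated pattern, where loc and gtid are the
/// emitted location and thread id:
/// \code
/// __kmpc_critical(&loc, gtid, &lock); // __kmpc_critical_with_hint if a
///                                     // 'hint' clause is present.
/// <critical region body>;
/// __kmpc_end_critical(&loc, gtid, &lock);
/// \endcode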
virtual void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr);
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
virtual void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc);
/// Emits code for a taskyield directive.
virtual void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
virtual void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc);
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
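/// A sketch of the expected generated pattern (copyprivate handling, which
/// additionally uses __kmpc_copyprivate, is omitted):
/// \code
/// if (__kmpc_single(&loc, gtid)) {
///   <single region body>;
///   __kmpc_end_single(&loc, gtid);
/// }
/// \endcode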
virtual void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen,
SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps);
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
virtual void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads);
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if need to emit checks for cancellation barriers.
/// \param ForceSimpleCall true if a simple barrier call must be emitted,
/// false if the runtime class decides which one to emit (simple or with
/// cancellation checks).
///
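/// A sketch of the two emitted forms:
/// \code
/// __kmpc_barrier(&loc, gtid);            // Simple barrier.
/// if (__kmpc_cancel_barrier(&loc, gtid)) // Barrier with cancellation
///   <cancellation exit>;                 // checks; nonzero if cancelled.
/// \endcode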
virtual void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind,
bool EmitChecks = true,
bool ForceSimpleCall = false);
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static non-chunked.
/// This kind of distribute directive is emitted without outer loop.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticNonchunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is static chunked.
/// \param ScheduleKind Schedule kind specified in the 'dist_schedule' clause.
/// \param Chunked True if chunk is specified in the clause.
///
virtual bool isStaticChunked(OpenMPDistScheduleClauseKind ScheduleKind,
bool Chunked) const;
/// Check if the specified \a ScheduleKind is dynamic.
/// This kind of worksharing directive is emitted without outer loop.
/// \param ScheduleKind Schedule Kind specified in the 'schedule' clause.
///
virtual bool isDynamic(OpenMPScheduleClauseKind ScheduleKind) const;
/// Struct with the values to be passed to the dispatch runtime function.
struct DispatchRTInput {
/// Loop lower bound
llvm::Value *LB = nullptr;
/// Loop upper bound
llvm::Value *UB = nullptr;
/// Chunk size specified using 'schedule' clause (nullptr if chunk
/// was not specified)
llvm::Value *Chunk = nullptr;
DispatchRTInput() = default;
DispatchRTInput(llvm::Value *LB, llvm::Value *UB, llvm::Value *Chunk)
: LB(LB), UB(UB), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize the loop before its
/// start.
/// This is used for non-static schedule types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, a chunk of 1 will be used.
///
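/// A sketch of the dispatch loop this initialization leads into, for a
/// 32-bit signed induction variable:
/// \code
/// __kmpc_dispatch_init_4(&loc, gtid, schedule, lb, ub, st, chunk);
/// while (__kmpc_dispatch_next_4(&loc, gtid, &last, &lb, &ub, &st)) {
///   for (i = lb; i <= ub; i += st)
///     <loop body>;
/// }
/// \endcode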
virtual void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues);
/// Struct with the values to be passed to the static runtime function
struct StaticRTInput {
/// Size of the iteration variable in bits.
unsigned IVSize = 0;
/// Sign of the iteration variable.
bool IVSigned = false;
/// true if loop is ordered, false otherwise.
bool Ordered = false;
/// Address of the output variable in which the flag of the last iteration
/// is returned.
Address IL = Address::invalid();
/// Address of the output variable in which the lower iteration number is
/// returned.
Address LB = Address::invalid();
/// Address of the output variable in which the upper iteration number is
/// returned.
Address UB = Address::invalid();
/// Address of the output variable in which the stride value is returned;
/// necessary to generate the static_chunked scheduled loop.
Address ST = Address::invalid();
/// Value of the chunk for the static_chunked scheduled loop. For the
/// default (nullptr) value, a chunk of 1 will be used.
llvm::Value *Chunk = nullptr;
StaticRTInput(unsigned IVSize, bool IVSigned, bool Ordered, Address IL,
Address LB, Address UB, Address ST,
llvm::Value *Chunk = nullptr)
: IVSize(IVSize), IVSigned(IVSigned), Ordered(Ordered), IL(IL), LB(LB),
UB(UB), ST(ST), Chunk(Chunk) {}
};
/// Call the appropriate runtime routine to initialize the loop before its
/// start.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
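/// A sketch of the loop shape for the static schedule, for a 32-bit signed
/// induction variable:
/// \code
/// __kmpc_for_static_init_4(&loc, gtid, schedtype, &last, &lb, &ub, &st,
///                          incr, chunk);
/// for (i = lb; i <= ub; i += incr)
///   <loop body>;
/// __kmpc_for_static_fini(&loc, gtid);
/// \endcode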
virtual void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to initialize the distribute loop
/// before its start.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
virtual void emitDistributeStaticInit(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values);
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
virtual void emitForOrderedIterationEnd(CodeGenFunction &CGF,
SourceLocation Loc, unsigned IVSize,
bool IVSigned);
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
virtual void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind);
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
virtual llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned,
Address IL, Address LB,
Address UB, Address ST);
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
virtual void emitNumThreadsClause(CodeGenFunction &CGF,
llvm::Value *NumThreads,
SourceLocation Loc);
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
virtual void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc);
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
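/// The address is typically obtained through the runtime call (see kmp.h):
/// \code
/// void *__kmpc_threadprivate_cached(ident_t *loc, kmp_int32 gtid,
///                                   void *data, size_t size, void ***cache);
/// \endcode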
virtual Address getAddrOfThreadPrivate(CodeGenFunction &CGF,
const VarDecl *VD,
Address VDAddr,
SourceLocation Loc);
/// Returns the address of the variable marked as declare target with the
/// link clause, OR as declare target with the to clause and unified memory.
virtual Address getAddrOfDeclareTargetVar(const VarDecl *VD);
/// Emit code for the initialization of a threadprivate variable. It emits
/// a call to the runtime library which adds the initial value to the newly
/// created threadprivate variable (if it is not constant) and registers a
/// destructor for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
virtual llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr);
/// Emit code for the initialization of a declare target variable.
/// \param VD Declare target variable.
/// \param Addr Address of the global variable \a VD.
/// \param PerformInit true if initialization expression is not constant.
virtual bool emitDeclareTargetVarDefinition(const VarDecl *VD,
llvm::GlobalVariable *Addr,
bool PerformInit);
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
virtual Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name);
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
virtual void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc);
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data);
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup), where new_task
/// is a resulting structure from the previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
virtual void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D,
llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds,
const Expr *IfCond, const OMPTaskDataTy &Data);
/// Emit code for the directive that does not require outlining.
///
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param HasCancel true if region has inner cancel directive, false
/// otherwise.
virtual void emitInlinedDirective(CodeGenFunction &CGF,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen,
bool HasCancel = false);
/// Emits reduction function.
/// \param ArgsType Array type containing pointers to reduction variables.
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
llvm::Function *emitReductionFunction(SourceLocation Loc,
llvm::Type *ArgsType,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps);
/// Emits single reduction combiner
void emitSingleReductionCombiner(CodeGenFunction &CGF,
const Expr *ReductionOp,
const Expr *PrivateRef,
const DeclRefExpr *LHS,
const DeclRefExpr *RHS);
struct ReductionOptionsTy {
bool WithNowait;
bool SimpleReduction;
OpenMPDirectiveKind ReductionKind;
};
/// Emit code for the reduction clause. The following code should be emitted
/// for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
virtual void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options);
/// Emit code for the initialization of the task reduction clause. The
/// following code should be emitted for the reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
virtual llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF,
SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data);
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows reusing existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
virtual void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N);
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
virtual Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal);
/// Emit code for 'taskwait' directive.
virtual void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc);
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
virtual void emitCancellationPointCall(CodeGenFunction &CGF,
SourceLocation Loc,
OpenMPDirectiveKind CancelRegion);
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
virtual void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion);
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g., the if clause always
/// evaluates to false.
virtual void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen);
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts to offload the execution to the device; in the event of
/// a failure it executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param SizeEmitter Callback to emit number of iterations for loop-based
/// directives.
virtual void
emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
const Expr *IfCond, const Expr *Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter);
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
virtual bool emitTargetFunctions(GlobalDecl GD);
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
virtual bool emitTargetGlobalVariable(GlobalDecl GD);
/// Checks if the provided global decl \a VD is a declare target variable and
/// registers it when emitting code for the host.
virtual void registerTargetGlobalVariable(const VarDecl *VD,
llvm::Constant *Addr);
/// Registers provided target firstprivate variable as global on the
/// target.
llvm::Constant *registerTargetFirstprivateCopy(CodeGenFunction &CGF,
const VarDecl *VD);
/// Emit the global \a GD if it is meaningful for the target. Returns true
/// if it was emitted successfully.
/// \param GD Global to scan.
virtual bool emitTargetGlobal(GlobalDecl GD);
/// Creates and returns a registration function for when at least one
/// 'requires' directive was used in the current module.
llvm::Function *emitRequiresDirectiveRegFun();
/// Creates all the offload entries in the current compilation unit
/// along with the associated metadata.
void createOffloadEntriesAndInfoMetadata();
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
virtual void emitTeamsCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars);
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
virtual void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc);
/// Struct that keeps all the relevant information that should be kept
/// throughout a 'target data' region.
class TargetDataInfo {
/// Set to true if device pointer information has to be obtained.
bool RequiresDevicePointerInfo = false;
public:
/// The array of base pointers passed to the runtime library.
llvm::Value *BasePointersArray = nullptr;
/// The array of section pointers passed to the runtime library.
llvm::Value *PointersArray = nullptr;
/// The array of sizes passed to the runtime library.
llvm::Value *SizesArray = nullptr;
/// The array of map types passed to the runtime library.
llvm::Value *MapTypesArray = nullptr;
/// The total number of pointers passed to the runtime library.
unsigned NumberOfPtrs = 0u;
/// Map between a declaration of a capture and the corresponding base
/// pointer address where the runtime returns the device pointers.
llvm::DenseMap<const ValueDecl *, Address> CaptureDeviceAddrMap;
explicit TargetDataInfo() {}
explicit TargetDataInfo(bool RequiresDevicePointerInfo)
: RequiresDevicePointerInfo(RequiresDevicePointerInfo) {}
/// Clear information about the data arrays.
void clearArrayInfo() {
BasePointersArray = nullptr;
PointersArray = nullptr;
SizesArray = nullptr;
MapTypesArray = nullptr;
NumberOfPtrs = 0u;
}
/// Return true if the current target data information has valid arrays.
bool isValid() {
return BasePointersArray && PointersArray && SizesArray &&
MapTypesArray && NumberOfPtrs;
}
bool requiresDevicePointerInfo() { return RequiresDevicePointerInfo; }
};
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
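/// A sketch of the expected bracketing with the libomptarget entry points
/// (argument names are illustrative):
/// \code
/// __tgt_target_data_begin(device_id, num_args, base_ptrs, ptrs, sizes,
///                         map_types);
/// <target data region body>;
/// __tgt_target_data_end(device_id, num_args, base_ptrs, ptrs, sizes,
///                       map_types);
/// \endcode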
virtual void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond, const Expr *Device,
const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info);
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
virtual void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device);
/// Marks function \a Fn with properly mangled versions of vector functions.
/// \param FD Function marked as 'declare simd'.
/// \param Fn LLVM function that must be marked with 'declare simd'
/// attributes.
virtual void emitDeclareSimdFunction(const FunctionDecl *FD,
llvm::Function *Fn);
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
virtual void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations);
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
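/// A sketch of the runtime calls involved in doacross support:
/// \code
/// __kmpc_doacross_init(&loc, gtid, num_dims, &dims); // kmp_dim dims[].
/// __kmpc_doacross_wait(&loc, gtid, vec); // For 'depend(sink: ...)'.
/// __kmpc_doacross_post(&loc, gtid, vec); // For 'depend(source)'.
/// __kmpc_doacross_fini(&loc, gtid);
/// \endcode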
virtual void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C);
/// Translates the native parameter of the outlined function if this is
/// required for the target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
virtual const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const {
return NativeParam;
}
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
virtual Address getParameterAddress(CodeGenFunction &CGF,
const VarDecl *NativeParam,
const VarDecl *TargetParam) const;
/// Choose default schedule type and chunk value for the
/// dist_schedule clause.
virtual void getDefaultDistScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPDistScheduleClauseKind &ScheduleKind,
llvm::Value *&Chunk) const {}
/// Choose default schedule type and chunk value for the
/// schedule clause.
virtual void getDefaultScheduleAndChunk(CodeGenFunction &CGF,
const OMPLoopDirective &S, OpenMPScheduleClauseKind &ScheduleKind,
const Expr *&ChunkExpr) const;
/// Emits call of the outlined function with the provided arguments,
/// translating these arguments to correct target-specific arguments.
virtual void
emitOutlinedFunctionCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::FunctionCallee OutlinedFn,
ArrayRef<llvm::Value *> Args = llvm::None) const;
/// Emits OpenMP-specific function prolog.
/// Required for device constructs.
virtual void emitFunctionProlog(CodeGenFunction &CGF, const Decl *D);
/// Gets the OpenMP-specific address of the local variable.
virtual Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD);
/// Marks the declaration as already emitted for the device code and returns
/// true if it was already marked, false otherwise.
bool markAsGlobalTarget(GlobalDecl GD);
/// Emit deferred declare target variables marked for deferred emission.
void emitDeferredTargetDecls() const;
/// Adjust some parameters for the target-based directives, like addresses of
/// the variables captured by reference in lambdas.
virtual void
adjustTargetSpecificDataForLambdas(CodeGenFunction &CGF,
const OMPExecutableDirective &D) const;
/// Perform a check on the requires decl to ensure that the target
/// architecture supports unified addressing.
virtual void checkArchForUnifiedAddressing(const OMPRequiresDecl *D);
/// Checks if the variable has associated OMPAllocateDeclAttr attribute with
/// the predefined allocator and translates it into the corresponding address
/// space.
virtual bool hasAllocateAttributeForGlobalVar(const VarDecl *VD, LangAS &AS);
/// Return whether the unified_shared_memory has been specified.
bool hasRequiresUnifiedSharedMemory() const;
/// Emits the definition of the declare variant function.
virtual bool emitDeclareVariant(GlobalDecl GD, bool IsForDefinition);
/// Checks if the \p VD variable is marked as a nontemporal declaration in the
/// current context.
bool isNontemporalDecl(const ValueDecl *VD) const;
/// Initializes global counter for lastprivate conditional.
virtual void
initLastprivateConditionalCounter(CodeGenFunction &CGF,
const OMPExecutableDirective &S);
/// Checks if the provided \p LVal is lastprivate conditional and emits the
/// code to update the value of the original variable.
/// \code
/// lastprivate(conditional: a)
/// ...
/// <type> a;
/// lp_a = ...;
/// #pragma omp critical(a)
/// if (last_iv_a <= iv) {
/// last_iv_a = iv;
/// global_a = lp_a;
/// }
/// \endcode
virtual void checkAndEmitLastprivateConditional(CodeGenFunction &CGF,
const Expr *LHS);
/// Emits the final update of the global copy used for the lastprivate
/// conditional, if any.
/// \param PrivLVal LValue for the private copy.
/// \param VD Original lastprivate declaration.
virtual void emitLastprivateConditionalFinalUpdate(CodeGenFunction &CGF,
LValue PrivLVal,
const VarDecl *VD,
SourceLocation Loc);
};
/// Class supports emission of SIMD-only code.
class CGOpenMPSIMDRuntime final : public CGOpenMPRuntime {
public:
explicit CGOpenMPSIMDRuntime(CodeGenModule &CGM) : CGOpenMPRuntime(CGM) {}
~CGOpenMPSIMDRuntime() override {}
/// Emits outlined function for the specified OpenMP parallel directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitParallelOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the specified OpenMP teams directive
/// \a D. This outlined function has type void(*)(kmp_int32 *ThreadID,
/// kmp_int32 BoundID, struct context_vars*).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
llvm::Function *
emitTeamsOutlinedFunction(const OMPExecutableDirective &D,
const VarDecl *ThreadIDVar,
OpenMPDirectiveKind InnermostKind,
const RegionCodeGenTy &CodeGen) override;
/// Emits outlined function for the OpenMP task directive \a D. This
/// outlined function has type void(*)(kmp_int32 ThreadID, struct task_t*
/// TaskT).
/// \param D OpenMP directive.
/// \param ThreadIDVar Variable for thread id in the current OpenMP region.
/// \param PartIDVar Variable for partition id in the current OpenMP untied
/// task region.
/// \param TaskTVar Variable for task_t argument.
/// \param InnermostKind Kind of innermost directive (for simple directives it
/// is a directive itself, for combined - its innermost directive).
/// \param CodeGen Code generation sequence for the \a D directive.
/// \param Tied true if task is generated for tied task, false otherwise.
/// \param NumberOfParts Number of parts in untied task. Ignored for tied
/// tasks.
///
llvm::Function *emitTaskOutlinedFunction(
const OMPExecutableDirective &D, const VarDecl *ThreadIDVar,
const VarDecl *PartIDVar, const VarDecl *TaskTVar,
OpenMPDirectiveKind InnermostKind, const RegionCodeGenTy &CodeGen,
bool Tied, unsigned &NumberOfParts) override;
/// Emits code for parallel or serial call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run in parallel threads. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
///
void emitParallelCall(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars,
const Expr *IfCond) override;
/// Emits a critical region.
/// \param CriticalName Name of the critical region.
/// \param CriticalOpGen Generator for the statement associated with the given
/// critical region.
/// \param Hint Value of the 'hint' clause (optional).
void emitCriticalRegion(CodeGenFunction &CGF, StringRef CriticalName,
const RegionCodeGenTy &CriticalOpGen,
SourceLocation Loc,
const Expr *Hint = nullptr) override;
/// Emits a master region.
/// \param MasterOpGen Generator for the statement associated with the given
/// master region.
void emitMasterRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &MasterOpGen,
SourceLocation Loc) override;
/// Emits code for a taskyield directive.
void emitTaskyieldCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit a taskgroup region.
/// \param TaskgroupOpGen Generator for the statement associated with the
/// given taskgroup region.
void emitTaskgroupRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &TaskgroupOpGen,
SourceLocation Loc) override;
/// Emits a single region.
/// \param SingleOpGen Generator for the statement associated with the given
/// single region.
void emitSingleRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &SingleOpGen, SourceLocation Loc,
ArrayRef<const Expr *> CopyprivateVars,
ArrayRef<const Expr *> DestExprs,
ArrayRef<const Expr *> SrcExprs,
ArrayRef<const Expr *> AssignmentOps) override;
/// Emit an ordered region.
/// \param OrderedOpGen Generator for the statement associated with the given
/// ordered region.
void emitOrderedRegion(CodeGenFunction &CGF,
const RegionCodeGenTy &OrderedOpGen,
SourceLocation Loc, bool IsThreads) override;
/// Emit an implicit/explicit barrier for OpenMP threads.
/// \param Kind Directive for which this implicit barrier call must be
/// generated. Must be OMPD_barrier for explicit barrier generation.
/// \param EmitChecks true if checks for cancellation barriers need to be
/// emitted.
/// \param ForceSimpleCall true if a simple barrier call must be emitted, false if
/// runtime class decides which one to emit (simple or with cancellation
/// checks).
///
void emitBarrierCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind Kind, bool EmitChecks = true,
bool ForceSimpleCall = false) override;
/// This is used for non-static schedule types and when the ordered
/// clause is present on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds \a LB and \a UB and stride \a ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param Ordered true if loop is ordered, false otherwise.
/// \param DispatchValues struct containing llvm values for lower bound, upper
/// bound, and chunk expression.
/// For the default (nullptr) value, a chunk of 1 will be used.
///
void emitForDispatchInit(CodeGenFunction &CGF, SourceLocation Loc,
const OpenMPScheduleTy &ScheduleKind,
unsigned IVSize, bool IVSigned, bool Ordered,
const DispatchRTInput &DispatchValues) override;
/// Call the appropriate runtime routine to initialize it before start
/// of loop.
///
/// This is used only in case of static schedule, when the user did not
/// specify an ordered clause on the loop construct.
/// Depending on the loop schedule, it is necessary to call some runtime
/// routine before start of the OpenMP loop to get the loop upper / lower
/// bounds LB and UB and stride ST.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive.
/// \param ScheduleKind Schedule kind, specified by the 'schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitForStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind,
const OpenMPScheduleTy &ScheduleKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to initialize it before start of
/// the distribute loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param SchedKind Schedule kind, specified by the 'dist_schedule' clause.
/// \param Values Input arguments for the construct.
///
void emitDistributeStaticInit(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDistScheduleClauseKind SchedKind,
const StaticRTInput &Values) override;
/// Call the appropriate runtime routine to notify that we finished
/// iteration of the ordered loop with the dynamic scheduling.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
///
void emitForOrderedIterationEnd(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned) override;
/// Call the appropriate runtime routine to notify that we finished
/// all the work with current loop.
///
/// \param CGF Reference to current CodeGenFunction.
/// \param Loc Clang source location.
/// \param DKind Kind of the directive for which the static finish is emitted.
///
void emitForStaticFinish(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind DKind) override;
/// Call __kmpc_dispatch_next(
/// ident_t *loc, kmp_int32 tid, kmp_int32 *p_lastiter,
/// kmp_int[32|64] *p_lower, kmp_int[32|64] *p_upper,
/// kmp_int[32|64] *p_stride);
/// \param IVSize Size of the iteration variable in bits.
/// \param IVSigned Sign of the iteration variable.
/// \param IL Address of the output variable in which the flag of the
/// last iteration is returned.
/// \param LB Address of the output variable in which the lower iteration
/// number is returned.
/// \param UB Address of the output variable in which the upper iteration
/// number is returned.
/// \param ST Address of the output variable in which the stride value is
/// returned.
llvm::Value *emitForNext(CodeGenFunction &CGF, SourceLocation Loc,
unsigned IVSize, bool IVSigned, Address IL,
Address LB, Address UB, Address ST) override;
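// Illustrative sketch (editor's addition, not part of the original header):
// the generated code wraps the __kmpc_dispatch_next call documented above in
// a loop of roughly this shape; all names are placeholders, not actual
// generated IR:
//
//   while (__kmpc_dispatch_next(loc, tid, &last, &lb, &ub, &st)) {
//     for (i = lb; i <= ub; i += st)
//       loop_body(i);
//   }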
/// Emits call to void __kmpc_push_num_threads(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_threads) to generate code for 'num_threads'
/// clause.
/// \param NumThreads An integer value of threads.
void emitNumThreadsClause(CodeGenFunction &CGF, llvm::Value *NumThreads,
SourceLocation Loc) override;
/// Emit call to void __kmpc_push_proc_bind(ident_t *loc, kmp_int32
/// global_tid, int proc_bind) to generate code for 'proc_bind' clause.
void emitProcBindClause(CodeGenFunction &CGF,
llvm::omp::ProcBindKind ProcBind,
SourceLocation Loc) override;
/// Returns address of the threadprivate variable for the current
/// thread.
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of the reference to threadprivate var.
/// \return Address of the threadprivate variable for the current thread.
Address getAddrOfThreadPrivate(CodeGenFunction &CGF, const VarDecl *VD,
Address VDAddr, SourceLocation Loc) override;
/// Emit code for initialization of a threadprivate variable. It emits
/// a call to runtime library which adds initial value to the newly created
/// threadprivate variable (if it is not constant) and registers destructor
/// for the variable (if any).
/// \param VD Threadprivate variable.
/// \param VDAddr Address of the global variable \a VD.
/// \param Loc Location of threadprivate declaration.
/// \param PerformInit true if initialization expression is not constant.
llvm::Function *
emitThreadPrivateVarDefinition(const VarDecl *VD, Address VDAddr,
SourceLocation Loc, bool PerformInit,
CodeGenFunction *CGF = nullptr) override;
/// Creates artificial threadprivate variable with name \p Name and type \p
/// VarType.
/// \param VarType Type of the artificial threadprivate variable.
/// \param Name Name of the artificial threadprivate variable.
Address getAddrOfArtificialThreadPrivate(CodeGenFunction &CGF,
QualType VarType,
StringRef Name) override;
/// Emit flush of the variables specified in 'omp flush' directive.
/// \param Vars List of variables to flush.
void emitFlush(CodeGenFunction &CGF, ArrayRef<const Expr *> Vars,
SourceLocation Loc) override;
/// Emit task region for the task directive. The task region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to kmp_int32 __kmpc_omp_task(ident_t *, kmp_int32 gtid,
/// kmp_task_t *new_task), where new_task is a resulting structure from
/// previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPExecutableDirective &D,
llvm::Function *TaskFunction, QualType SharedsTy,
Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
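// Illustrative sketch (editor's addition): source-level code such as
//
//   #pragma omp task shared(x) if(cond)
//   x = compute();
//
// is lowered along the four steps above: __kmpc_omp_task_alloc allocates the
// kmp_task_t, the shared variable x is copied into its shareds field, and
// __kmpc_omp_task enqueues the resulting task. Here x, cond, and compute are
// placeholders.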
/// Emit task region for the taskloop directive. The taskloop region is
/// emitted in several steps:
/// 1. Emit a call to kmp_task_t *__kmpc_omp_task_alloc(ident_t *, kmp_int32
/// gtid, kmp_int32 flags, size_t sizeof_kmp_task_t, size_t sizeof_shareds,
/// kmp_routine_entry_t *task_entry). Here task_entry is a pointer to the
/// function:
/// kmp_int32 .omp_task_entry.(kmp_int32 gtid, kmp_task_t *tt) {
/// TaskFunction(gtid, tt->part_id, tt->shareds);
/// return 0;
/// }
/// 2. Copy a list of shared variables to field shareds of the resulting
/// structure kmp_task_t returned by the previous call (if any).
/// 3. Copy a pointer to destructions function to field destructions of the
/// resulting structure kmp_task_t.
/// 4. Emit a call to void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t
/// *task, int if_val, kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int
/// nogroup, int sched, kmp_uint64 grainsize, void *task_dup), where new_task
/// is a resulting structure from previous items.
/// \param D Current task directive.
/// \param TaskFunction An LLVM function with type void (*)(i32 /*gtid*/, i32
/// /*part_id*/, captured_struct */*__context*/);
/// \param SharedsTy A type which contains references to the shared variables.
/// \param Shareds Context with the list of shared variables from the \p
/// TaskFunction.
/// \param IfCond Not a nullptr if 'if' clause was specified, nullptr
/// otherwise.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates etc.
void emitTaskLoopCall(CodeGenFunction &CGF, SourceLocation Loc,
const OMPLoopDirective &D, llvm::Function *TaskFunction,
QualType SharedsTy, Address Shareds, const Expr *IfCond,
const OMPTaskDataTy &Data) override;
/// Emit code for the reduction clause. The following code should be
/// emitted for the reduction:
/// \code
///
/// static kmp_critical_name lock = { 0 };
///
/// void reduce_func(void *lhs[<n>], void *rhs[<n>]) {
/// ...
/// *(Type<i>*)lhs[i] = RedOp<i>(*(Type<i>*)lhs[i], *(Type<i>*)rhs[i]);
/// ...
/// }
///
/// ...
/// void *RedList[<n>] = {&<RHSExprs>[0], ..., &<RHSExprs>[<n>-1]};
/// switch (__kmpc_reduce{_nowait}(<loc>, <gtid>, <n>, sizeof(RedList),
/// RedList, reduce_func, &<lock>)) {
/// case 1:
/// ...
/// <LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]);
/// ...
/// __kmpc_end_reduce{_nowait}(<loc>, <gtid>, &<lock>);
/// break;
/// case 2:
/// ...
/// Atomic(<LHSExprs>[i] = RedOp<i>(*<LHSExprs>[i], *<RHSExprs>[i]));
/// ...
/// break;
/// default:;
/// }
/// \endcode
///
/// \param Privates List of private copies for original reduction arguments.
/// \param LHSExprs List of LHS in \a ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a ReductionOps reduction operations.
/// \param ReductionOps List of reduction operations in form 'LHS binop RHS'
/// or 'operator binop(LHS, RHS)'.
/// \param Options List of options for reduction codegen:
/// WithNowait true if parent directive has also nowait clause, false
/// otherwise.
/// SimpleReduction Emit reduction operation only. Used for omp simd
/// directive on the host.
/// ReductionKind The kind of reduction to perform.
void emitReduction(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> Privates,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
ArrayRef<const Expr *> ReductionOps,
ReductionOptionsTy Options) override;
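// Illustrative sketch (editor's addition): a directive such as
//
//   #pragma omp parallel for reduction(+ : sum)
//   for (int i = 0; i < n; ++i)
//     sum += a[i];
//
// reaches this entry point with the private copy of sum in Privates, sum in
// LHSExprs, and 'sum = sum + sum_priv' in ReductionOps, producing the
// __kmpc_reduce{_nowait} switch shown in the comment above. The names sum,
// a, and n are placeholders.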
/// Emit code for initialization of the task reduction clause. The following
/// code should be emitted for the reduction:
/// \code
///
/// _task_red_item_t red_data[n];
/// ...
/// red_data[i].shar = &origs[i];
/// red_data[i].size = sizeof(origs[i]);
/// red_data[i].f_init = (void*)RedInit<i>;
/// red_data[i].f_fini = (void*)RedDest<i>;
/// red_data[i].f_comb = (void*)RedOp<i>;
/// red_data[i].flags = <Flag_i>;
/// ...
/// void* tg1 = __kmpc_task_reduction_init(gtid, n, red_data);
/// \endcode
///
/// \param LHSExprs List of LHS in \a Data.ReductionOps reduction operations.
/// \param RHSExprs List of RHS in \a Data.ReductionOps reduction operations.
/// \param Data Additional data for task generation like tiedness, final
/// state, list of privates, reductions etc.
llvm::Value *emitTaskReductionInit(CodeGenFunction &CGF, SourceLocation Loc,
ArrayRef<const Expr *> LHSExprs,
ArrayRef<const Expr *> RHSExprs,
const OMPTaskDataTy &Data) override;
/// Required to resolve existing problems in the runtime. Emits threadprivate
/// variables to store the size of the VLAs/array sections for
/// initializer/combiner/finalizer functions + emits threadprivate variable to
/// store the pointer to the original reduction item for the custom
/// initializer defined by declare reduction construct.
/// \param RCG Allows reuse of existing data for the reductions.
/// \param N Reduction item for which fixups must be emitted.
void emitTaskReductionFixups(CodeGenFunction &CGF, SourceLocation Loc,
ReductionCodeGen &RCG, unsigned N) override;
/// Get the address of `void *` type of the private copy of the reduction
/// item specified by the \p SharedLVal.
/// \param ReductionsPtr Pointer to the reduction data returned by the
/// emitTaskReductionInit function.
/// \param SharedLVal Address of the original reduction item.
Address getTaskReductionItem(CodeGenFunction &CGF, SourceLocation Loc,
llvm::Value *ReductionsPtr,
LValue SharedLVal) override;
/// Emit code for 'taskwait' directive.
void emitTaskwaitCall(CodeGenFunction &CGF, SourceLocation Loc) override;
/// Emit code for 'cancellation point' construct.
/// \param CancelRegion Region kind for which the cancellation point must be
/// emitted.
///
void emitCancellationPointCall(CodeGenFunction &CGF, SourceLocation Loc,
OpenMPDirectiveKind CancelRegion) override;
/// Emit code for 'cancel' construct.
/// \param IfCond Condition in the associated 'if' clause, if it was
/// specified, nullptr otherwise.
/// \param CancelRegion Region kind for which the cancel must be emitted.
///
void emitCancelCall(CodeGenFunction &CGF, SourceLocation Loc,
const Expr *IfCond,
OpenMPDirectiveKind CancelRegion) override;
/// Emit outlined function for 'target' directive.
/// \param D Directive to emit.
/// \param ParentName Name of the function that encloses the target region.
/// \param OutlinedFn Outlined function value to be defined by this call.
/// \param OutlinedFnID Outlined function ID value to be defined by this call.
/// \param IsOffloadEntry True if the outlined function is an offload entry.
/// \param CodeGen Code generation sequence for the \a D directive.
/// An outlined function may not be an entry if, e.g. the if clause always
/// evaluates to false.
void emitTargetOutlinedFunction(const OMPExecutableDirective &D,
StringRef ParentName,
llvm::Function *&OutlinedFn,
llvm::Constant *&OutlinedFnID,
bool IsOffloadEntry,
const RegionCodeGenTy &CodeGen) override;
/// Emit the target offloading code associated with \a D. The emitted
/// code attempts to offload execution to the device and, in the event of
/// a failure, executes the host version outlined in \a OutlinedFn.
/// \param D Directive to emit.
/// \param OutlinedFn Host version of the code to be offloaded.
/// \param OutlinedFnID ID of host version of the code to be offloaded.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void
emitTargetCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
llvm::Function *OutlinedFn, llvm::Value *OutlinedFnID,
const Expr *IfCond, const Expr *Device,
llvm::function_ref<llvm::Value *(CodeGenFunction &CGF,
const OMPLoopDirective &D)>
SizeEmitter) override;
/// Emit the target regions enclosed in \a GD function definition or
/// the function itself in case it is a valid device function. Returns true if
/// \a GD was dealt with successfully.
/// \param GD Function to scan.
bool emitTargetFunctions(GlobalDecl GD) override;
/// Emit the global variable if it is a valid device global variable.
/// Returns true if \a GD was dealt with successfully.
/// \param GD Variable declaration to emit.
bool emitTargetGlobalVariable(GlobalDecl GD) override;
/// Emit the global \a GD if it is meaningful for the target. Returns
/// true if it was emitted successfully.
/// \param GD Global to scan.
bool emitTargetGlobal(GlobalDecl GD) override;
/// Emits code for teams call of the \a OutlinedFn with
/// variables captured in a record whose address is stored in \a
/// CapturedStruct.
/// \param OutlinedFn Outlined function to be run by team masters. Type of
/// this function is void(*)(kmp_int32 *, kmp_int32, struct context_vars*).
/// \param CapturedVars A pointer to the record with the references to
/// variables used in \a OutlinedFn function.
///
void emitTeamsCall(CodeGenFunction &CGF, const OMPExecutableDirective &D,
SourceLocation Loc, llvm::Function *OutlinedFn,
ArrayRef<llvm::Value *> CapturedVars) override;
/// Emits call to void __kmpc_push_num_teams(ident_t *loc, kmp_int32
/// global_tid, kmp_int32 num_teams, kmp_int32 thread_limit) to generate code
/// for num_teams clause.
/// \param NumTeams An integer expression of teams.
/// \param ThreadLimit An integer expression of threads.
void emitNumTeamsClause(CodeGenFunction &CGF, const Expr *NumTeams,
const Expr *ThreadLimit, SourceLocation Loc) override;
/// Emit the target data mapping code associated with \a D.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the
/// target directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
/// \param Info A record used to store information that needs to be preserved
/// until the region is closed.
void emitTargetDataCalls(CodeGenFunction &CGF,
const OMPExecutableDirective &D, const Expr *IfCond,
const Expr *Device, const RegionCodeGenTy &CodeGen,
TargetDataInfo &Info) override;
/// Emit the data mapping/movement code associated with the directive
/// \a D that should be of the form 'target [{enter|exit} data | update]'.
/// \param D Directive to emit.
/// \param IfCond Expression evaluated in if clause associated with the target
/// directive, or null if no if clause is used.
/// \param Device Expression evaluated in device clause associated with the
/// target directive, or null if no device clause is used.
void emitTargetDataStandAloneCall(CodeGenFunction &CGF,
const OMPExecutableDirective &D,
const Expr *IfCond,
const Expr *Device) override;
/// Emit initialization for doacross loop nesting support.
/// \param D Loop-based construct used in doacross nesting construct.
void emitDoacrossInit(CodeGenFunction &CGF, const OMPLoopDirective &D,
ArrayRef<Expr *> NumIterations) override;
/// Emit code for doacross ordered directive with 'depend' clause.
/// \param C 'depend' clause with 'sink|source' dependency kind.
void emitDoacrossOrdered(CodeGenFunction &CGF,
const OMPDependClause *C) override;
/// Translates the native parameter of outlined function if this is required
/// for target.
/// \param FD Field decl from captured record for the parameter.
/// \param NativeParam Parameter itself.
const VarDecl *translateParameter(const FieldDecl *FD,
const VarDecl *NativeParam) const override;
/// Gets the address of the native argument based on the address of the
/// target-specific parameter.
/// \param NativeParam Parameter itself.
/// \param TargetParam Corresponding target-specific parameter.
Address getParameterAddress(CodeGenFunction &CGF, const VarDecl *NativeParam,
const VarDecl *TargetParam) const override;
/// Gets the OpenMP-specific address of the local variable.
Address getAddressOfLocalVariable(CodeGenFunction &CGF,
const VarDecl *VD) override {
return Address::invalid();
}
};
} // namespace CodeGen
} // namespace clang
#endif
|
lulesh.h | /*******************************************************************************
Copyright (c) 2016 Advanced Micro Devices, Inc.
All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors
may be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*******************************************************************************/
#if !defined(USE_MPI)
# error "You should specify USE_MPI=0 or USE_MPI=1 on the compile line"
#endif
// OpenMP will be compiled in if this flag is set to 1 AND the compiler being
// used supports it (i.e. the _OPENMP symbol is defined)
#define USE_OMP 1
#if USE_MPI
#include <mpi.h>
/*
define one of these three symbols:
SEDOV_SYNC_POS_VEL_NONE
SEDOV_SYNC_POS_VEL_EARLY
SEDOV_SYNC_POS_VEL_LATE
*/
#define SEDOV_SYNC_POS_VEL_EARLY 1
#endif
#include <math.h>
#include <vector>
//**************************************************
// Allow flexibility for arithmetic representations
//**************************************************
#define MAX(a, b) ( ((a) > (b)) ? (a) : (b))
// Precision specification
typedef float real4 ;
typedef double real8 ;
typedef long double real10 ; // 10 bytes on x86
typedef int Index_t ; // array subscript and loop index
typedef real8 Real_t ; // floating point representation
typedef int Int_t ; // integer representation
enum { VolumeError = -1, QStopError = -2 } ;
#pragma omp declare target
inline real4 SQRT(real4 arg) { return sqrtf(arg) ; }
inline real8 SQRT(real8 arg) { return sqrt(arg) ; }
inline real10 SQRT(real10 arg) { return sqrtl(arg) ; }
inline real4 CBRT(real4 arg) { return cbrtf(arg) ; }
inline real8 CBRT(real8 arg) { return cbrt(arg) ; }
inline real10 CBRT(real10 arg) { return cbrtl(arg) ; }
inline real4 FABS(real4 arg) { return fabsf(arg) ; }
inline real8 FABS(real8 arg) { return fabs(arg) ; }
inline real10 FABS(real10 arg) { return fabsl(arg) ; }
#pragma omp end declare target
// Stuff needed for boundary conditions
// 3 BCs on each of 6 hexahedral faces (18 bits)
#define XI_M 0x00007
#define XI_M_SYMM 0x00001
#define XI_M_FREE 0x00002
#define XI_M_COMM 0x00004
#define XI_P 0x00038
#define XI_P_SYMM 0x00008
#define XI_P_FREE 0x00010
#define XI_P_COMM 0x00020
#define ETA_M 0x001c0
#define ETA_M_SYMM 0x00040
#define ETA_M_FREE 0x00080
#define ETA_M_COMM 0x00100
#define ETA_P 0x00e00
#define ETA_P_SYMM 0x00200
#define ETA_P_FREE 0x00400
#define ETA_P_COMM 0x00800
#define ZETA_M 0x07000
#define ZETA_M_SYMM 0x01000
#define ZETA_M_FREE 0x02000
#define ZETA_M_COMM 0x04000
#define ZETA_P 0x38000
#define ZETA_P_SYMM 0x08000
#define ZETA_P_FREE 0x10000
#define ZETA_P_COMM 0x20000
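// Worked example (editor's addition): each face owns a 3-bit group, so
// boundary-condition tests are simple mask operations, e.g.
//
//   if (elemBC & XI_M_SYMM) { /* -xi face lies on a symmetry plane */ }
//   if ((elemBC & ZETA_P) == ZETA_P_COMM) { /* +zeta face is a comm face */ }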
// MPI Message Tags
#define MSG_COMM_SBN 1024
#define MSG_SYNC_POS_VEL 2048
#define MSG_MONOQ 3072
#define MAX_FIELDS_PER_MPI_COMM 6
// Assume 128 byte coherence
// Assume Real_t is an "integral power of 2" bytes wide
#define CACHE_COHERENCE_PAD_REAL (128 / sizeof(Real_t))
#define CACHE_ALIGN_REAL(n) \
(((n) + (CACHE_COHERENCE_PAD_REAL - 1)) & ~(CACHE_COHERENCE_PAD_REAL-1))
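// Worked example (editor's addition): with an 8-byte Real_t,
// CACHE_COHERENCE_PAD_REAL is 128/8 = 16, so CACHE_ALIGN_REAL(100)
// = (100 + 15) & ~15 = 112, i.e. the element count is rounded up to a
// whole number of cache lines.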
//////////////////////////////////////////////////////
// Primary data structure
//////////////////////////////////////////////////////
/*
* The implementation of the data abstraction used for lulesh
* resides entirely in the Domain class below. You can change
* grouping and interleaving of fields here to maximize data layout
* efficiency for your underlying architecture or compiler.
*
* For example, fields can be implemented as STL objects or
* raw array pointers. As another example, individual fields
* m_x, m_y, m_z could be bundled into
*
* struct { Real_t x, y, z ; } *m_coord ;
*
* allowing accessor functions such as
*
* "Real_t &x(Index_t idx) { return m_coord[idx].x ; }"
* "Real_t &y(Index_t idx) { return m_coord[idx].y ; }"
* "Real_t &z(Index_t idx) { return m_coord[idx].z ; }"
*/
class Domain {
public:
// Constructor
Domain(Int_t numRanks, Index_t colLoc,
Index_t rowLoc, Index_t planeLoc,
Index_t nx, Int_t tp, Int_t nr, Int_t balance, Int_t cost);
//
// ALLOCATION
//
void AllocateNodePersistent(Int_t numNode) // Node-centered
{
m_x.resize(numNode); // coordinates
m_y.resize(numNode);
m_z.resize(numNode);
m_xd.resize(numNode); // velocities
m_yd.resize(numNode);
m_zd.resize(numNode);
m_xdd.resize(numNode); // accelerations
m_ydd.resize(numNode);
m_zdd.resize(numNode);
m_fx.resize(numNode); // forces
m_fy.resize(numNode);
m_fz.resize(numNode);
m_nodalMass.resize(numNode); // mass
}
void AllocateElemPersistent(Int_t numElem) // Elem-centered
{
m_nodelist.resize(8*numElem);
// elem connectivities through face
m_lxim.resize(numElem);
m_lxip.resize(numElem);
m_letam.resize(numElem);
m_letap.resize(numElem);
m_lzetam.resize(numElem);
m_lzetap.resize(numElem);
m_elemBC.resize(numElem);
m_e.resize(numElem);
m_p.resize(numElem);
m_q.resize(numElem);
m_ql.resize(numElem);
m_qq.resize(numElem);
m_v.resize(numElem);
m_volo.resize(numElem);
m_delv.resize(numElem);
m_vdov.resize(numElem);
m_arealg.resize(numElem);
m_ss.resize(numElem);
m_elemMass.resize(numElem);
m_elemRep.resize(numElem);
m_elemElem.resize(numElem);
}
void AllocateGradients(Int_t numElem, Int_t allElem)
{
// Position gradients
m_delx_xi.resize(numElem) ;
m_delx_eta.resize(numElem) ;
m_delx_zeta.resize(numElem) ;
// Velocity gradients
m_delv_xi.resize(allElem) ;
m_delv_eta.resize(allElem);
m_delv_zeta.resize(allElem) ;
}
void DeallocateGradients()
{
m_delx_zeta.clear() ;
m_delx_eta.clear() ;
m_delx_xi.clear() ;
m_delv_zeta.clear() ;
m_delv_eta.clear() ;
m_delv_xi.clear() ;
}
void AllocateStrains(Int_t numElem)
{
m_dxx.resize(numElem) ;
m_dyy.resize(numElem) ;
m_dzz.resize(numElem) ;
}
void DeallocateStrains()
{
m_dzz.clear() ;
m_dyy.clear() ;
m_dxx.clear() ;
}
//
// ACCESSORS
//
// Node-centered
// Nodal coordinates
Real_t& x(Index_t idx) { return m_x[idx] ; }
Real_t& y(Index_t idx) { return m_y[idx] ; }
Real_t& z(Index_t idx) { return m_z[idx] ; }
// Nodal velocities
Real_t& xd(Index_t idx) { return m_xd[idx] ; }
Real_t& yd(Index_t idx) { return m_yd[idx] ; }
Real_t& zd(Index_t idx) { return m_zd[idx] ; }
// Nodal accelerations
Real_t& xdd(Index_t idx) { return m_xdd[idx] ; }
Real_t& ydd(Index_t idx) { return m_ydd[idx] ; }
Real_t& zdd(Index_t idx) { return m_zdd[idx] ; }
// Nodal forces
Real_t& fx(Index_t idx) { return m_fx[idx] ; }
Real_t& fy(Index_t idx) { return m_fy[idx] ; }
Real_t& fz(Index_t idx) { return m_fz[idx] ; }
// Nodal mass
Real_t& nodalMass(Index_t idx) { return m_nodalMass[idx] ; }
// Nodes on symmetry planes
Index_t symmX(Index_t idx) { return m_symmX[idx] ; }
Index_t symmY(Index_t idx) { return m_symmY[idx] ; }
Index_t symmZ(Index_t idx) { return m_symmZ[idx] ; }
bool symmXempty() { return m_symmX.empty(); }
bool symmYempty() { return m_symmY.empty(); }
bool symmZempty() { return m_symmZ.empty(); }
//
// Element-centered
//
Index_t& regElemSize(Index_t idx) { return m_regElemSize[idx] ; }
Index_t& regNumList(Index_t idx) { return m_regNumList[idx] ; }
Index_t* regNumList() { return &m_regNumList[0] ; }
Index_t* regElemlist(Int_t r) { return m_regElemlist[r] ; }
Index_t& regElemlist(Int_t r, Index_t idx) { return m_regElemlist[r][idx] ; }
Index_t* nodelist(Index_t idx) { return &m_nodelist[Index_t(8)*idx] ; }
// elem connectivities through face
Index_t& lxim(Index_t idx) { return m_lxim[idx] ; }
Index_t& lxip(Index_t idx) { return m_lxip[idx] ; }
Index_t& letam(Index_t idx) { return m_letam[idx] ; }
Index_t& letap(Index_t idx) { return m_letap[idx] ; }
Index_t& lzetam(Index_t idx) { return m_lzetam[idx] ; }
Index_t& lzetap(Index_t idx) { return m_lzetap[idx] ; }
// elem face symm/free-surface flag
Int_t& elemBC(Index_t idx) { return m_elemBC[idx] ; }
// Principal strains - temporary
Real_t& dxx(Index_t idx) { return m_dxx[idx] ; }
Real_t& dyy(Index_t idx) { return m_dyy[idx] ; }
Real_t& dzz(Index_t idx) { return m_dzz[idx] ; }
// Velocity gradient - temporary
Real_t& delv_xi(Index_t idx) { return m_delv_xi[idx] ; }
Real_t& delv_eta(Index_t idx) { return m_delv_eta[idx] ; }
Real_t& delv_zeta(Index_t idx) { return m_delv_zeta[idx] ; }
// Position gradient - temporary
Real_t& delx_xi(Index_t idx) { return m_delx_xi[idx] ; }
Real_t& delx_eta(Index_t idx) { return m_delx_eta[idx] ; }
Real_t& delx_zeta(Index_t idx) { return m_delx_zeta[idx] ; }
// Energy
Real_t& e(Index_t idx) { return m_e[idx] ; }
// Pressure
Real_t& p(Index_t idx) { return m_p[idx] ; }
// Artificial viscosity
Real_t& q(Index_t idx) { return m_q[idx] ; }
// Linear term for q
Real_t& ql(Index_t idx) { return m_ql[idx] ; }
// Quadratic term for q
Real_t& qq(Index_t idx) { return m_qq[idx] ; }
// Relative volume
Real_t& v(Index_t idx) { return m_v[idx] ; }
Real_t& delv(Index_t idx) { return m_delv[idx] ; }
// Reference volume
Real_t& volo(Index_t idx) { return m_volo[idx] ; }
// volume derivative over volume
Real_t& vdov(Index_t idx) { return m_vdov[idx] ; }
// Element characteristic length
Real_t& arealg(Index_t idx) { return m_arealg[idx] ; }
// Sound speed
Real_t& ss(Index_t idx) { return m_ss[idx] ; }
// Element mass
Real_t& elemMass(Index_t idx) { return m_elemMass[idx] ; }
// Element replication count
Index_t& elemRep(Index_t idx) { return m_elemRep[idx] ; }
// Element index mapping
Index_t& elemElem(Index_t idx) { return m_elemElem[idx] ; }
Index_t nodeElemCount(Index_t idx)
{ return m_nodeElemStart[idx+1] - m_nodeElemStart[idx] ; }
Index_t *nodeElemCornerList(Index_t idx)
{ return &m_nodeElemCornerList[m_nodeElemStart[idx]] ; }
// Parameters
// Cutoffs
Real_t u_cut() const { return m_u_cut ; }
Real_t e_cut() const { return m_e_cut ; }
Real_t p_cut() const { return m_p_cut ; }
Real_t q_cut() const { return m_q_cut ; }
Real_t v_cut() const { return m_v_cut ; }
// Other constants (usually settable via input file in real codes)
Real_t hgcoef() const { return m_hgcoef ; }
Real_t qstop() const { return m_qstop ; }
Real_t monoq_max_slope() const { return m_monoq_max_slope ; }
Real_t monoq_limiter_mult() const { return m_monoq_limiter_mult ; }
Real_t ss4o3() const { return m_ss4o3 ; }
Real_t qlc_monoq() const { return m_qlc_monoq ; }
Real_t qqc_monoq() const { return m_qqc_monoq ; }
Real_t qqc() const { return m_qqc ; }
Real_t eosvmax() const { return m_eosvmax ; }
Real_t eosvmin() const { return m_eosvmin ; }
Real_t pmin() const { return m_pmin ; }
Real_t emin() const { return m_emin ; }
Real_t dvovmax() const { return m_dvovmax ; }
Real_t refdens() const { return m_refdens ; }
// Timestep controls, etc...
Real_t& time() { return m_time ; }
Real_t& deltatime() { return m_deltatime ; }
Real_t& deltatimemultlb() { return m_deltatimemultlb ; }
Real_t& deltatimemultub() { return m_deltatimemultub ; }
Real_t& stoptime() { return m_stoptime ; }
Real_t& dtcourant() { return m_dtcourant ; }
Real_t& dthydro() { return m_dthydro ; }
Real_t& dtmax() { return m_dtmax ; }
Real_t& dtfixed() { return m_dtfixed ; }
Int_t& cycle() { return m_cycle ; }
Index_t& numRanks() { return m_numRanks ; }
Index_t& colLoc() { return m_colLoc ; }
Index_t& rowLoc() { return m_rowLoc ; }
Index_t& planeLoc() { return m_planeLoc ; }
Index_t& tp() { return m_tp ; }
Index_t& sizeX() { return m_sizeX ; }
Index_t& sizeY() { return m_sizeY ; }
Index_t& sizeZ() { return m_sizeZ ; }
Index_t& numReg() { return m_numReg ; }
Int_t& cost() { return m_cost ; }
Index_t& numElem() { return m_numElem ; }
Index_t& numNode() { return m_numNode ; }
Index_t& maxPlaneSize() { return m_maxPlaneSize ; }
Index_t& maxEdgeSize() { return m_maxEdgeSize ; }
//
// MPI-Related additional data
//
#if USE_MPI
// Communication Work space
Real_t *commDataSend ;
Real_t *commDataRecv ;
// Maximum number of block neighbors
MPI_Request recvRequest[26] ; // 6 faces + 12 edges + 8 corners
MPI_Request sendRequest[26] ; // 6 faces + 12 edges + 8 corners
#endif
// private:
void BuildMesh(Int_t nx, Int_t edgeNodes, Int_t edgeElems);
void SetupThreadSupportStructures();
void CreateRegionIndexSets(Int_t nreg, Int_t balance);
void SetupCommBuffers(Int_t edgeNodes);
void SetupSymmetryPlanes(Int_t edgeNodes);
void SetupElementConnectivities(Int_t edgeElems);
void SetupBoundaryConditions(Int_t edgeElems);
//
// IMPLEMENTATION
//
/* Node-centered */
std::vector<Real_t> m_x ; /* coordinates */
std::vector<Real_t> m_y ;
std::vector<Real_t> m_z ;
std::vector<Real_t> m_xd ; /* velocities */
std::vector<Real_t> m_yd ;
std::vector<Real_t> m_zd ;
std::vector<Real_t> m_xdd ; /* accelerations */
std::vector<Real_t> m_ydd ;
std::vector<Real_t> m_zdd ;
std::vector<Real_t> m_fx ; /* forces */
std::vector<Real_t> m_fy ;
std::vector<Real_t> m_fz ;
std::vector<Real_t> m_nodalMass ; /* mass */
std::vector<Index_t> m_symmX ; /* symmetry plane nodesets */
std::vector<Index_t> m_symmY ;
std::vector<Index_t> m_symmZ ;
// Element-centered
// Region information
Int_t m_numReg ;
Int_t m_cost; //imbalance cost
Index_t *m_regElemSize ; // Size of region sets
Index_t *m_regNumList ; // Region number per domain element
Index_t **m_regElemlist ; // region indexset
std::vector<Index_t> m_nodelist ; /* elemToNode connectivity */
std::vector<Index_t> m_lxim ; /* element connectivity across each face */
std::vector<Index_t> m_lxip ;
std::vector<Index_t> m_letam ;
std::vector<Index_t> m_letap ;
std::vector<Index_t> m_lzetam ;
std::vector<Index_t> m_lzetap ;
std::vector<Int_t> m_elemBC ; /* symmetry/free-surface flags for each elem face */
std::vector<Real_t> m_dxx ; /* principal strains -- temporary */
std::vector<Real_t> m_dyy ;
std::vector<Real_t> m_dzz ;
std::vector<Real_t> m_delv_xi ; /* velocity gradient -- temporary */
std::vector<Real_t> m_delv_eta ;
std::vector<Real_t> m_delv_zeta ;
std::vector<Real_t> m_delx_xi ; /* coordinate gradient -- temporary */
std::vector<Real_t> m_delx_eta ;
std::vector<Real_t> m_delx_zeta ;
std::vector<Real_t> m_e ; /* energy */
std::vector<Real_t> m_p ; /* pressure */
std::vector<Real_t> m_q ; /* q */
std::vector<Real_t> m_ql ; /* linear term for q */
std::vector<Real_t> m_qq ; /* quadratic term for q */
std::vector<Real_t> m_v ; /* relative volume */
std::vector<Real_t> m_volo ; /* reference volume */
std::vector<Real_t> m_vnew ; /* new relative volume -- temporary */
std::vector<Real_t> m_delv ; /* m_vnew - m_v */
std::vector<Real_t> m_vdov ; /* volume derivative over volume */
std::vector<Real_t> m_arealg ; /* characteristic length of an element */
std::vector<Real_t> m_ss ; /* "sound speed" */
std::vector<Real_t> m_elemMass ; /* mass */
std::vector<Index_t> m_elemRep ; /* reps */
std::vector<Index_t> m_elemElem ; /* elems */
// Cutoffs (treat as constants)
const Real_t m_e_cut ; // energy tolerance
const Real_t m_p_cut ; // pressure tolerance
const Real_t m_q_cut ; // q tolerance
const Real_t m_v_cut ; // relative volume tolerance
const Real_t m_u_cut ; // velocity tolerance
// Other constants (usually settable, but hardcoded in this proxy app)
const Real_t m_hgcoef ; // hourglass control
const Real_t m_ss4o3 ;
const Real_t m_qstop ; // excessive q indicator
const Real_t m_monoq_max_slope ;
const Real_t m_monoq_limiter_mult ;
const Real_t m_qlc_monoq ; // linear term coef for q
const Real_t m_qqc_monoq ; // quadratic term coef for q
const Real_t m_qqc ;
const Real_t m_eosvmax ;
const Real_t m_eosvmin ;
const Real_t m_pmin ; // pressure floor
const Real_t m_emin ; // energy floor
const Real_t m_dvovmax ; // maximum allowable volume change
const Real_t m_refdens ; // reference density
// Variables to keep track of timestep, simulation time, and cycle
Real_t m_dtcourant ; // courant constraint
Real_t m_dthydro ; // volume change constraint
Int_t m_cycle ; // iteration count for simulation
Real_t m_dtfixed ; // fixed time increment
Real_t m_time ; // current time
Real_t m_deltatime ; // variable time increment
Real_t m_deltatimemultlb ;
Real_t m_deltatimemultub ;
Real_t m_dtmax ; // maximum allowable time increment
Real_t m_stoptime ; // end time for simulation
Int_t m_numRanks ;
Index_t m_colLoc ;
Index_t m_rowLoc ;
Index_t m_planeLoc ;
Index_t m_tp ;
Index_t m_sizeX ;
Index_t m_sizeY ;
Index_t m_sizeZ ;
Index_t m_numElem ;
Index_t m_numNode ;
Index_t m_maxPlaneSize ;
Index_t m_maxEdgeSize ;
// OMP hack
Index_t *m_nodeElemStart ;
Index_t *m_nodeElemCornerList ;
// Used in setup
Index_t m_rowMin, m_rowMax;
Index_t m_colMin, m_colMax;
Index_t m_planeMin, m_planeMax ;
} ;
typedef Real_t &(Domain::* Domain_member )(Index_t) ;
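// Illustrative usage (editor's addition): Domain_member is a pointer to a
// Real_t accessor member function, which lets the communication routines
// below iterate over fields generically, e.g.
//
//   Domain_member fieldData[2] = { &Domain::x, &Domain::y };
//   Real_t v = (domain.*fieldData[0])(idx); // equivalent to domain.x(idx)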
struct cmdLineOpts {
Int_t its; // -i
Int_t nx; // -s
Int_t numReg; // -r
Int_t numFiles; // -f
Int_t showProg; // -p
Int_t quiet; // -q
Int_t viz; // -v
Int_t cost; // -c
Int_t balance; // -b
Int_t iteration_cap; // -z
};
// Function Prototypes
// lulesh-par
Real_t CalcElemVolume( const Real_t x[8],
const Real_t y[8],
const Real_t z[8]);
// lulesh-util
void ParseCommandLineOptions(int argc, char *argv[],
Int_t myRank, struct cmdLineOpts *opts);
void VerifyAndWriteFinalOutput(Real_t elapsed_time,
Domain& locDom,
Int_t nx,
Int_t numRanks);
// lulesh-viz
void DumpToVisit(Domain& domain, int numFiles, int myRank, int numRanks);
// lulesh-comm
void CommRecv(Domain& domain, Int_t msgType, Index_t xferFields,
Index_t dx, Index_t dy, Index_t dz,
bool doRecv, bool planeOnly);
void CommSend(Domain& domain, Int_t msgType,
Index_t xferFields, Domain_member *fieldData,
Index_t dx, Index_t dy, Index_t dz,
bool doSend, bool planeOnly);
void CommSBN(Domain& domain, Int_t xferFields, Domain_member *fieldData);
void CommSyncPosVel(Domain& domain);
void CommMonoQ(Domain& domain);
// lulesh-init
void InitMeshDecomp(Int_t numRanks, Int_t myRank,
Int_t *col, Int_t *row, Int_t *plane, Int_t *side);
|
irbuilder_unroll_unroll_partial_factor.c | // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py UTC_ARGS: --function-signature --include-generated-funcs
// RUN: %clang_cc1 -fopenmp-enable-irbuilder -verify -fopenmp -fopenmp-version=51 -x c -triple x86_64-unknown-unknown -emit-llvm %s -o - | FileCheck %s
// expected-no-diagnostics
#ifndef HEADER
#define HEADER
// CHECK-LABEL: define {{.*}}@unroll_partial_factor_for(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[A_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[B_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[C_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[D_ADDR:.+]] = alloca float*, align 8
// CHECK-NEXT: %[[I:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[AGG_CAPTURED:.+]] = alloca %struct.anon, align 8
// CHECK-NEXT: %[[AGG_CAPTURED1:.+]] = alloca %struct.anon.0, align 4
// CHECK-NEXT: %[[DOTCOUNT_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LASTITER:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_LOWERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_UPPERBOUND:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[P_STRIDE:.+]] = alloca i32, align 4
// CHECK-NEXT: store float* %[[A:.+]], float** %[[A_ADDR]], align 8
// CHECK-NEXT: store float* %[[B:.+]], float** %[[B_ADDR]], align 8
// CHECK-NEXT: store float* %[[C:.+]], float** %[[C_ADDR]], align 8
// CHECK-NEXT: store float* %[[D:.+]], float** %[[D_ADDR]], align 8
// CHECK-NEXT: store i32 0, i32* %[[I]], align 4
// CHECK-NEXT: %[[TMP0:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[AGG_CAPTURED]], i32 0, i32 0
// CHECK-NEXT: store i32* %[[I]], i32** %[[TMP0]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[AGG_CAPTURED1]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: store i32 %[[TMP2]], i32* %[[TMP1]], align 4
// CHECK-NEXT: call void @__captured_stmt(i32* %[[DOTCOUNT_ADDR]], %struct.anon* %[[AGG_CAPTURED]])
// CHECK-NEXT: %[[DOTCOUNT:.+]] = load i32, i32* %[[DOTCOUNT_ADDR]], align 4
// CHECK-NEXT: br label %[[OMP_LOOP_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_PREHEADER]]:
// CHECK-NEXT: %[[TMP3:.+]] = udiv i32 %[[DOTCOUNT]], 2
// CHECK-NEXT: %[[TMP4:.+]] = urem i32 %[[DOTCOUNT]], 2
// CHECK-NEXT: %[[TMP5:.+]] = icmp ne i32 %[[TMP4]], 0
// CHECK-NEXT: %[[TMP6:.+]] = zext i1 %[[TMP5]] to i32
// CHECK-NEXT: %[[OMP_FLOOR0_TRIPCOUNT:.+]] = add nuw i32 %[[TMP3]], %[[TMP6]]
// CHECK-NEXT: br label %[[OMP_FLOOR0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_PREHEADER]]:
// CHECK-NEXT: store i32 0, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = sub i32 %[[OMP_FLOOR0_TRIPCOUNT]], 1
// CHECK-NEXT: store i32 %[[TMP7]], i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: store i32 1, i32* %[[P_STRIDE]], align 4
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_for_static_init_4u(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]], i32 34, i32* %[[P_LASTITER]], i32* %[[P_LOWERBOUND]], i32* %[[P_UPPERBOUND]], i32* %[[P_STRIDE]], i32 1, i32 1)
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[P_LOWERBOUND]], align 4
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[P_UPPERBOUND]], align 4
// CHECK-NEXT: %[[TMP10:.+]] = sub i32 %[[TMP9]], %[[TMP8]]
// CHECK-NEXT: %[[TMP11:.+]] = add i32 %[[TMP10]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_HEADER]]:
// CHECK-NEXT: %[[OMP_FLOOR0_IV:.+]] = phi i32 [ 0, %[[OMP_FLOOR0_PREHEADER]] ], [ %[[OMP_FLOOR0_NEXT:.+]], %[[OMP_FLOOR0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_FLOOR0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_COND]]:
// CHECK-NEXT: %[[OMP_FLOOR0_CMP:.+]] = icmp ult i32 %[[OMP_FLOOR0_IV]], %[[TMP11]]
// CHECK-NEXT: br i1 %[[OMP_FLOOR0_CMP]], label %[[OMP_FLOOR0_BODY:.+]], label %[[OMP_FLOOR0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_BODY]]:
// CHECK-NEXT: %[[TMP12:.+]] = add i32 %[[OMP_FLOOR0_IV]], %[[TMP8]]
// CHECK-NEXT: %[[TMP13:.+]] = icmp eq i32 %[[TMP12]], %[[OMP_FLOOR0_TRIPCOUNT]]
// CHECK-NEXT: %[[TMP14:.+]] = select i1 %[[TMP13]], i32 %[[TMP4]], i32 2
// CHECK-NEXT: br label %[[OMP_TILE0_PREHEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_PREHEADER]]:
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_HEADER]]:
// CHECK-NEXT: %[[OMP_TILE0_IV:.+]] = phi i32 [ 0, %[[OMP_TILE0_PREHEADER]] ], [ %[[OMP_TILE0_NEXT:.+]], %[[OMP_TILE0_INC:.+]] ]
// CHECK-NEXT: br label %[[OMP_TILE0_COND:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_COND]]:
// CHECK-NEXT: %[[OMP_TILE0_CMP:.+]] = icmp ult i32 %[[OMP_TILE0_IV]], %[[TMP14]]
// CHECK-NEXT: br i1 %[[OMP_TILE0_CMP]], label %[[OMP_TILE0_BODY:.+]], label %[[OMP_TILE0_EXIT:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_BODY]]:
// CHECK-NEXT: %[[TMP15:.+]] = mul nuw i32 2, %[[TMP12]]
// CHECK-NEXT: %[[TMP16:.+]] = add nuw i32 %[[TMP15]], %[[OMP_TILE0_IV]]
// CHECK-NEXT: br label %[[OMP_LOOP_BODY:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_BODY]]:
// CHECK-NEXT: call void @__captured_stmt.1(i32* %[[I]], i32 %[[TMP16]], %struct.anon.0* %[[AGG_CAPTURED1]])
// CHECK-NEXT: %[[TMP17:.+]] = load float*, float** %[[B_ADDR]], align 8
// CHECK-NEXT: %[[TMP18:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM:.+]] = sext i32 %[[TMP18]] to i64
// CHECK-NEXT: %[[ARRAYIDX:.+]] = getelementptr inbounds float, float* %[[TMP17]], i64 %[[IDXPROM]]
// CHECK-NEXT: %[[TMP19:.+]] = load float, float* %[[ARRAYIDX]], align 4
// CHECK-NEXT: %[[TMP20:.+]] = load float*, float** %[[C_ADDR]], align 8
// CHECK-NEXT: %[[TMP21:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM2:.+]] = sext i32 %[[TMP21]] to i64
// CHECK-NEXT: %[[ARRAYIDX3:.+]] = getelementptr inbounds float, float* %[[TMP20]], i64 %[[IDXPROM2]]
// CHECK-NEXT: %[[TMP22:.+]] = load float, float* %[[ARRAYIDX3]], align 4
// CHECK-NEXT: %[[MUL:.+]] = fmul float %[[TMP19]], %[[TMP22]]
// CHECK-NEXT: %[[TMP23:.+]] = load float*, float** %[[D_ADDR]], align 8
// CHECK-NEXT: %[[TMP24:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM4:.+]] = sext i32 %[[TMP24]] to i64
// CHECK-NEXT: %[[ARRAYIDX5:.+]] = getelementptr inbounds float, float* %[[TMP23]], i64 %[[IDXPROM4]]
// CHECK-NEXT: %[[TMP25:.+]] = load float, float* %[[ARRAYIDX5]], align 4
// CHECK-NEXT: %[[MUL6:.+]] = fmul float %[[MUL]], %[[TMP25]]
// CHECK-NEXT: %[[TMP26:.+]] = load float*, float** %[[A_ADDR]], align 8
// CHECK-NEXT: %[[TMP27:.+]] = load i32, i32* %[[I]], align 4
// CHECK-NEXT: %[[IDXPROM7:.+]] = sext i32 %[[TMP27]] to i64
// CHECK-NEXT: %[[ARRAYIDX8:.+]] = getelementptr inbounds float, float* %[[TMP26]], i64 %[[IDXPROM7]]
// CHECK-NEXT: store float %[[MUL6]], float* %[[ARRAYIDX8]], align 4
// CHECK-NEXT: br label %[[OMP_TILE0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_INC]]:
// CHECK-NEXT: %[[OMP_TILE0_NEXT]] = add nuw i32 %[[OMP_TILE0_IV]], 1
// CHECK-NEXT: br label %[[OMP_TILE0_HEADER]], !llvm.loop ![[LOOP3:[0-9]+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_EXIT]]:
// CHECK-NEXT: br label %[[OMP_TILE0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_TILE0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_FLOOR0_INC]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_INC]]:
// CHECK-NEXT: %[[OMP_FLOOR0_NEXT]] = add nuw i32 %[[OMP_FLOOR0_IV]], 1
// CHECK-NEXT: br label %[[OMP_FLOOR0_HEADER]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_EXIT]]:
// CHECK-NEXT: call void @__kmpc_for_static_fini(%struct.ident_t* @1, i32 %[[OMP_GLOBAL_THREAD_NUM]])
// CHECK-NEXT: %[[OMP_GLOBAL_THREAD_NUM9:.+]] = call i32 @__kmpc_global_thread_num(%struct.ident_t* @1)
// CHECK-NEXT: call void @__kmpc_barrier(%struct.ident_t* @2, i32 %[[OMP_GLOBAL_THREAD_NUM9]])
// CHECK-NEXT: br label %[[OMP_FLOOR0_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_FLOOR0_AFTER]]:
// CHECK-NEXT: br label %[[OMP_LOOP_AFTER:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[OMP_LOOP_AFTER]]:
// CHECK-NEXT: ret void
// CHECK-NEXT: }
void unroll_partial_factor_for(float *a, float *b, float *c, float *d) {
#pragma omp for
#pragma omp unroll partial(2)
for (int i = 0; i < 2; i++) {
a[i] = b[i] * c[i] * d[i];
}
}
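// Editor's note (illustrative): 'partial(2)' is lowered as a tile of size 2.
// The omp_floor0 loop above runs ceil(count/2) iterations and is what the
// enclosing '#pragma omp for' workshares; the inner omp_tile0 loop carries
// the llvm.loop.unroll.count=2 metadata so the backend unrolls it.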
#endif // HEADER
// CHECK-LABEL: define {{.*}}@__captured_stmt(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[DISTANCE_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon*, align 8
// CHECK-NEXT: %[[DOTSTART:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTOP:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[DOTSTEP:.+]] = alloca i32, align 4
// CHECK-NEXT: store i32* %[[DISTANCE:.+]], i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store %struct.anon* %[[__CONTEXT:.+]], %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon*, %struct.anon** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon, %struct.anon* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32*, i32** %[[TMP1]], align 8
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[TMP2]], align 4
// CHECK-NEXT: store i32 %[[TMP3]], i32* %[[DOTSTART]], align 4
// CHECK-NEXT: store i32 2, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: store i32 1, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[TMP4:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[TMP5:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[CMP:.+]] = icmp slt i32 %[[TMP4]], %[[TMP5]]
// CHECK-NEXT: br i1 %[[CMP]], label %[[COND_TRUE:.+]], label %[[COND_FALSE:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_TRUE]]:
// CHECK-NEXT: %[[TMP6:.+]] = load i32, i32* %[[DOTSTOP]], align 4
// CHECK-NEXT: %[[TMP7:.+]] = load i32, i32* %[[DOTSTART]], align 4
// CHECK-NEXT: %[[SUB:.+]] = sub nsw i32 %[[TMP6]], %[[TMP7]]
// CHECK-NEXT: %[[TMP8:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[SUB1:.+]] = sub i32 %[[TMP8]], 1
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[SUB]], %[[SUB1]]
// CHECK-NEXT: %[[TMP9:.+]] = load i32, i32* %[[DOTSTEP]], align 4
// CHECK-NEXT: %[[DIV:.+]] = udiv i32 %[[ADD]], %[[TMP9]]
// CHECK-NEXT: br label %[[COND_END:.+]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_FALSE]]:
// CHECK-NEXT: br label %[[COND_END]]
// CHECK-EMPTY:
// CHECK-NEXT: [[COND_END]]:
// CHECK-NEXT: %[[COND:.+]] = phi i32 [ %[[DIV]], %[[COND_TRUE]] ], [ 0, %[[COND_FALSE]] ]
// CHECK-NEXT: %[[TMP10:.+]] = load i32*, i32** %[[DISTANCE_ADDR]], align 8
// CHECK-NEXT: store i32 %[[COND]], i32* %[[TMP10]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK-LABEL: define {{.*}}@__captured_stmt.1(
// CHECK-NEXT: [[ENTRY:.*]]:
// CHECK-NEXT: %[[LOOPVAR_ADDR:.+]] = alloca i32*, align 8
// CHECK-NEXT: %[[LOGICAL_ADDR:.+]] = alloca i32, align 4
// CHECK-NEXT: %[[__CONTEXT_ADDR:.+]] = alloca %struct.anon.0*, align 8
// CHECK-NEXT: store i32* %[[LOOPVAR:.+]], i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[LOGICAL:.+]], i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: store %struct.anon.0* %[[__CONTEXT:.+]], %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP0:.+]] = load %struct.anon.0*, %struct.anon.0** %[[__CONTEXT_ADDR]], align 8
// CHECK-NEXT: %[[TMP1:.+]] = getelementptr inbounds %struct.anon.0, %struct.anon.0* %[[TMP0]], i32 0, i32 0
// CHECK-NEXT: %[[TMP2:.+]] = load i32, i32* %[[TMP1]], align 4
// CHECK-NEXT: %[[TMP3:.+]] = load i32, i32* %[[LOGICAL_ADDR]], align 4
// CHECK-NEXT: %[[MUL:.+]] = mul i32 1, %[[TMP3]]
// CHECK-NEXT: %[[ADD:.+]] = add i32 %[[TMP2]], %[[MUL]]
// CHECK-NEXT: %[[TMP4:.+]] = load i32*, i32** %[[LOOPVAR_ADDR]], align 8
// CHECK-NEXT: store i32 %[[ADD]], i32* %[[TMP4]], align 4
// CHECK-NEXT: ret void
// CHECK-NEXT: }
// CHECK: ![[META0:[0-9]+]] = !{i32 1, !"wchar_size", i32 4}
// CHECK: ![[META1:[0-9]+]] = !{i32 7, !"openmp", i32 51}
// CHECK: ![[META2:[0-9]+]] =
// CHECK: ![[LOOP3]] = distinct !{![[LOOP3]], ![[LOOPPROP4:[0-9]+]], ![[LOOPPROP5:[0-9]+]]}
// CHECK: ![[LOOPPROP4]] = !{!"llvm.loop.unroll.enable"}
// CHECK: ![[LOOPPROP5]] = !{!"llvm.loop.unroll.count", i32 2}
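// Note: partial(2) is not expanded in the IR above; it is recorded as loop
// metadata (![[LOOP3]] carries llvm.loop.unroll.enable plus
// llvm.loop.unroll.count = 2), and LLVM's loop unroller performs the actual
// 2x unrolling of the tile loop later in the pipeline.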
|
mhpTest9.c | int main() {
int x = 1;
#pragma omp parallel
{
int p;
if (0) {
while (0) {
#pragma omp atomic read
p = x;
}
} else {
#pragma omp atomic write
x = 0;
}
}
}
|
mm.c | #include <stdio.h>
#include <stdlib.h>
/*
* Sequential time:
* real 1m9.236s
* user 1m8.004s
* sys 0m0.132s
*
* Multicore time:
* real 0m17.290s
* user 1m8.421s
* sys 0m0.148s
*
* Time, warps_launched and warp_execution_efficiency for the GPU OpenMP version:
*
* real 0m5.384s
* user 0m3.656s
* sys 0m1.756s
*
* ==21740== NVPROF is profiling process 21740, command: ./mmGPU
* ==21740== Some kernel(s) will be replayed on device 0 in order to collect all events/metrics.
* ==21740== Replaying kernel "mm$_omp_fn$0" (done)
* ==21740== Profiling application: ./mmGPU
* ==21740== Profiling result:
* ==21740== Event result:
* Invocations Event Name Min Max Avg Total
* Device "GeForce GT 1030 (0)"
* Kernel: mm$_omp_fn$0
* 1 warps_launched 72 72 72 72
*
* ==21740== Metric result:
* Invocations Metric Name Metric Description Min Max Avg
* Device "GeForce GT 1030 (0)"
* Kernel: mm$_omp_fn$0
* 1 warp_execution_efficiency Warp Execution Efficiency 86.81% 86.81% 86.81%
*
*/
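// Build note (toolchains assumed, not stated in the original source): the GPU
// numbers above require an offloading-enabled compiler, e.g.
//   clang -O3 -fopenmp -fopenmp-targets=nvptx64-nvidia-cuda mm.c -o mmGPU
// or GCC built with the nvptx offload backend (-foffload=nvptx-none); plain
// -fopenmp reproduces the multicore timing instead.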
void mm(double* a, double* b, double* c, int width)
{
// #pragma omp parallel for schedule(dynamic)
#pragma omp target map(to:a[0:width*width], b[0:width*width]) map(from:c[0:width*width])
#pragma omp teams distribute parallel for simd
for (int i = 0; i < width; i++)
{
for (int j = 0; j < width; j++)
{
double sum = 0;
for (int k = 0; k < width; k++)
{
double x = a[i * width + k];
double y = b[k * width + j];
sum += x * y;
}
c[i * width + j] = sum;
}
}
}
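/*
 * Hedged variant (not in the original source): collapsing the two outer loops
 * hands the device a single iteration space of width*width, which typically
 * improves occupancy when width is small relative to the GPU's thread
 * capacity. Whether it beats the original depends on the compiler's
 * teams/threads mapping; treat it as a sketch, not a tuned kernel.
 */
void mm_collapsed(double* a, double* b, double* c, int width)
{
#pragma omp target map(to:a[0:width*width], b[0:width*width]) map(from:c[0:width*width])
#pragma omp teams distribute parallel for simd collapse(2)
    for (int i = 0; i < width; i++)
    {
        for (int j = 0; j < width; j++)
        {
            double sum = 0;
            for (int k = 0; k < width; k++)
                sum += a[i * width + k] * b[k * width + j];
            c[i * width + j] = sum;
        }
    }
}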
int main()
{
int width = 2000;
double *a = (double*) malloc (width * width * sizeof(double));
double *b = (double*) malloc (width * width * sizeof(double));
double *c = (double*) malloc (width * width * sizeof(double));
#pragma omp parallel for schedule(dynamic)
for(int i = 0; i < width; i++) {
for(int j = 0; j < width; j++) {
a[i*width+j] = i;
b[i*width+j] = j;
c[i*width+j] = 0;
}
}
mm(a,b,c,width);
/*
for(int i = 0; i < width; i++) {
for(int j = 0; j < width; j++) {
printf("\n c[%d][%d] = %f",i,j,c[i*width+j]);
}
}*/
}
|
hmacMD5_fmt_plug.c | /*
* This software is Copyright (c) 2010 bartavelle, <bartavelle at bandecon.com>
* and (c) magnum 2011-2015,
* and it is hereby released to the general public under the following terms:
* Redistribution and use in source and binary forms, with or without
* modification, are permitted.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_hmacMD5;
#elif FMT_REGISTERS_H
john_register_one(&fmt_hmacMD5);
#else
#include <string.h>
#include "arch.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 2048 // tuned for i7 using SSE2 and w/o HT
#endif
#endif
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "md5.h"
#include "aligned.h"
#include "simd-intrinsics.h"
#include "base64_convert.h"
#include "memdbg.h"
#define FORMAT_LABEL "HMAC-MD5"
#define FORMAT_NAME ""
#ifdef SIMD_COEF_32
#define MD5_N (SIMD_PARA_MD5 * SIMD_COEF_32)
#endif
#define ALGORITHM_NAME "password is key, MD5 " MD5_ALGORITHM_NAME
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH 0
#define PLAINTEXT_LENGTH 125
#define PAD_SIZE 64
#define PAD_SIZE_W (PAD_SIZE/4)
#define BINARY_SIZE 16
#define BINARY_ALIGN sizeof(ARCH_WORD_32)
#ifdef SIMD_COEF_32
#define SALT_LIMBS 3 /* 3 limbs, 183 bytes */
#define SALT_LENGTH (SALT_LIMBS * PAD_SIZE - 9)
#define SALT_ALIGN MEM_ALIGN_SIMD
#else
#define SALT_LENGTH 1023
#define SALT_ALIGN 1
#endif
#define CIPHERTEXT_LENGTH (2 * SALT_LENGTH + 2 * BINARY_SIZE)
#define HEXCHARS "0123456789abcdef"
#ifdef SIMD_COEF_32
#define MIN_KEYS_PER_CRYPT MD5_N
#define MAX_KEYS_PER_CRYPT MD5_N
#define GETPOS(i, index) ((index & (SIMD_COEF_32 - 1)) * 4 + ((i&63) & (0xffffffff - 3)) * SIMD_COEF_32 + ((i&63) & 3) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE * SIMD_COEF_32)
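// GETPOS(i, index) addresses byte i of key `index` inside the interleaved SIMD
// buffer: within each PAD_SIZE block, the 32-bit words of SIMD_COEF_32 keys are
// interleaved word-by-word, so lane (index % SIMD_COEF_32) owns every
// SIMD_COEF_32-th word of the block.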
#else
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"what do ya want for nothing?#750c783e6ab0b503eaa86e310a5db738", "Jefe"},
{"YT1m11GDMm3oze0EdqO3FZmATSrxhquB#6c97850b296b34719b7cea5c0c751e22", ""},
{"2shXeqDlLdZ2pSMc0CBHfTyA5a9TKuSW#dfeb02c6f8a9ce89b554be60db3a2333", "magnum"},
{"#74e6f7298a9c2d168935f58c001bad88", ""},
{"The quick brown fox jumps over the lazy dog#80070713463e7749b90c2dc24911e275", "key"},
{"Beppe Grillo#F8457C3046C587BBCBD6D7036BA42C81", "Io credo nella reincarnazione e sono di Genova; per cui ho fatto testamento e mi sono lasciato tutto a me."},
{"$cram_md5$PG5vLXJlcGx5QGhhc2hjYXQubmV0Pg==$dXNlciA0NGVhZmQyMmZlNzY2NzBmNmIyODc5MDgxYTdmNWY3MQ==", "hashcat"},
{"MEaEObR2JNXgchVn93GLLH1Ud4qTzuC0#9a80bea0acd72231ea043210a173ec7f", "123"},
{"d2BbCbiSXTlglEstbFFlrRgPhR1KUa2s#7a553738bc4997e656329c1b1ef99e4f", "123456789"},
{"dBTmX1AdmnWyVkMKp7BEt4O3eBktdN2S#f6af0afd4f397504c3bfa3836bc04a0f", "passWOrd"},
{"0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789#050a9dee01b2302914b2a78346721d9b", "magnum"},
{"123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123#e4d0097fdc52f6fc50545d832784232d", "MaxLenSaltUsed"},
{NULL}
};
#ifdef SIMD_COEF_32
static unsigned char *crypt_key;
static unsigned char *ipad, *prep_ipad;
static unsigned char *opad, *prep_opad;
typedef struct cur_salt_t {
unsigned char salt[SALT_LIMBS][PAD_SIZE * MAX_KEYS_PER_CRYPT];
int salt_len;
} cur_salt_t;
static cur_salt_t *cur_salt;
static int bufsize;
#define SALT_SIZE sizeof(cur_salt_t)
#else
static ARCH_WORD_32 (*crypt_key)[BINARY_SIZE / sizeof(ARCH_WORD_32)];
static unsigned char (*ipad)[PAD_SIZE];
static unsigned char (*opad)[PAD_SIZE];
static unsigned char cur_salt[SALT_LENGTH+1];
static MD5_CTX *ipad_ctx;
static MD5_CTX *opad_ctx;
#define SALT_SIZE sizeof(cur_salt)
#endif
static char (*saved_plain)[PLAINTEXT_LENGTH + 1];
static int new_keys;
#ifdef SIMD_COEF_32
static void clear_keys(void)
{
memset(ipad, 0x36, bufsize);
memset(opad, 0x5C, bufsize);
}
#endif
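/*
 * Background (RFC 2104): HMAC-MD5(K, m) = MD5((K' ^ opad) || MD5((K' ^ ipad) || m)),
 * where K' is the key zero-padded to MD5's 64-byte block (or first hashed down
 * to 16 bytes if longer), ipad is 0x36 repeated and opad is 0x5C repeated.
 * In the SIMD path, clear_keys() pre-fills the pads so set_key() below only
 * has to XOR each candidate key in, leaving crypt_all() just the two MD5
 * passes per candidate.
 */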
static void init(struct fmt_main *self)
{
#ifdef SIMD_COEF_32
unsigned int i;
#endif
#ifdef _OPENMP
int omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
#ifdef SIMD_COEF_32
bufsize = sizeof(*opad) * self->params.max_keys_per_crypt * PAD_SIZE;
crypt_key = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
ipad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
opad = mem_calloc_align(1, bufsize, MEM_ALIGN_SIMD);
prep_ipad = mem_calloc_align(self->params.max_keys_per_crypt,
BINARY_SIZE, MEM_ALIGN_SIMD);
prep_opad = mem_calloc_align(self->params.max_keys_per_crypt,
BINARY_SIZE, MEM_ALIGN_SIMD);
for (i = 0; i < self->params.max_keys_per_crypt; ++i) {
crypt_key[GETPOS(BINARY_SIZE, i)] = 0x80;
((unsigned int*)crypt_key)[14 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + (i/SIMD_COEF_32) * PAD_SIZE_W * SIMD_COEF_32] = (BINARY_SIZE + PAD_SIZE) << 3;
}
clear_keys();
#else
crypt_key = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*crypt_key));
ipad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*ipad));
opad = mem_calloc(self->params.max_keys_per_crypt, sizeof(*opad));
ipad_ctx = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*ipad_ctx));
opad_ctx = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*opad_ctx));
#endif
saved_plain = mem_calloc(self->params.max_keys_per_crypt,
sizeof(*saved_plain));
}
static void done(void)
{
MEM_FREE(saved_plain);
#ifdef SIMD_COEF_32
MEM_FREE(prep_opad);
MEM_FREE(prep_ipad);
#else
MEM_FREE(opad_ctx);
MEM_FREE(ipad_ctx);
#endif
MEM_FREE(opad);
MEM_FREE(ipad);
MEM_FREE(crypt_key);
}
/* Convert from Base64 format with tag to our legacy format */
static char *prepare(char *split_fields[10], struct fmt_main *self)
{
char *p = split_fields[1];
if (!strncmp(p, "$cram_md5$", 10)) {
static char out[256];
int len, len2;
char *d, *o = out;
p += 10;
memset(out, 0, sizeof(out));
if (!(d = strchr(p, '$')))
return split_fields[1];
len = base64_convert(p, e_b64_mime, (int)(d - p - 1),
o, e_b64_raw,
sizeof(out),
flg_Base64_MIME_TRAIL_EQ, 0);
if (len > sizeof(out)-2)
return split_fields[1];
o += len;
*o++ = '#';
d++;
len2 = base64_convert(d, e_b64_mime, strlen(d),
o, e_b64_raw,
sizeof(out) - len - 2,
flg_Base64_MIME_TRAIL_EQ, 0);
if (len2 > sizeof(out) - len - 3)
return split_fields[1];
len = len2;
if (!(p = strchr(o, ' ')))
return split_fields[1];
p++;
if (p-o >= len)
return split_fields[1];
memmove(o, p, len - (p - o) + 1);
if (strlen(o) == BINARY_SIZE * 2)
return out;
}
return p;
}
static char *split(char *ciphertext, int index, struct fmt_main *self)
{
static char out[CIPHERTEXT_LENGTH + 1];
if (strstr(ciphertext, "$SOURCE_HASH$"))
return ciphertext;
strnzcpy(out, ciphertext, CIPHERTEXT_LENGTH + 1);
strlwr(strrchr(out, '#'));
return out;
}
static int valid(char *ciphertext, struct fmt_main *self)
{
int pos, i;
char *p;
if (!strncmp(ciphertext, "$cram_md5$", 10)) {
char *f[10];
f[1] = ciphertext;
ciphertext = prepare(f, self);
}
p = strrchr(ciphertext, '#'); // allow # in salt
if (!p || p > &ciphertext[strlen(ciphertext) - 1])
return 0;
i = (int)(p - ciphertext);
if (i > SALT_LENGTH)
return 0;
pos = i + 1;
if (strlen(ciphertext+pos) != BINARY_SIZE * 2)
return 0;
for (i = pos; i < BINARY_SIZE*2+pos; i++) {
if (!((('0' <= ciphertext[i])&&(ciphertext[i] <= '9')) ||
(('a' <= ciphertext[i])&&(ciphertext[i] <= 'f'))
|| (('A' <= ciphertext[i])&&(ciphertext[i] <= 'F'))))
return 0;
}
return 1;
}
static void set_salt(void *salt)
{
#ifdef SIMD_COEF_32
cur_salt = salt;
#else
strcpy((char*)cur_salt, (char*)salt);
#endif
}
static void set_key(char *key, int index)
{
int len;
#ifdef SIMD_COEF_32
ARCH_WORD_32 *ipadp = (ARCH_WORD_32*)&ipad[GETPOS(0, index)];
ARCH_WORD_32 *opadp = (ARCH_WORD_32*)&opad[GETPOS(0, index)];
const ARCH_WORD_32 *keyp = (ARCH_WORD_32*)key;
unsigned int temp;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
if (len > PAD_SIZE) {
unsigned char k0[BINARY_SIZE];
MD5_CTX ctx;
int i;
MD5_Init(&ctx);
MD5_Update(&ctx, key, len);
MD5_Final(k0, &ctx);
keyp = (unsigned int*)k0;
for(i = 0; i < BINARY_SIZE / 4; i++, ipadp += SIMD_COEF_32, opadp += SIMD_COEF_32)
{
temp = *keyp++;
*ipadp ^= temp;
*opadp ^= temp;
}
}
else
while((unsigned char)(temp = *keyp++)) {
if (!(temp & 0xff00) || !(temp & 0xff0000))
{
*ipadp ^= (unsigned short)temp;
*opadp ^= (unsigned short)temp;
break;
}
*ipadp ^= temp;
*opadp ^= temp;
if (!(temp & 0xff000000))
break;
ipadp += SIMD_COEF_32;
opadp += SIMD_COEF_32;
}
#else
int i;
len = strlen(key);
memcpy(saved_plain[index], key, len);
saved_plain[index][len] = 0;
memset(ipad[index], 0x36, PAD_SIZE);
memset(opad[index], 0x5C, PAD_SIZE);
if (len > PAD_SIZE) {
MD5_CTX ctx;
unsigned char k0[BINARY_SIZE];
MD5_Init(&ctx);
MD5_Update(&ctx, key, len);
MD5_Final(k0, &ctx);
len = BINARY_SIZE;
for(i = 0; i < len; i++)
{
ipad[index][i] ^= k0[i];
opad[index][i] ^= k0[i];
}
}
else
for(i = 0; i < len; i++)
{
ipad[index][i] ^= key[i];
opad[index][i] ^= key[i];
}
#endif
new_keys = 1;
}
static char *get_key(int index)
{
return saved_plain[index];
}
static int cmp_all(void *binary, int count)
{
#ifdef SIMD_COEF_32
unsigned int x, y = 0;
for(; y < (unsigned int)(count + SIMD_COEF_32 - 1) / SIMD_COEF_32; y++)
for(x = 0; x < SIMD_COEF_32; x++)
{
// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32)
if (((ARCH_WORD_32*)binary)[0] == ((ARCH_WORD_32*)crypt_key)[x + y * SIMD_COEF_32 * PAD_SIZE_W])
return 1;
}
return 0;
#else
int index = 0;
#if defined(_OPENMP) || (MAX_KEYS_PER_CRYPT > 1)
for (index = 0; index < count; index++)
#endif
if (((ARCH_WORD_32*)binary)[0] == crypt_key[index][0])
return 1;
return 0;
#endif
}
static int cmp_one(void *binary, int index)
{
#ifdef SIMD_COEF_32
int i;
for(i = 0; i < (BINARY_SIZE/4); i++)
// NOTE crypt_key is in input format (PAD_SIZE * SIMD_COEF_32)
if (((ARCH_WORD_32*)binary)[i] != ((ARCH_WORD_32*)crypt_key)[i * SIMD_COEF_32 + (index&(SIMD_COEF_32-1)) + (unsigned int)index/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32])
return 0;
return 1;
#else
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return (1);
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0;
#if _OPENMP
#pragma omp parallel for
for (index = 0; index < count; index += MAX_KEYS_PER_CRYPT)
#endif
{
#ifdef SIMD_COEF_32
int i;
if (new_keys) {
SIMDmd5body(&ipad[index * PAD_SIZE],
(unsigned int*)&prep_ipad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN);
SIMDmd5body(&opad[index * PAD_SIZE],
(unsigned int*)&prep_opad[index * BINARY_SIZE],
NULL, SSEi_MIXED_IN);
}
SIMDmd5body(cur_salt->salt[0],
(unsigned int*)&crypt_key[index * PAD_SIZE],
(unsigned int*)&prep_ipad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
for (i = 1; i <= (cur_salt->salt_len + 8) / PAD_SIZE; i++) {
SIMDmd5body(cur_salt->salt[i],
(unsigned int*)&crypt_key[index * PAD_SIZE],
(unsigned int*)&crypt_key[index * PAD_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD_INP_FMT|SSEi_OUTPUT_AS_INP_FMT);
}
SIMDmd5body(&crypt_key[index * PAD_SIZE],
(unsigned int*)&crypt_key[index * PAD_SIZE],
(unsigned int*)&prep_opad[index * BINARY_SIZE],
SSEi_MIXED_IN|SSEi_RELOAD|SSEi_OUTPUT_AS_INP_FMT);
#else
MD5_CTX ctx;
if (new_keys) {
MD5_Init(&ipad_ctx[index]);
MD5_Update(&ipad_ctx[index], ipad[index], PAD_SIZE);
MD5_Init(&opad_ctx[index]);
MD5_Update(&opad_ctx[index], opad[index], PAD_SIZE);
}
memcpy(&ctx, &ipad_ctx[index], sizeof(ctx));
MD5_Update(&ctx, cur_salt, strlen((char*)cur_salt));
MD5_Final((unsigned char*) crypt_key[index], &ctx);
memcpy(&ctx, &opad_ctx[index], sizeof(ctx));
MD5_Update(&ctx, crypt_key[index], BINARY_SIZE);
MD5_Final((unsigned char*) crypt_key[index], &ctx);
#endif
}
new_keys = 0;
return count;
}
static void *get_binary(char *ciphertext)
{
static union {
unsigned char c[BINARY_SIZE];
ARCH_WORD_32 dummy;
} buf;
unsigned char *out = buf.c;
char *p;
int i;
// allow # in salt
p = strrchr(ciphertext, '#') + 1;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = (atoi16[ARCH_INDEX(*p)] << 4) |
atoi16[ARCH_INDEX(p[1])];
p += 2;
}
return (void*)out;
}
static void *get_salt(char *ciphertext)
{
static unsigned char salt[SALT_LENGTH+1];
int len;
#ifdef SIMD_COEF_32
unsigned int i = 0;
static JTR_ALIGN(MEM_ALIGN_SIMD) cur_salt_t cur_salt;
int salt_len = 0;
#endif
// allow # in salt
len = strrchr(ciphertext, '#') - ciphertext;
memset(salt, 0, sizeof(salt));
memcpy(salt, ciphertext, len);
#ifdef SIMD_COEF_32
memset(&cur_salt, 0, sizeof(cur_salt));
while(((unsigned char*)salt)[salt_len])
{
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i)
cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] =
((unsigned char*)salt)[salt_len];
++salt_len;
}
cur_salt.salt_len = salt_len;
for (i = 0; i < MAX_KEYS_PER_CRYPT; ++i) {
cur_salt.salt[salt_len / PAD_SIZE][GETPOS(salt_len, i)] = 0x80;
((unsigned int*)cur_salt.salt[(salt_len + 8) / PAD_SIZE])[14 * SIMD_COEF_32 + (i&(SIMD_COEF_32-1)) + i/SIMD_COEF_32 * PAD_SIZE_W * SIMD_COEF_32] = (salt_len + PAD_SIZE) << 3;
}
return &cur_salt;
#else
return salt;
#endif
}
struct fmt_main fmt_hmacMD5 = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_OMP_BAD |
FMT_SPLIT_UNIFIES_CASE,
{ NULL },
{ NULL },
tests
}, {
init,
done,
fmt_default_reset,
prepare,
valid,
split,
get_binary,
get_salt,
{ NULL },
fmt_default_source,
{
fmt_default_binary_hash /* Not usable with $SOURCE_HASH$ */
},
fmt_default_salt_hash,
NULL,
set_salt,
set_key,
get_key,
#ifdef SIMD_COEF_32
clear_keys,
#else
fmt_default_clear_keys,
#endif
crypt_all,
{
fmt_default_get_hash /* Not usable with $SOURCE_HASH$ */
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
hpgmg.c | //------------------------------------------------------------------------------------------------------------------------------
// Copyright Notice
//------------------------------------------------------------------------------------------------------------------------------
// HPGMG, Copyright (c) 2014, The Regents of the University of
// California, through Lawrence Berkeley National Laboratory (subject to
// receipt of any required approvals from the U.S. Dept. of Energy). All
// rights reserved.
//
// If you have questions about your rights to use or distribute this
// software, please contact Berkeley Lab's Technology Transfer Department
// at TTD@lbl.gov.
//
// NOTICE. This software is owned by the U.S. Department of Energy. As
// such, the U.S. Government has been granted for itself and others
// acting on its behalf a paid-up, nonexclusive, irrevocable, worldwide
// license in the Software to reproduce, prepare derivative works, and
// perform publicly and display publicly. Beginning five (5) years after
// the date permission to assert copyright is obtained from the U.S.
// Department of Energy, and subject to any subsequent five (5) year
// renewals, the U.S. Government is granted for itself and others acting
// on its behalf a paid-up, nonexclusive, irrevocable, worldwide license
// in the Software to reproduce, prepare derivative works, distribute
// copies to the public, perform publicly and display publicly, and to
// permit others to do so.
//------------------------------------------------------------------------------------------------------------------------------
// Samuel Williams
// SWWilliams@lbl.gov
// Lawrence Berkeley National Lab
//------------------------------------------------------------------------------------------------------------------------------
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
//------------------------------------------------------------------------------------------------------------------------------
#ifdef USE_MPI
#include <mpi.h>
#endif
#ifdef _OPENMP
#include <omp.h>
#endif
//------------------------------------------------------------------------------------------------------------------------------
#include "defines.h"
#include "level.h"
#include "mg.h"
#include "operators.h"
#include "solvers.h"
//------------------------------------------------------------------------------------------------------------------------------
int main(int argc, char **argv){
int my_rank=0;
int num_tasks=1;
int OMP_Threads = 1;
int OMP_Nested = 0;
#ifdef _OPENMP
#pragma omp parallel
{
#pragma omp master
{
OMP_Threads = omp_get_num_threads();
OMP_Nested = omp_get_nested();
}
}
#endif
#ifdef USE_MPI
int actual_threading_model = -1;
int requested_threading_model = -1;
requested_threading_model = MPI_THREAD_SINGLE;
//requested_threading_model = MPI_THREAD_FUNNELED;
//requested_threading_model = MPI_THREAD_SERIALIZED;
//requested_threading_model = MPI_THREAD_MULTIPLE;
//MPI_Init(&argc, &argv);
#ifdef _OPENMP
requested_threading_model = MPI_THREAD_FUNNELED;
//requested_threading_model = MPI_THREAD_SERIALIZED;
//requested_threading_model = MPI_THREAD_MULTIPLE;
//MPI_Init_thread(&argc, &argv, requested_threading_model, &actual_threading_model);
#endif
MPI_Init_thread(&argc, &argv, requested_threading_model, &actual_threading_model);
MPI_Comm_size(MPI_COMM_WORLD, &num_tasks);
MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
//if(actual_threading_model>requested_threading_model)actual_threading_model=requested_threading_model;
if(my_rank==0){
if(requested_threading_model == MPI_THREAD_MULTIPLE )printf("Requested MPI_THREAD_MULTIPLE, ");
else if(requested_threading_model == MPI_THREAD_SINGLE )printf("Requested MPI_THREAD_SINGLE, ");
else if(requested_threading_model == MPI_THREAD_FUNNELED )printf("Requested MPI_THREAD_FUNNELED, ");
else if(requested_threading_model == MPI_THREAD_SERIALIZED)printf("Requested MPI_THREAD_SERIALIZED, ");
else printf("Requested Unknown MPI Threading Model (%d), ",requested_threading_model);
if(actual_threading_model == MPI_THREAD_MULTIPLE )printf("got MPI_THREAD_MULTIPLE\n");
else if(actual_threading_model == MPI_THREAD_SINGLE )printf("got MPI_THREAD_SINGLE\n");
else if(actual_threading_model == MPI_THREAD_FUNNELED )printf("got MPI_THREAD_FUNNELED\n");
else if(actual_threading_model == MPI_THREAD_SERIALIZED)printf("got MPI_THREAD_SERIALIZED\n");
else printf("got Unknown MPI Threading Model (%d)\n",actual_threading_model);
}
#ifdef USE_HPM // IBM HPM counters for BGQ...
HPM_Init();
#endif
#endif // USE_MPI
int log2_box_dim = 6;
int target_boxes_per_rank = 1;
if(argc==3){
log2_box_dim=atoi(argv[1]);
target_boxes_per_rank=atoi(argv[2]);
}else{
if(my_rank==0){printf("usage: ./a.out [log2_box_dim] [target_boxes_per_rank]\n");}
#ifdef USE_MPI
MPI_Finalize();
#endif
exit(0);
}
if(log2_box_dim<4){
if(my_rank==0){printf("log2_box_dim must be at least 4\n");}
#ifdef USE_MPI
MPI_Finalize();
#endif
exit(0);
}
if(target_boxes_per_rank<1){
if(my_rank==0){printf("target_boxes_per_rank must be at least 1\n");}
#ifdef USE_MPI
MPI_Finalize();
#endif
exit(0);
}
if(my_rank==0){
if(OMP_Nested)fprintf(stdout,"%d MPI Tasks of %d threads (OMP_NESTED=TRUE)\n\n" ,num_tasks,OMP_Threads);
else fprintf(stdout,"%d MPI Tasks of %d threads (OMP_NESTED=FALSE)\n\n",num_tasks,OMP_Threads);
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// calculate the problem size...
#ifndef MAX_COARSE_DIM
#define MAX_COARSE_DIM 11
#endif
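// Search all candidate box counts bi^3 <= target_boxes and keep the largest
// bi whose coarsest grid is reachable: box_dim*bi with every factor of two
// divided out must not exceed MAX_COARSE_DIM, otherwise the multigrid
// hierarchy cannot coarsen down to a tractable bottom level.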
int64_t box_dim=1<<log2_box_dim;
int64_t target_boxes = (int64_t)target_boxes_per_rank*(int64_t)num_tasks;
int64_t boxes_in_i = -1;
int64_t bi;
for(bi=1;bi<1000;bi++){ // all possible problem sizes
int64_t total_boxes = bi*bi*bi;
if(total_boxes<=target_boxes){
int64_t coarse_grid_dim = box_dim*bi;
while( (coarse_grid_dim%2) == 0){coarse_grid_dim=coarse_grid_dim/2;}
if(coarse_grid_dim<=MAX_COARSE_DIM){
boxes_in_i = bi;
}
}
}
if(boxes_in_i<1){
if(my_rank==0){printf("failed to find an acceptable problem size\n");}
#ifdef USE_MPI
MPI_Finalize();
#endif
exit(0);
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// create the fine level...
#ifdef USE_PERIODIC_BC
int bc = BC_PERIODIC;
#else
int bc = BC_DIRICHLET;
#endif
level_type fine_grid;
int ghosts=stencil_get_radius();
create_level(&fine_grid,boxes_in_i,box_dim,ghosts,VECTORS_RESERVED,bc,my_rank,num_tasks);
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#ifdef USE_HELMHOLTZ
double a=1.0;double b=1.0; // Helmholtz
if(my_rank==0)fprintf(stdout," Creating Helmholtz (a=%f, b=%f) test problem\n",a,b);
#else
double a=0.0;double b=1.0; // Poisson
if(my_rank==0)fprintf(stdout," Creating Poisson (a=%f, b=%f) test problem\n",a,b);
#endif
double h0=1.0/( (double)boxes_in_i*(double)box_dim );
initialize_problem(&fine_grid,h0,a,b); // calculate VECTOR_ALPHA, VECTOR_BETA, and VECTOR_UTRUE
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if( ((a==0.0)||(fine_grid.alpha_is_zero==1) ) && (fine_grid.boundary_condition.type == BC_PERIODIC)){
// Poisson w/ periodic BC's...
// nominally, u shifted by any constant is still a valid solution.
// However, by convention, we assume u sums to zero.
double average_value_of_u = mean(&fine_grid,VECTOR_UTRUE);
if(my_rank==0){fprintf(stdout," average value of u_true = %20.12e... shifting u_true to ensure it sums to zero...\n",average_value_of_u);}
shift_vector(&fine_grid,VECTOR_UTRUE,VECTOR_UTRUE,-average_value_of_u);
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
//apply_op(&fine_grid,VECTOR_F,VECTOR_UTRUE,a,b); // by construction, f = A(u_true)
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if(fine_grid.boundary_condition.type == BC_PERIODIC){
double average_value_of_f = mean(&fine_grid,VECTOR_F);
if(average_value_of_f!=0.0){
if(my_rank==0){fprintf(stderr," WARNING... Periodic boundary conditions, but f does not sum to zero... mean(f)=%e\n",average_value_of_f);}
//shift_vector(&fine_grid,VECTOR_F,VECTOR_F,-average_value_of_f);
}
}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
mg_type all_grids;
int minCoarseDim = 1;
rebuild_operator(&fine_grid,NULL,a,b); // i.e. calculate Dinv and lambda_max
MGBuild(&all_grids,&fine_grid,a,b,minCoarseDim); // build the Multigrid Hierarchy
double dtol= 0.0;double rtol=1e-10; // converged if ||b-Ax|| / ||b|| < rtol
//double dtol=1e-15;double rtol= 0.0; // converged if ||D^{-1}(b-Ax)|| < dtol
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
int doTiming;
int minSolves = 10; // do at least minSolves MGSolves
double timePerSolve = 0;
for(doTiming=0;doTiming<=1;doTiming++){ // first pass warms up, second pass times
#ifdef USE_HPM // IBM performance counters for BGQ...
if(doTiming)HPM_Start("FMGSolve()");
#endif
#ifdef USE_MPI
double minTime = 30.0; // minimum time in seconds that the benchmark should run
double startTime = MPI_Wtime();
if(doTiming==1){
if((minTime/timePerSolve)>minSolves)minSolves=(minTime/timePerSolve); // if one needs to do more than minSolves to run for minTime, change minSolves
}
#endif
if(my_rank==0){
if(doTiming==0){fprintf(stdout,"\n\n===== warming up by running %d solves ===============================\n",minSolves);}
else{fprintf(stdout,"\n\n===== running %d solves =============================================\n",minSolves);}
fflush(stdout);
}
int numSolves = 0; // solves completed
MGResetTimers(&all_grids);
while( (numSolves<minSolves) ){
zero_vector(all_grids.levels[0],VECTOR_U);
#ifdef USE_FCYCLES
FMGSolve(&all_grids,VECTOR_U,VECTOR_F,a,b,dtol,rtol);
#else
MGSolve(&all_grids,VECTOR_U,VECTOR_F,a,b,dtol,rtol);
#endif
numSolves++;
}
#ifdef USE_MPI
if(doTiming==0){
double endTime = MPI_Wtime();
timePerSolve = (endTime-startTime)/numSolves;
MPI_Bcast(&timePerSolve,1,MPI_DOUBLE,0,MPI_COMM_WORLD); // after warmup, process 0 broadcasts the average time per solve (consensus)
}
#endif
#ifdef USE_HPM // IBM performance counters for BGQ...
if(doTiming)HPM_Stop("FMGSolve()");
#endif
}
MGPrintTiming(&all_grids); // don't include the error check in the timing results
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
if(my_rank==0){fprintf(stdout,"calculating error... ");}
double fine_error = error(&fine_grid,VECTOR_U,VECTOR_UTRUE);
if(my_rank==0){fprintf(stdout,"h = %22.15e ||error|| = %22.15e\n\n",h0,fine_error);fflush(stdout);}
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
// MGDestroy()
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
#ifdef USE_MPI
#ifdef USE_HPM // IBM performance counters for BGQ...
HPM_Print();
#endif
MPI_Finalize();
#endif
//- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -
return(0);
}
|
volumeramsubset.h | /*********************************************************************************
*
* Inviwo - Interactive Visualization Workshop
*
* Copyright (c) 2013-2017 Inviwo Foundation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*********************************************************************************/
#ifndef IVW_VOLUMERAMSUBSET_H
#define IVW_VOLUMERAMSUBSET_H
#include <modules/base/basemoduledefine.h>
#include <inviwo/core/datastructures/volume/volumeramprecision.h>
#include <inviwo/core/datastructures/volume/volumeborder.h>
namespace inviwo {
class IVW_MODULE_BASE_API VolumeRAMSubSet {
public:
static std::shared_ptr<VolumeRAM> apply(const VolumeRepresentation* in, size3_t dim,
size3_t offset,
const VolumeBorders& border = VolumeBorders(),
bool clampBorderOutsideVolume = true);
};
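// Hypothetical usage (the getRepresentation call is assumed from the wider
// Inviwo API, not defined in this header):
//   const auto* ram = volume->getRepresentation<VolumeRAM>();
//   auto sub = VolumeRAMSubSet::apply(ram, size3_t(64), size3_t(16, 16, 16));
// extracts a 64^3 block starting at voxel (16,16,16); borders falling outside
// the source are clamped by default (clampBorderOutsideVolume = true).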
namespace detail {
struct IVW_MODULE_BASE_API VolumeRAMSubSetDispatcher {
using type = std::shared_ptr<VolumeRAM>;
template <class T>
std::shared_ptr<VolumeRAM> dispatch(const VolumeRepresentation* in, size3_t dim, size3_t offset,
const VolumeBorders& border, bool clampBorderOutsideVolume);
};
template <class DataType>
std::shared_ptr<VolumeRAM> VolumeRAMSubSetDispatcher::dispatch(const VolumeRepresentation* in,
size3_t dim, size3_t offset,
const VolumeBorders& border,
bool clampBorderOutsideVolume) {
using T = typename DataType::type;
const VolumeRAMPrecision<T>* volume = dynamic_cast<const VolumeRAMPrecision<T>*>(in);
if (!volume) return nullptr;
// determine parameters
const size3_t dataDims{volume->getDimensions()};
const size3_t copyDataDims{static_cast<size3_t>(glm::max(
static_cast<ivec3>(dim) -
glm::max(static_cast<ivec3>(offset + dim) - static_cast<ivec3>(dataDims), ivec3(0)),
ivec3(0)))};
ivec3 newOffset_Dims = static_cast<ivec3>(glm::min(offset, dataDims) - border.llf);
VolumeBorders trueBorder = VolumeBorders();
VolumeBorders correctBorder = border;
if (clampBorderOutsideVolume) {
correctBorder.llf += static_cast<size3_t>(-glm::min(newOffset_Dims, ivec3(0, 0, 0)));
correctBorder.urb += static_cast<size3_t>(
-glm::min(static_cast<ivec3>(dataDims) -
static_cast<ivec3>(offset + copyDataDims + correctBorder.urb),
ivec3(0, 0, 0)));
newOffset_Dims = static_cast<ivec3>(offset - correctBorder.llf);
} else {
trueBorder.llf = static_cast<size3_t>(-glm::min(newOffset_Dims, ivec3(0, 0, 0)));
trueBorder.urb = static_cast<size3_t>(
glm::max(static_cast<ivec3>(offset + copyDataDims + correctBorder.urb) -
static_cast<ivec3>(dataDims),
ivec3(0, 0, 0)));
}
size3_t newOffset_DimsU = static_cast<size3_t>(glm::max(newOffset_Dims, ivec3(0, 0, 0)));
size_t initialStartPos = (newOffset_DimsU.z * (dataDims.x * dataDims.y)) +
(newOffset_DimsU.y * dataDims.x) + newOffset_DimsU.x;
size3_t dimsWithBorder = dim + correctBorder.llf + correctBorder.urb;
size3_t copyDimsWithoutBorder = static_cast<size3_t>(
glm::max(static_cast<ivec3>(copyDataDims + correctBorder.llf + correctBorder.urb) -
static_cast<ivec3>(trueBorder.llf) - static_cast<ivec3>(trueBorder.urb),
ivec3(1, 1, 1)));
// per row
size_t dataSize =
copyDimsWithoutBorder.x * static_cast<size_t>(volume->getDataFormat()->getSize());
// allocate space
auto newVolume =
std::make_shared<VolumeRAMPrecision<T>>(dim + correctBorder.llf + correctBorder.urb);
const T* src = static_cast<const T*>(volume->getData());
T* dst = static_cast<T*>(newVolume->getData());
// memcpy each row for every slice to form sub volume
for (int i = 0; i < static_cast<int>(copyDimsWithoutBorder.z); i++) {
#pragma omp parallel for
for (int j = 0; j < static_cast<int>(copyDimsWithoutBorder.y); j++) {
size_t volumePos = (j * dataDims.x) + (i * dataDims.x * dataDims.y);
size_t subVolumePos = ((j + trueBorder.llf.y) * dimsWithBorder.x) +
((i + trueBorder.llf.z) * dimsWithBorder.x * dimsWithBorder.y) +
trueBorder.llf.x;
std::memcpy(dst + subVolumePos, (src + volumePos + initialStartPos), dataSize);
}
}
return newVolume;
}
} // namespace detail
} // namespace inviwo
#endif // IVW_VOLUMERAMSUBSET_H
|
opi.c | #include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "omp.h"
int main(int argc, char **argv) {
//seed random number generator
// Q2b: get the number of threads to run with from argv and
// add OpenMP API code to set number of threads here
if (argc < 2) {
  fprintf(stderr, "usage: %s <number-of-threads>\n", argv[0]);
  return 1;
}
int Nthreads = atoi(argv[1]);
omp_set_num_threads(Nthreads);
struct drand48_data *drandData;
drandData = (struct drand48_data*) malloc(Nthreads*sizeof(struct drand48_data));
// Q2c: add an OpenMP parallel region here, wherein each thread initializes
// one entry in drandData using srand48_r and seed based on thread number
#pragma omp parallel
{
int rank = omp_get_thread_num();
long int seed = rank;
srand48_r(seed, drandData+rank);
}
long long int Ntrials = 10000000;
//need running tallies
long long int Ntotal=0;
long long int Ncircle=0;
double startTime = omp_get_wtime();
#pragma omp parallel for reduction(+:Ncircle) reduction(+:Ntotal)
for (long long int n=0; n<Ntrials; n++) {
int rank = omp_get_thread_num();
double rand1;
double rand2;
//generate two random numbers (use the thread id to offset drandData)
drand48_r(drandData+rank, &rand1);
drand48_r(drandData+rank, &rand2);
double x = -1 + 2*rand1; //shift to [-1,1]
double y = -1 + 2*rand2;
//check if it's in the circle (compare squared radius to avoid the sqrt call)
if (x*x + y*y <= 1) Ncircle++;
Ntotal++;
// if (n%100 ==0) {
// double pi = 4.0*Ncircle/ (double) (n);
// printf("Our estimate of pi is %g \n", pi);
// }
}
double endTime = omp_get_wtime();
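// Area ratio: a point drawn uniformly from the square [-1,1]^2 falls inside
// the unit circle with probability (pi*1*1)/(2*2) = pi/4, so Ncircle/Ntotal
// estimates pi/4 and 4*Ncircle/Ntotal estimates pi.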
double pi = 4.0*Ncircle/ (double) (Ntotal);
printf("Our final estimate of pi is %g \n", pi);
printf("Runtime was %f seconds\n", endTime-startTime);
free(drandData);
return 0;
}
|
special_accumulation_ops.h | //
// @author raver119@gmail.com
//
#ifndef LIBND4J_SPECIAL_ACCUMULATION_OPS_H
#define LIBND4J_SPECIAL_ACCUMULATION_OPS_H
#include <templatemath.h>
//#include <ops/ops.h>
//#include <loops/reduce.h>
namespace simdOps {
template<typename T>
class LogSumExp {
public:
static const bool requiresSpecialAccumulation = true;
op_def static T startingValue(const T *input) {
return (T) 0.0f;
}
op_def static T merge(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T update(T old, T opOutput, T *extraParams) {
return opOutput + old;
}
op_def static T op(T d1, T d2) {
return nd4j::math::nd4j_exp<T>(d1 - d2);
}
op_def static T op(T d1, T* extraParams) {
return nd4j::math::nd4j_exp<T>(d1 - extraParams[0]);
}
op_def static T postProcess(T reduction, Nd4jIndex n, T *extraParams) {
return extraParams[0] + nd4j::math::nd4j_log<T>(reduction);
}
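// Numerically stable log-sum-exp: with m = max_i x_i already stored in result
// (see the note in execSpecialCuda below), op() computes exp(x_i - m),
// update() sums the terms, and postProcess() returns m + log(sum), avoiding
// overflow in exp().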
#ifdef __CUDACC__
__device__ static inline void aggregatePartials(T *sPartials, int tid, int numItems, T *extraParams) {
// start the shared memory loop on the next power of 2 less
// than the block size. If block size is not a power of 2,
// accumulate the intermediate sums in the remainder range.
int floorPow2 = numItems;
if (floorPow2 & (floorPow2 - 1)) {
while (floorPow2 & (floorPow2 - 1)) {
floorPow2 &= floorPow2 - 1;
}
if (tid >= floorPow2) {
sPartials[tid - floorPow2] = update(sPartials[tid - floorPow2], sPartials[tid], extraParams);
}
__syncthreads();
}
for (int activeThreads = floorPow2 >> 1; activeThreads; activeThreads >>= 1) {
if (tid < activeThreads && tid + activeThreads < numItems) {
sPartials[tid] = update(sPartials[tid], sPartials[tid + activeThreads], extraParams);
}
__syncthreads();
}
}
static inline __device__ void execSpecialCuda(
T *dx,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfo,
int *dimension,
int dimensionLength,
T *reductionBuffer,
UnifiedSharedMemory *manager,
int *tadOnlyShapeInfo,
Nd4jIndex *tadOffsets) {
// we assume that RESULT already holds max values
//shared memory space for storing intermediate results
__shared__ T *sPartials;
// __shared__ shape::TAD *tad;
__shared__ int tadLength;
__shared__ int tadRank;
__shared__ int numTads;
__shared__ int *tadShape;
__shared__ int *tadStride;
if (threadIdx.x == 0) {
extern __shared__ unsigned char shmem[];
sPartials = (T *) shmem;
tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
tadRank = shape::rank(tadOnlyShapeInfo);
numTads = shape::length(xShapeInfo) / tadLength;
tadShape = shape::shapeOf(tadOnlyShapeInfo);
tadStride = shape::stride(tadOnlyShapeInfo);
}
__syncthreads();
int xCoord[MAX_RANK];
for (int r = blockIdx.x; r < numTads; r += gridDim.x) {
Nd4jIndex tadOffsetForBlock = tadOffsets[r];
sPartials[threadIdx.x] = startingValue(dx + tadOffsetForBlock);
for (int i = threadIdx.x; i < tadLength; i += blockDim.x) {
shape::ind2subC(tadRank, tadShape, i, xCoord);
Nd4jIndex xOffset = shape::getOffset(tadOffsetForBlock, tadShape, tadStride, xCoord, tadRank);
sPartials[threadIdx.x] = update(sPartials[threadIdx.x], op(dx[xOffset], result[r]), extraParams);
}
__syncthreads();
// aggregate. do NOT reduce for elements > tadLength
aggregatePartials(sPartials, threadIdx.x, nd4j::math::nd4j_min<int>(blockDim.x, tadLength), &result[r]);
__syncthreads();
if (threadIdx.x == 0)
result[r] = postProcess(sPartials[threadIdx.x], tadLength, &result[r]);
}
}
#endif
static void execSpecial(T *x,
int *xShapeInfo,
T *extraParams,
T *result,
int *resultShapeInfoBuffer,
int *dimension,
int dimensionLength,
int *tadShapeInfo,
Nd4jIndex *tadOffset) {
int resultLength = shape::length(resultShapeInfoBuffer);
int *tadOnlyShapeInfo = tadShapeInfo;
Nd4jIndex *tadOffsets = tadOffset;
shape::TAD *tad = nullptr;
if (tadOnlyShapeInfo == nullptr || tadOffsets == nullptr) {
tad = new shape::TAD(xShapeInfo, dimension, dimensionLength);
tad->createTadOnlyShapeInfo();
tad->createOffsets();
if (tad->dimensionLength < 1) {
delete tad;
return;
}
tadOnlyShapeInfo = tad->tadOnlyShapeInfo;
tadOffsets = tad->tadOffsets;
}
const int tadLength = shape::tadLength(xShapeInfo, dimension, dimensionLength);
int numTads = shape::length(xShapeInfo) / tadLength;
int tadEWS = shape::elementWiseStride(tadOnlyShapeInfo);
int tadsPerThread = resultLength / TAD_THRESHOLD;
int num_threads = nd4j::math::nd4j_max<int>(1, tadsPerThread);
num_threads = nd4j::math::nd4j_min<int>(num_threads, omp_get_max_threads());
if (tadEWS > 0 && (numTads == 1 || shape::isVector(tadOnlyShapeInfo) || shape::isScalar(tadOnlyShapeInfo))) {
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
for (int i = 0; i < resultLength; i++) {
T *iter = x + tadOffsets[i];
T start = startingValue(iter);
if (tadEWS == 1) {
for (int j = 0; j < tadLength; j++) {
start = update(start, op(iter[j], result[i]), extraParams);
}
}
else {
for (int j = 0; j < tadLength; j++) {
start = update(start, op(iter[j * tadEWS], result[i]), extraParams);
}
}
result[i] = postProcess(start, tadLength, &result[i]);
}
}
else {
int *tadShape = shape::shapeOf(tadOnlyShapeInfo);
int *tadStride = shape::stride(tadOnlyShapeInfo);
int tadRank = shape::rank(tadOnlyShapeInfo);
#pragma omp parallel for schedule(guided) num_threads(num_threads) if (num_threads > 1) proc_bind(AFFINITY) default(shared)
for (int i = 0; i < resultLength; i++) {
Nd4jIndex offset = tadOffsets[i];
int xCoord[MAX_RANK];
T start = startingValue(x + offset);
for (int j = 0; j < tadLength; j++) {
shape::ind2subC(tadRank, tadShape, j, xCoord);
Nd4jIndex xOffset = shape::getOffset(offset, tadShape, tadStride, xCoord, tadRank);
//printf("C I: %i; V: %f; op: %f\n", i, x[xOffset], op(x[xOffset], (float) result[i]));
start = update(start, op(x[xOffset], result[i]), extraParams);
}
result[i] = postProcess(start, tadLength, &result[i]);
}
}
if (tad != nullptr)
delete tad;
}
};
}
#endif //LIBND4J_SPECIAL_ACCUMULATION_OPS_H
|
RungeSolver12_14.h | #include "../DifferentialSolver.h"
#include <assert.h>
/*
Copy-paste of the DormandPrince method with slight modifications, yielding a
35-stage 14th-order Runge-Kutta method with an embedded 12th-order error
estimate; coefficients tabulated at:
http://sce.uhcl.edu/rungekutta/
http://sce.uhcl.edu/rungekutta/rk1412.txt
*/
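/*
Stage pattern: each AdvancePhase(s) below first reads the derivative k_{s+1}
at the most recently set probe point, then forms the next probe point
    y_probe = y_n + h * sum_j b_{s+1,j} * k_{j+1}
and hands it to the system at the tableau node time t_n + a_s*h; this is the
standard explicit Runge-Kutta stage recurrence, driven by the nodes a and
coupling coefficients b of the tableau.
*/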
template<typename Scalar>
class RungeSolver12_14 : public DifferentialSolver<Scalar>
{
public:
RungeSolver12_14()
{
}
void SetSystem(DifferentialSystem<Scalar> *system)
{
this->system = system;
int maxDimentionsCount = system->GetMaxDimentionsCount();
currCoords = new Scalar[maxDimentionsCount];
nextCoords1 = new Scalar[maxDimentionsCount];
nextCoords2 = new Scalar[maxDimentionsCount];
derivatives = new Scalar[maxDimentionsCount];
probeCoords = new Scalar[maxDimentionsCount];
k1 = new Scalar[maxDimentionsCount];
k2 = new Scalar[maxDimentionsCount];
k3 = new Scalar[maxDimentionsCount];
k4 = new Scalar[maxDimentionsCount];
k5 = new Scalar[maxDimentionsCount];
k6 = new Scalar[maxDimentionsCount];
k7 = new Scalar[maxDimentionsCount];
k8 = new Scalar[maxDimentionsCount];
k9 = new Scalar[maxDimentionsCount];
k10 = new Scalar[maxDimentionsCount];
k11 = new Scalar[maxDimentionsCount];
k12 = new Scalar[maxDimentionsCount];
k13 = new Scalar[maxDimentionsCount];
k14 = new Scalar[maxDimentionsCount];
k15 = new Scalar[maxDimentionsCount];
k16 = new Scalar[maxDimentionsCount];
k17 = new Scalar[maxDimentionsCount];
k18 = new Scalar[maxDimentionsCount];
k19 = new Scalar[maxDimentionsCount];
k20 = new Scalar[maxDimentionsCount];
k21 = new Scalar[maxDimentionsCount];
k22 = new Scalar[maxDimentionsCount];
k23 = new Scalar[maxDimentionsCount];
k24 = new Scalar[maxDimentionsCount];
k25 = new Scalar[maxDimentionsCount];
k26 = new Scalar[maxDimentionsCount];
k27 = new Scalar[maxDimentionsCount];
k28 = new Scalar[maxDimentionsCount];
k29 = new Scalar[maxDimentionsCount];
k30 = new Scalar[maxDimentionsCount];
k31 = new Scalar[maxDimentionsCount];
k32 = new Scalar[maxDimentionsCount];
k33 = new Scalar[maxDimentionsCount];
k34 = new Scalar[maxDimentionsCount];
k35 = new Scalar[maxDimentionsCount];
this->currTime = 0;
}
virtual ~RungeSolver12_14()
{
delete[] currCoords;
delete[] nextCoords1;
delete[] nextCoords2;
delete[] derivatives;
delete[] probeCoords;
delete[] k1;
delete[] k2;
delete[] k3;
delete[] k4;
delete[] k5;
delete[] k6;
delete[] k7;
delete[] k8;
delete[] k9;
delete[] k10;
delete[] k11;
delete[] k12;
delete[] k13;
delete[] k14;
delete[] k15;
delete[] k16;
delete[] k17;
delete[] k18;
delete[] k19;
delete[] k20;
delete[] k21;
delete[] k22;
delete[] k23;
delete[] k24;
delete[] k25;
delete[] k26;
delete[] k27;
delete[] k28;
delete[] k29;
delete[] k30;
delete[] k31;
delete[] k32;
delete[] k33;
delete[] k34;
delete[] k35;
}
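// Note: this helper computes a^b as exp(b*log(a)) and is therefore only valid
// for a > 0; callers must not pass zero or negative bases.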
Scalar pow(Scalar a, Scalar b)
{
return exp(log(a) * b);
}
int GetPhasesCount()
{
return 35;
}
void InitStep(Scalar timeStep, Scalar tolerance,
int globalStepIndex, int hierarchyPhase)
{
assert(system->GetHierarchyLevelsCount() == 1);
DifferentialSolver<Scalar>::InitStep(timeStep, tolerance, globalStepIndex, hierarchyPhase);
system->GetCurrCoords(this->currTime, currCoords, globalStepIndex, hierarchyPhase);
}
virtual void AdvancePhase(int phaseIndex)
{
switch (phaseIndex)
{
case 0:
{
Scalar a0(0.000000000000000000000000000000000000000000000000000000000000);
Scalar b1_0(0.111111111111111111111111111111111111111111111111111111111111);
system->GetCurrDerivatives(k1, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b1_0 * k1[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a0, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 1:
{
Scalar a1(0.111111111111111111111111111111111111111111111111111111111111);
Scalar b2_0(-0.833333333333333333333333333333333333333333333333333333333333);
Scalar b2_1(1.38888888888888888888888888888888888888888888888888888888889);
system->GetCurrDerivatives(k2, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b2_0 * k1[coordIndex]
+ b2_1 * k2[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a1, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 2:
{
Scalar a2(0.555555555555555555555555555555555555555555555555555555555556);
Scalar b3_0(0.208333333333333333333333333333333333333333333333333333333333);
Scalar b3_1(0);
Scalar b3_2(0.625000000000000000000000000000000000000000000000000000000000);
system->GetCurrDerivatives(k3, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b3_0 * k1[coordIndex]
+ b3_1 * k2[coordIndex]
+ b3_2 * k3[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a2, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 3:
{
Scalar a3(0.833333333333333333333333333333333333333333333333333333333333);
Scalar b4_0(0.193333333333333333333333333333333333333333333333333333333333);
Scalar b4_1(0);
Scalar b4_2(0.220000000000000000000000000000000000000000000000000000000000);
Scalar b4_3(-0.0800000000000000000000000000000000000000000000000000000000000);
system->GetCurrDerivatives(k4, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b4_0 * k1[coordIndex]
+ b4_1 * k2[coordIndex]
+ b4_2 * k3[coordIndex]
+ b4_3 * k4[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a3, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 4:
{
Scalar a4(0.333333333333333333333333333333333333333333333333333333333333);
Scalar b5_0(0.100000000000000000000000000000000000000000000000000000000000);
Scalar b5_1(0);
Scalar b5_2(0);
Scalar b5_3(0.400000000000000000000000000000000000000000000000000000000000);
Scalar b5_4(0.500000000000000000000000000000000000000000000000000000000000);
system->GetCurrDerivatives(k5, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b5_0 * k1[coordIndex]
+ b5_1 * k2[coordIndex]
+ b5_2 * k3[coordIndex]
+ b5_3 * k4[coordIndex]
+ b5_4 * k5[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a4, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 5:
{
Scalar a5(1.00000000000000000000000000000000000000000000000000000000000);
Scalar b6_0(0.103484561636679776672993546511910344499744798201971316606663);
Scalar b6_1(0);
Scalar b6_2(0);
Scalar b6_3(0.122068887306407222589644082868962077139592714834162134741275);
Scalar b6_4(0.482574490331246622475134780125688112865919023850168049679402);
Scalar b6_5(-0.0381409600015606999730886240005620205664113072478411477421970);
system->GetCurrDerivatives(k6, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b6_0 * k1[coordIndex]
+ b6_1 * k2[coordIndex]
+ b6_2 * k3[coordIndex]
+ b6_3 * k4[coordIndex]
+ b6_4 * k5[coordIndex]
+ b6_5 * k6[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a5, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 6:
{
Scalar a6(0.669986979272772921764683785505998513938845229638460353285142);
Scalar b7_0(0.124380526654094412881516420868799316268491466359671423163289);
Scalar b7_1(0);
Scalar b7_2(0);
Scalar b7_3(0);
Scalar b7_4(0.226120282197584301422238662979202901196752320742633143965145);
Scalar b7_5(0.0137885887618080880607695837016477814530969417491493385363543);
Scalar b7_6(-0.0672210133996684449749399507414305856950086341525382182856200);
system->GetCurrDerivatives(k7, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b7_0 * k1[coordIndex]
+ b7_1 * k2[coordIndex]
+ b7_2 * k3[coordIndex]
+ b7_3 * k4[coordIndex]
+ b7_4 * k5[coordIndex]
+ b7_5 * k6[coordIndex]
+ b7_6 * k7[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a6, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 7:
{
Scalar a7(0.297068384213818357389584716808219413223332094698915687379168);
Scalar b8_0(0.0936919065659673815530885456083005933866349695217750085655603);
Scalar b8_1(0);
Scalar b8_2(0);
Scalar b8_3(0);
Scalar b8_4(0);
Scalar b8_5(-0.00613406843450510987229498995641664735620914507128858871007099);
Scalar b8_6(0.216019825625503063708860097659866573490979433278117320188668);
Scalar b8_7(0.423695063515761937337619073960976753205867469544123532683116);
system->GetCurrDerivatives(k8, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b8_0 * k1[coordIndex]
+ b8_1 * k2[coordIndex]
+ b8_2 * k3[coordIndex]
+ b8_3 * k4[coordIndex]
+ b8_4 * k5[coordIndex]
+ b8_5 * k6[coordIndex]
+ b8_6 * k7[coordIndex]
+ b8_7 * k8[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a7, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 8:
{
Scalar a8(0.727272727272727272727272727272727272727272727272727272727273);
Scalar b9_0(0.0838479812409052664616968791372814085980533139224911131069335);
Scalar b9_1(0);
Scalar b9_2(0);
Scalar b9_3(0);
Scalar b9_4(0);
Scalar b9_5(-0.0117949367100973814319755056031295775367961960590736150777613);
Scalar b9_6(-0.247299020568812652339473838743194598325992840353340132697498);
Scalar b9_7(0.0978080858367729012259313014081291665503740655476733940756599);
Scalar b9_8(0.217590689243420631360008651767860318344168120024782176879989);
system->GetCurrDerivatives(k9, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b9_0 * k1[coordIndex]
+ b9_1 * k2[coordIndex]
+ b9_2 * k3[coordIndex]
+ b9_3 * k4[coordIndex]
+ b9_4 * k5[coordIndex]
+ b9_5 * k6[coordIndex]
+ b9_6 * k7[coordIndex]
+ b9_7 * k8[coordIndex]
+ b9_8 * k9[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a8, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 9:
{
Scalar a9(0.140152799042188765276187487966946717629806463082532936287323);
Scalar b10_0(0.0615255359769428227954562389614314714333423969064821107453940);
Scalar b10_1(0);
Scalar b10_2(0);
Scalar b10_3(0);
Scalar b10_4(0);
Scalar b10_5(0.00592232780324503308042990005798046524738389560444257136834990);
Scalar b10_6(0.470326159963841112217224303205894113455362530746108825010848);
Scalar b10_7(0.299688863848679000853981837096192399136831121671781279184194);
Scalar b10_8(-0.247656877593994914689992276329810825853958069263947095548189);
Scalar b10_9(0.110895029771437682893999851839061714522445173600678718208625);
system->GetCurrDerivatives(k10, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b10_0 * k1[coordIndex]
+ b10_1 * k2[coordIndex]
+ b10_2 * k3[coordIndex]
+ b10_3 * k4[coordIndex]
+ b10_4 * k5[coordIndex]
+ b10_5 * k6[coordIndex]
+ b10_6 * k7[coordIndex]
+ b10_7 * k8[coordIndex]
+ b10_8 * k9[coordIndex]
+ b10_9 * k10[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a9, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 10:
{
Scalar a10(0.700701039770150737151099854830749337941407049265546408969222);
Scalar b11_0(0.0419700073362782579861792864787277787213483656543104611245994);
Scalar b11_1(0);
Scalar b11_2(0);
Scalar b11_3(0);
Scalar b11_4(0);
Scalar b11_5(-0.00317987696266205093901912847692712407988609169703103952205634);
Scalar b11_6(0.806397714906192077260821711520379506393543111567419750119748);
Scalar b11_7(0.0975983126412388979093522850684288851314672048003054550357187);
Scalar b11_8(0.778575578158398909027512446452927238999763460594181964958853);
Scalar b11_9(0.204890423831599428189499202098105603312029235081420653574829);
Scalar b11_10(-1.56261579627468188307070943950527825211462892236424360892806);
system->GetCurrDerivatives(k11, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b11_0 * k1[coordIndex]
+ b11_1 * k2[coordIndex]
+ b11_2 * k3[coordIndex]
+ b11_3 * k4[coordIndex]
+ b11_4 * k5[coordIndex]
+ b11_5 * k6[coordIndex]
+ b11_6 * k7[coordIndex]
+ b11_7 * k8[coordIndex]
+ b11_8 * k9[coordIndex]
+ b11_9 * k10[coordIndex]
+ b11_10 * k11[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep *a10, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 11:
{
Scalar a11(0.363636363636363636363636363636363636363636363636363636363636);
Scalar b12_0(0.0437726782233730163574465242495339811688214967071614123256973);
Scalar b12_1(0);
Scalar b12_2(0);
Scalar b12_3(0);
Scalar b12_4(0);
Scalar b12_5(0);
Scalar b12_6(0);
Scalar b12_7(0);
Scalar b12_8(0.00624365027520195208794358628580933625281631216903095917201250);
Scalar b12_9(0.200043097109577314994435165469647856829066232218264969608768);
Scalar b12_10(-0.00805328367804983036823857162048902911923392887337029314844206);
Scalar b12_11(0.0211517528067396521915711903523399601316877825157550573051221);
system->GetCurrDerivatives(k12, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b12_0 * k1[coordIndex]
+ b12_1 * k2[coordIndex]
+ b12_2 * k3[coordIndex]
+ b12_3 * k4[coordIndex]
+ b12_4 * k5[coordIndex]
+ b12_5 * k6[coordIndex]
+ b12_6 * k7[coordIndex]
+ b12_7 * k8[coordIndex]
+ b12_8 * k9[coordIndex]
+ b12_9 * k10[coordIndex]
+ b12_10 * k11[coordIndex]
+ b12_11 * k12[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a11, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 12:
{
Scalar a12(0.263157894736842105263157894736842105263157894736842105263158);
Scalar b13_0(0.0283499250363514563095023591920717312247137654896477097768495);
Scalar b13_1(0);
Scalar b13_2(0);
Scalar b13_3(0);
Scalar b13_4(0);
Scalar b13_5(0);
Scalar b13_6(0);
Scalar b13_7(0);
Scalar b13_8(0.00249163204855817407538949148805995149459884653585417680098222);
Scalar b13_9(0.0230138787854593149638399846373742768772087122638142234223658);
Scalar b13_10(-0.00322155956692977098724476092467120878189463604760620461043308);
Scalar b13_11(0.00988442549447664668946335414487885256040819982786014648129297);
Scalar b13_12(-0.0213010771328887351384307642875927384886634565429572466632092);
system->GetCurrDerivatives(k13, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b13_0 * k1[coordIndex]
+ b13_1 * k2[coordIndex]
+ b13_2 * k3[coordIndex]
+ b13_3 * k4[coordIndex]
+ b13_4 * k5[coordIndex]
+ b13_5 * k6[coordIndex]
+ b13_6 * k7[coordIndex]
+ b13_7 * k8[coordIndex]
+ b13_8 * k9[coordIndex]
+ b13_9 * k10[coordIndex]
+ b13_10 * k11[coordIndex]
+ b13_11 * k12[coordIndex]
+ b13_12 * k13[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a12, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 13:
{
Scalar a13(0.0392172246650270859125196642501208648863714315266128052078483);
Scalar b14_0(0.343511894290243001049432234735147943083353174980701426268122);
Scalar b14_1(0);
Scalar b14_2(0);
Scalar b14_3(0);
Scalar b14_4(0);
Scalar b14_5(0);
Scalar b14_6(0);
Scalar b14_7(0);
Scalar b14_8(0.210451912023627385609097011999010655788807405225626700040882);
Scalar b14_9(1.03427452057230411936482926828825709938667999698324740166559);
Scalar b14_10(0.00600303645864422487051240448206640574939078092406156945568306);
Scalar b14_11(0.855938125099619537578012106002407728915062652616416005816477);
Scalar b14_12(-0.977235005036766810872264852372525633013107656892839677696022);
Scalar b14_13(-0.660026980479294694616225013856327693720573981219974874776419);
system->GetCurrDerivatives(k14, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b14_0 * k1[coordIndex]
+ b14_1 * k2[coordIndex]
+ b14_2 * k3[coordIndex]
+ b14_3 * k4[coordIndex]
+ b14_4 * k5[coordIndex]
+ b14_5 * k6[coordIndex]
+ b14_6 * k7[coordIndex]
+ b14_7 * k8[coordIndex]
+ b14_8 * k9[coordIndex]
+ b14_9 * k10[coordIndex]
+ b14_10 * k11[coordIndex]
+ b14_11 * k12[coordIndex]
+ b14_12 * k13[coordIndex]
+ b14_13 * k14[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a13, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 14:
{
Scalar a14(0.812917502928376762983393159278036506189612372617238550774312);
Scalar b15_0(-0.0143574001672168069538206399935076366657755954378399880691949);
Scalar b15_1(0);
Scalar b15_2(0);
Scalar b15_3(0);
Scalar b15_4(0);
Scalar b15_5(0);
Scalar b15_6(0);
Scalar b15_7(0);
Scalar b15_8(-0.0366253270049039970293685796848974791733119081733552207318285);
Scalar b15_9(0.0350254975636213681976849406979846524346789082471103574920148);
Scalar b15_10(0.0360946016362113508931786658758335239823689929864237671348749);
Scalar b15_11(-0.0265219967553681106351595946834601923649627012457464284442911);
Scalar b15_12(0.0445699011305698119638911537508839908104336323082226770910408);
Scalar b15_13(0.124343093331358243286225595741786448038973408895106741855721);
Scalar b15_14(0.00413829693239480694403512496204335960426192908674476033832967);
system->GetCurrDerivatives(k15, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b15_0 * k1[coordIndex]
+ b15_1 * k2[coordIndex]
+ b15_2 * k3[coordIndex]
+ b15_3 * k4[coordIndex]
+ b15_4 * k5[coordIndex]
+ b15_5 * k6[coordIndex]
+ b15_6 * k7[coordIndex]
+ b15_7 * k8[coordIndex]
+ b15_8 * k9[coordIndex]
+ b15_9 * k10[coordIndex]
+ b15_10 * k11[coordIndex]
+ b15_11 * k12[coordIndex]
+ b15_12 * k13[coordIndex]
+ b15_13 * k14[coordIndex]
+ b15_14 * k15[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a14, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 15:
{
Scalar a15(0.166666666666666666666666666666666666666666666666666666666667);
Scalar b16_0(0.356032404425120290975609116398089176264106222379748802654822);
Scalar b16_1(0);
Scalar b16_2(0);
Scalar b16_3(0);
Scalar b16_4(0);
Scalar b16_5(0);
Scalar b16_6(0);
Scalar b16_7(0);
Scalar b16_8(-0.450192758947562595966821779075956175110645100214763601190349);
Scalar b16_9(0.430527907083710898626656292808782917793030154094709462877146);
Scalar b16_10(0.511973029011022237668556960394071692077125787030651386389972);
Scalar b16_11(0.908303638886404260390159124638110213997496214819904630546596);
Scalar b16_12(-1.23921093371933931757372469151534028854413889248605726186520);
Scalar b16_13(-0.649048661671761465141672348879062553905402831967191097656668);
Scalar b16_14(0.251708904586819292210480529948970541404887852931447491219418);
Scalar b16_15(0.779906470345586398810756795282334476023540593411550187024263);
system->GetCurrDerivatives(k16, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b16_0 * k1[coordIndex]
+ b16_1 * k2[coordIndex]
+ b16_2 * k3[coordIndex]
+ b16_3 * k4[coordIndex]
+ b16_4 * k5[coordIndex]
+ b16_5 * k6[coordIndex]
+ b16_6 * k7[coordIndex]
+ b16_7 * k8[coordIndex]
+ b16_8 * k9[coordIndex]
+ b16_9 * k10[coordIndex]
+ b16_10 * k11[coordIndex]
+ b16_11 * k12[coordIndex]
+ b16_12 * k13[coordIndex]
+ b16_13 * k14[coordIndex]
+ b16_14 * k15[coordIndex]
+ b16_15 * k16[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a15, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 16:
{
Scalar a16(0.900000000000000000000000000000000000000000000000000000000000);
Scalar b17_0(0.0130935687406513066406881206418834980127470438213192487844956);
Scalar b17_1(0);
Scalar b17_2(0);
Scalar b17_3(0);
Scalar b17_4(0);
Scalar b17_5(0);
Scalar b17_6(0);
Scalar b17_7(0);
Scalar b17_8(0);
Scalar b17_9(0);
Scalar b17_10(0);
Scalar b17_11(0);
Scalar b17_12(-0.0000932053067985113945908461962767108237858631509684667142124826);
Scalar b17_13(0.0505374334262299359640090443138590726770942344716122381702746);
Scalar b17_14(8.04470341944487979109579109610197797641311868930865361048975E-7);
Scalar b17_15(0.000591726029494171190528755742777717259844340971924321528178248);
Scalar b17_16(-4.01614722154557337064691684906375587732264247950093804676867E-7);
system->GetCurrDerivatives(k17, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b17_0 * k1[coordIndex]
+ b17_1 * k2[coordIndex]
+ b17_2 * k3[coordIndex]
+ b17_3 * k4[coordIndex]
+ b17_4 * k5[coordIndex]
+ b17_5 * k6[coordIndex]
+ b17_6 * k7[coordIndex]
+ b17_7 * k8[coordIndex]
+ b17_8 * k9[coordIndex]
+ b17_9 * k10[coordIndex]
+ b17_10 * k11[coordIndex]
+ b17_11 * k12[coordIndex]
+ b17_12 * k13[coordIndex]
+ b17_13 * k14[coordIndex]
+ b17_14 * k15[coordIndex]
+ b17_15 * k16[coordIndex]
+ b17_16 * k17[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a16, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 17:
{
Scalar a17(0.0641299257451966923312771193896682809481096651615083225402924);
Scalar b18_0(0.0207926484466053012541944544000765652167255206144373407979758);
Scalar b18_1(0);
Scalar b18_2(0);
Scalar b18_3(0);
Scalar b18_4(0);
Scalar b18_5(0);
Scalar b18_6(0);
Scalar b18_7(0);
Scalar b18_8(0);
Scalar b18_9(0);
Scalar b18_10(0);
Scalar b18_11(0);
Scalar b18_12(0.000582695918800085915101902697837284108951406103029871570103075);
Scalar b18_13(-0.00801700732358815939083342186525852746640558465919633524655451);
Scalar b18_14(4.03847643847136940375170821743560570484117290330895506618968E-6);
Scalar b18_15(0.0854609998055506144225056114567535602510114622033622491802597);
Scalar b18_16(-2.04486480935804242706707569691004307904442837552677456232848E-6);
Scalar b18_17(0.105328578824431893399799402979093997354240904235172843146582);
system->GetCurrDerivatives(k18, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b18_0 * k1[coordIndex]
+ b18_1 * k2[coordIndex]
+ b18_2 * k3[coordIndex]
+ b18_3 * k4[coordIndex]
+ b18_4 * k5[coordIndex]
+ b18_5 * k6[coordIndex]
+ b18_6 * k7[coordIndex]
+ b18_7 * k8[coordIndex]
+ b18_8 * k9[coordIndex]
+ b18_9 * k10[coordIndex]
+ b18_10 * k11[coordIndex]
+ b18_11 * k12[coordIndex]
+ b18_12 * k13[coordIndex]
+ b18_13 * k14[coordIndex]
+ b18_14 * k15[coordIndex]
+ b18_15 * k16[coordIndex]
+ b18_16 * k17[coordIndex]
+ b18_17 * k18[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a17, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 18:
{
Scalar a18(0.204149909283428848927744634301023405027149505241333751628870);
Scalar b19_0(1.40153449795736021415446247355771306718486452917597731683689);
Scalar b19_1(0);
Scalar b19_2(0);
Scalar b19_3(0);
Scalar b19_4(0);
Scalar b19_5(0);
Scalar b19_6(0);
Scalar b19_7(0);
Scalar b19_8(0);
Scalar b19_9(0);
Scalar b19_10(0);
Scalar b19_11(0);
Scalar b19_12(-0.230252000984221261616272410367415621261130298274455611733277);
Scalar b19_13(-7.21106840466912905659582237106874247165856493509961561958267);
Scalar b19_14(0.00372901560694836335236995327852132340217759566678662385552634);
Scalar b19_15(-4.71415495727125020678778179392224757011323373221820091641216);
Scalar b19_16(-0.00176367657545349242053841995032797673574903886695600132759652);
Scalar b19_17(7.64130548038698765563029310880237651185173367813936997648198);
Scalar b19_18(3.50602043659751834989896082949744710968212949893375368243588);
system->GetCurrDerivatives(k19, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b19_0 * k1[coordIndex]
+ b19_1 * k2[coordIndex]
+ b19_2 * k3[coordIndex]
+ b19_3 * k4[coordIndex]
+ b19_4 * k5[coordIndex]
+ b19_5 * k6[coordIndex]
+ b19_6 * k7[coordIndex]
+ b19_7 * k8[coordIndex]
+ b19_8 * k9[coordIndex]
+ b19_9 * k10[coordIndex]
+ b19_10 * k11[coordIndex]
+ b19_11 * k12[coordIndex]
+ b19_12 * k13[coordIndex]
+ b19_13 * k14[coordIndex]
+ b19_14 * k15[coordIndex]
+ b19_15 * k16[coordIndex]
+ b19_16 * k17[coordIndex]
+ b19_17 * k18[coordIndex]
+ b19_18 * k19[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a18, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 19:
{
Scalar a19(0.395350391048760565615671369827324372352227297456659450554577);
Scalar b20_0(11.9514650694120686799372385830716401674473610826553517297976);
Scalar b20_1(0);
Scalar b20_2(0);
Scalar b20_3(0);
Scalar b20_4(0);
Scalar b20_5(0);
Scalar b20_6(0);
Scalar b20_7(0);
Scalar b20_8(0);
Scalar b20_9(0);
Scalar b20_10(0);
Scalar b20_11(0);
Scalar b20_12(7.79480932108175968783516700231764388220284279598980948538579);
Scalar b20_13(-56.4501393867325792523560991120904281440468100061340556540132);
Scalar b20_14(0.0912376306930644901344530449290276645709607450403673704844997);
Scalar b20_15(-12.7336279925434886201945524309199275038162717529918963305155);
Scalar b20_16(-0.0396895921904719712313542810939736674712383070433147873009352);
Scalar b20_17(54.4392141883570886996225765155307791861438378423305337073797);
Scalar b20_18(-3.64411637921569236846406990361350645806721478409266709351203);
Scalar b20_19(-0.804503249910509910899030787958579499315694913210787878260459);
system->GetCurrDerivatives(k20, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b20_0 * k1[coordIndex]
+ b20_1 * k2[coordIndex]
+ b20_2 * k3[coordIndex]
+ b20_3 * k4[coordIndex]
+ b20_4 * k5[coordIndex]
+ b20_5 * k6[coordIndex]
+ b20_6 * k7[coordIndex]
+ b20_7 * k8[coordIndex]
+ b20_8 * k9[coordIndex]
+ b20_9 * k10[coordIndex]
+ b20_10 * k11[coordIndex]
+ b20_11 * k12[coordIndex]
+ b20_12 * k13[coordIndex]
+ b20_13 * k14[coordIndex]
+ b20_14 * k15[coordIndex]
+ b20_15 * k16[coordIndex]
+ b20_16 * k17[coordIndex]
+ b20_17 * k18[coordIndex]
+ b20_18 * k19[coordIndex]
+ b20_19 * k20[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a19, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 20:
{
Scalar a20(0.604649608951239434384328630172675627647772702543340549445423);
Scalar b21_0(-148.809426507100488427838868268647625561930612082148597076690);
Scalar b21_1(0);
Scalar b21_2(0);
Scalar b21_3(0);
Scalar b21_4(0);
Scalar b21_5(0);
Scalar b21_6(0);
Scalar b21_7(0);
Scalar b21_8(0);
Scalar b21_9(0);
Scalar b21_10(0);
Scalar b21_11(0);
Scalar b21_12(-91.7295278291256484357935662402321623495228729036354276506427);
Scalar b21_13(707.656144971598359834575719286335716154821128966649565194286);
Scalar b21_14(-1.10563611857482440905296961311590930801338308942637769555540);
Scalar b21_15(176.134591883811372587859898076055660406999516762301689616841);
Scalar b21_16(0.491384824214880662268898345164454557416884631402764792538746);
Scalar b21_17(-684.278000449814944358237535610895081956077167893600278300805);
Scalar b21_18(27.9910604998398258984224332124380407446002518400668657974589);
Scalar b21_19(13.1939710030282333443670964371153238435064159623744975073252);
Scalar b21_20(1.25128781283980445450114974148056006317268830077396406361417);
system->GetCurrDerivatives(k21, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b21_0 * k1[coordIndex]
+ b21_1 * k2[coordIndex]
+ b21_2 * k3[coordIndex]
+ b21_3 * k4[coordIndex]
+ b21_4 * k5[coordIndex]
+ b21_5 * k6[coordIndex]
+ b21_6 * k7[coordIndex]
+ b21_7 * k8[coordIndex]
+ b21_8 * k9[coordIndex]
+ b21_9 * k10[coordIndex]
+ b21_10 * k11[coordIndex]
+ b21_11 * k12[coordIndex]
+ b21_12 * k13[coordIndex]
+ b21_13 * k14[coordIndex]
+ b21_14 * k15[coordIndex]
+ b21_15 * k16[coordIndex]
+ b21_16 * k17[coordIndex]
+ b21_17 * k18[coordIndex]
+ b21_18 * k19[coordIndex]
+ b21_19 * k20[coordIndex]
+ b21_20 * k21[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a20, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 21:
{
Scalar a21(0.795850090716571151072255365698976594972850494758666248371130);
Scalar b22_0(-9.67307946948196763644126118433219395839951408571877262880482);
Scalar b22_1(0);
Scalar b22_2(0);
Scalar b22_3(0);
Scalar b22_4(0);
Scalar b22_5(0);
Scalar b22_6(0);
Scalar b22_7(0);
Scalar b22_8(0);
Scalar b22_9(0);
Scalar b22_10(0);
Scalar b22_11(0);
Scalar b22_12(-4.46990150858505531443846227701960360497830681408751431146712);
Scalar b22_13(45.5127128690952681968241950400052751178905907817398483534845);
Scalar b22_14(-0.0713085086183826912791492024438246129930559805352394367050813);
Scalar b22_15(11.2273614068412741582590624479939384207826800776794485051540);
Scalar b22_16(0.126244376717622724516237912909138809361786889819105426371393);
Scalar b22_17(-43.5439339549483313605810624907242107623814304467621407753424);
Scalar b22_18(0.787174307543058978398792994996550902064546091443233850464377);
Scalar b22_19(0.532264696744684215669300708603886690785395776821503851830821);
Scalar b22_20(0.422422733996325326010225127471388772575086538809603346825334);
Scalar b22_21(0.0859131249503067107308438031499859443441115056294154956487671);
system->GetCurrDerivatives(k22, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b22_0 * k1[coordIndex]
+ b22_1 * k2[coordIndex]
+ b22_2 * k3[coordIndex]
+ b22_3 * k4[coordIndex]
+ b22_4 * k5[coordIndex]
+ b22_5 * k6[coordIndex]
+ b22_6 * k7[coordIndex]
+ b22_7 * k8[coordIndex]
+ b22_8 * k9[coordIndex]
+ b22_9 * k10[coordIndex]
+ b22_10 * k11[coordIndex]
+ b22_11 * k12[coordIndex]
+ b22_12 * k13[coordIndex]
+ b22_13 * k14[coordIndex]
+ b22_14 * k15[coordIndex]
+ b22_15 * k16[coordIndex]
+ b22_16 * k17[coordIndex]
+ b22_17 * k18[coordIndex]
+ b22_18 * k19[coordIndex]
+ b22_19 * k20[coordIndex]
+ b22_20 * k21[coordIndex]
+ b22_21 * k22[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a21, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 22:
{
Scalar a22(0.935870074254803307668722880610331719051890334838491677459708);
Scalar b23_0(-10.0664032447054702403396606900426891472202824757968765569183);
Scalar b23_1(0);
Scalar b23_2(0);
Scalar b23_3(0);
Scalar b23_4(0);
Scalar b23_5(0);
Scalar b23_6(0);
Scalar b23_7(0);
Scalar b23_8(-0.0366253270049039970293685796848974791733119081733552207318285);
Scalar b23_9(0.0350254975636213681976849406979846524346789082471103574920148);
Scalar b23_10(0.0360946016362113508931786658758335239823689929864237671348749);
Scalar b23_11(-0.0265219967553681106351595946834601923649627012457464284442911);
Scalar b23_12(-6.27088972181464143590553149478871603839356122957396018530209);
Scalar b23_13(48.2079237442562989090702103008195063923492593141636117832993);
Scalar b23_14(-0.0694471689136165640882395180583732834557754169149088630301342);
Scalar b23_15(12.6810690204850295698341370913609807066108483811412127009785);
Scalar b23_16(0.0119671168968323754838161435501011294100927813964199613229864);
Scalar b23_17(-46.7249764992482408003358268242662695593201321659795608950429);
Scalar b23_18(1.33029613326626711314710039298216591399033511191227101321435);
Scalar b23_19(1.00766787503398298353438903619926657771162717793661719708370);
Scalar b23_20(0.0209512051933665091664122388475480702892770753864487241177616);
Scalar b23_21(0.0210134706331264177317735424331396407424412188443757490871603);
Scalar b23_22(0.00952196014417121794175101542454575907376360233658356240547761);
system->GetCurrDerivatives(k23, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b23_0 * k1[coordIndex]
+ b23_1 * k2[coordIndex]
+ b23_2 * k3[coordIndex]
+ b23_3 * k4[coordIndex]
+ b23_4 * k5[coordIndex]
+ b23_5 * k6[coordIndex]
+ b23_6 * k7[coordIndex]
+ b23_7 * k8[coordIndex]
+ b23_8 * k9[coordIndex]
+ b23_9 * k10[coordIndex]
+ b23_10 * k11[coordIndex]
+ b23_11 * k12[coordIndex]
+ b23_12 * k13[coordIndex]
+ b23_13 * k14[coordIndex]
+ b23_14 * k15[coordIndex]
+ b23_15 * k16[coordIndex]
+ b23_16 * k17[coordIndex]
+ b23_17 * k18[coordIndex]
+ b23_18 * k19[coordIndex]
+ b23_19 * k20[coordIndex]
+ b23_20 * k21[coordIndex]
+ b23_21 * k22[coordIndex]
+ b23_22 * k23[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a22, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 23:
{
Scalar a23(0.166666666666666666666666666666666666666666666666666666666667);
Scalar b24_0(-409.478081677743708772589097409370357624424341606752069725341);
Scalar b24_1(0);
Scalar b24_2(0);
Scalar b24_3(0);
Scalar b24_4(0);
Scalar b24_5(0);
Scalar b24_6(0);
Scalar b24_7(0);
Scalar b24_8(0.210451912023627385609097011999010655788807405225626700040882);
Scalar b24_9(1.03427452057230411936482926828825709938667999698324740166559);
Scalar b24_10(0.00600303645864422487051240448206640574939078092406156945568306);
Scalar b24_11(0.855938125099619537578012106002407728915062652616416005816477);
Scalar b24_12(-250.516998547447860492777657729316130386584050420782075966990);
Scalar b24_13(1946.42466652388427766053750328264758595829850895761428240231);
Scalar b24_14(-3.04503882102310365506105809086860882786950544097602101685174);
Scalar b24_15(490.626379528281713521208265299168083841598542274061671576230);
Scalar b24_16(1.56647589531270907115484067013597445739595615245966775329993);
Scalar b24_17(-1881.97428994011173362217267377035870619215906638453056643641);
Scalar b24_18(75.2592224724847175278837713643303149821620618914245864351135);
Scalar b24_19(34.5734356980331067622434344736554689696728644793551014989002);
Scalar b24_20(3.21147679440968961435417361847073755169022966748891627882572);
Scalar b24_21(-0.460408041738414391307201404237058848867245095265382820823055);
Scalar b24_22(-0.0870718339841810522431884137957986245724252047388936572215438);
Scalar b24_23(-7.39351814158303067567016952195521063999185773249132944724553);
system->GetCurrDerivatives(k24, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b24_0 * k1[coordIndex]
+ b24_1 * k2[coordIndex]
+ b24_2 * k3[coordIndex]
+ b24_3 * k4[coordIndex]
+ b24_4 * k5[coordIndex]
+ b24_5 * k6[coordIndex]
+ b24_6 * k7[coordIndex]
+ b24_7 * k8[coordIndex]
+ b24_8 * k9[coordIndex]
+ b24_9 * k10[coordIndex]
+ b24_10 * k11[coordIndex]
+ b24_11 * k12[coordIndex]
+ b24_12 * k13[coordIndex]
+ b24_13 * k14[coordIndex]
+ b24_14 * k15[coordIndex]
+ b24_15 * k16[coordIndex]
+ b24_16 * k17[coordIndex]
+ b24_17 * k18[coordIndex]
+ b24_18 * k19[coordIndex]
+ b24_19 * k20[coordIndex]
+ b24_20 * k21[coordIndex]
+ b24_21 * k22[coordIndex]
+ b24_22 * k23[coordIndex]
+ b24_23 * k24[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a23, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 24:
{
Scalar a24(0.812917502928376762983393159278036506189612372617238550774312);
Scalar b25_0(3.43347475853550878921093496257596781120623891072008459930197);
Scalar b25_1(0);
Scalar b25_2(0);
Scalar b25_3(0);
Scalar b25_4(0);
Scalar b25_5(0);
Scalar b25_6(0);
Scalar b25_7(0);
Scalar b25_8(0.00249163204855817407538949148805995149459884653585417680098222);
Scalar b25_9(0.0230138787854593149638399846373742768772087122638142234223658);
Scalar b25_10(-0.00322155956692977098724476092467120878189463604760620461043308);
Scalar b25_11(0.00988442549447664668946335414487885256040819982786014648129297);
Scalar b25_12(2.16252799377922507788307841904757354045759225335732707916530);
Scalar b25_13(-16.2699864546457421328065640660139489006987552040228852402716);
Scalar b25_14(-0.128534502120524552843583417470935010538029037542654506231743);
Scalar b25_15(-8.98915042666504253089307820833379330486511746063552853023189);
Scalar b25_16(-0.00348595363232025333387080201851013650192401767250513765000963);
Scalar b25_17(15.7936194113339807536235187388695574135853387025139738341334);
Scalar b25_18(-0.574403330914095065628165482017335820148383663195675408024658);
Scalar b25_19(-0.345602039021393296692722496608124982535237228827655306030152);
Scalar b25_20(-0.00662241490206585091731619991383757781133067992707418687587487);
Scalar b25_21(-0.00777788129242204164032546458607364309759347209626759111946150);
Scalar b25_22(-0.00356084192402274913338827232697437364675240818791706587952939);
Scalar b25_23(4.79282506449930799649797749629840189457296934139359048988332);
Scalar b25_24(0.153725464873068577844576387402512082757034273069877432944621);
system->GetCurrDerivatives(k25, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b25_0 * k1[coordIndex]
+ b25_1 * k2[coordIndex]
+ b25_2 * k3[coordIndex]
+ b25_3 * k4[coordIndex]
+ b25_4 * k5[coordIndex]
+ b25_5 * k6[coordIndex]
+ b25_6 * k7[coordIndex]
+ b25_7 * k8[coordIndex]
+ b25_8 * k9[coordIndex]
+ b25_9 * k10[coordIndex]
+ b25_10 * k11[coordIndex]
+ b25_11 * k12[coordIndex]
+ b25_12 * k13[coordIndex]
+ b25_13 * k14[coordIndex]
+ b25_14 * k15[coordIndex]
+ b25_15 * k16[coordIndex]
+ b25_16 * k17[coordIndex]
+ b25_17 * k18[coordIndex]
+ b25_18 * k19[coordIndex]
+ b25_19 * k20[coordIndex]
+ b25_20 * k21[coordIndex]
+ b25_21 * k22[coordIndex]
+ b25_22 * k23[coordIndex]
+ b25_23 * k24[coordIndex]
+ b25_24 * k25[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a24, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 25:
{
Scalar a25(0.0392172246650270859125196642501208648863714315266128052078483);
Scalar b26_0(32.3038520871985442326994734440031535091364975047784630088983);
Scalar b26_1(0);
Scalar b26_2(0);
Scalar b26_3(0);
Scalar b26_4(0);
Scalar b26_5(-0.00317987696266205093901912847692712407988609169703103952205634);
Scalar b26_6(0.806397714906192077260821711520379506393543111567419750119748);
Scalar b26_7(0.0975983126412388979093522850684288851314672048003054550357187);
Scalar b26_8(0.778575578158398909027512446452927238999763460594181964958853);
Scalar b26_9(0.204890423831599428189499202098105603312029235081420653574829);
Scalar b26_10(-1.56261579627468188307070943950527825211462892236424360892806);
Scalar b26_11(0);
Scalar b26_12(16.3429891882310570648504243973927174708753353504154550405647);
Scalar b26_13(-154.544555293543621230730189631471036399316683669609116705323);
Scalar b26_14(1.56971088703334872692034283417621761466263593582497085955201);
Scalar b26_15(3.27685545087248131321429817269900731165522404974733504794135);
Scalar b26_16(-0.0503489245193653176348040727199783626534081095691632396802451);
Scalar b26_17(153.321151858041665070593767885914694011224363102594556731397);
Scalar b26_18(7.17568186327720495846766484814784143567826308034865369443637);
Scalar b26_19(-2.94036748675300481945917659896930989215320594380777597403592);
Scalar b26_20(-0.0665845946076803144470749676022628870281920493197256887985612);
Scalar b26_21(-0.0462346054990843661229248668562217261176966514016859284197145);
Scalar b26_22(-0.0204198733585679401539388228617269778848579774821581777675337);
Scalar b26_23(-53.3523106438735850515953441165998107974045090495791591218714);
Scalar b26_24(-1.35548714715078654978732186705996404017554501614191325114947);
Scalar b26_25(-1.57196275801232751882901735171459249177687219114442583461866);
system->GetCurrDerivatives(k26, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b26_0 * k1[coordIndex]
+ b26_1 * k2[coordIndex]
+ b26_2 * k3[coordIndex]
+ b26_3 * k4[coordIndex]
+ b26_4 * k5[coordIndex]
+ b26_5 * k6[coordIndex]
+ b26_6 * k7[coordIndex]
+ b26_7 * k8[coordIndex]
+ b26_8 * k9[coordIndex]
+ b26_9 * k10[coordIndex]
+ b26_10 * k11[coordIndex]
+ b26_11 * k12[coordIndex]
+ b26_12 * k13[coordIndex]
+ b26_13 * k14[coordIndex]
+ b26_14 * k15[coordIndex]
+ b26_15 * k16[coordIndex]
+ b26_16 * k17[coordIndex]
+ b26_17 * k18[coordIndex]
+ b26_18 * k19[coordIndex]
+ b26_19 * k20[coordIndex]
+ b26_20 * k21[coordIndex]
+ b26_21 * k22[coordIndex]
+ b26_22 * k23[coordIndex]
+ b26_23 * k24[coordIndex]
+ b26_24 * k25[coordIndex]
+ b26_25 * k26[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a25, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 26:
{
Scalar a26(0.363636363636363636363636363636363636363636363636363636363636);
Scalar b27_0(-16.6451467486341512872031294403931758764560371130818978459405);
Scalar b27_1(0);
Scalar b27_2(0);
Scalar b27_3(0);
Scalar b27_4(0);
Scalar b27_5(0.00592232780324503308042990005798046524738389560444257136834990);
Scalar b27_6(0.470326159963841112217224303205894113455362530746108825010848);
Scalar b27_7(0.299688863848679000853981837096192399136831121671781279184194);
Scalar b27_8(-0.247656877593994914689992276329810825853958069263947095548189);
Scalar b27_9(0.110895029771437682893999851839061714522445173600678718208625);
Scalar b27_10(0);
Scalar b27_11(-0.491719043846229147070666628704194097678081907210673044988866);
Scalar b27_12(-11.4743154427289496968389492564352536350842454130853175250727);
Scalar b27_13(80.2593166576230272541702485886484400152793366623589989106256);
Scalar b27_14(-0.384132303980042847625312526759029103746926841342088219165648);
Scalar b27_15(7.28147667468107583471326950926136115767612581862877764249646);
Scalar b27_16(-0.132699384612248379510571708176035274836827341616751884314074);
Scalar b27_17(-81.0799832525730726674679289752255240006070716633632990308935);
Scalar b27_18(-1.25037492835620639521768185656179119962253747492403205797494);
Scalar b27_19(2.59263594969543681023776379504377324994226447359296887778718);
Scalar b27_20(-0.301440298346404539830163997260526875264431537275641495291993);
Scalar b27_21(0.221384460789832337451706451572773791695246839057318414301020);
Scalar b27_22(0.0827577274771892931955989870974693152996276435429809890551210);
Scalar b27_23(18.9960662040611520464672450037243263998175161412237156872211);
Scalar b27_24(0.269231946409639685623468015128334167460051910348912845121977);
Scalar b27_25(1.62674827447066537462989364929628933988125029284183680279020);
Scalar b27_26(0.491719043846229147070666628704194097678081907210673044988866);
system->GetCurrDerivatives(k27, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b27_0 * k1[coordIndex]
+ b27_1 * k2[coordIndex]
+ b27_2 * k3[coordIndex]
+ b27_3 * k4[coordIndex]
+ b27_4 * k5[coordIndex]
+ b27_5 * k6[coordIndex]
+ b27_6 * k7[coordIndex]
+ b27_7 * k8[coordIndex]
+ b27_8 * k9[coordIndex]
+ b27_9 * k10[coordIndex]
+ b27_10 * k11[coordIndex]
+ b27_11 * k12[coordIndex]
+ b27_12 * k13[coordIndex]
+ b27_13 * k14[coordIndex]
+ b27_14 * k15[coordIndex]
+ b27_15 * k16[coordIndex]
+ b27_16 * k17[coordIndex]
+ b27_17 * k18[coordIndex]
+ b27_18 * k19[coordIndex]
+ b27_19 * k20[coordIndex]
+ b27_20 * k21[coordIndex]
+ b27_21 * k22[coordIndex]
+ b27_22 * k23[coordIndex]
+ b27_23 * k24[coordIndex]
+ b27_24 * k25[coordIndex]
+ b27_25 * k26[coordIndex]
+ b27_26 * k27[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a26, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 27:
{
Scalar a27(0.700701039770150737151099854830749337941407049265546408969222);
Scalar b28_0(0.0838479812409052664616968791372814085980533139224911131069335);
Scalar b28_1(0);
Scalar b28_2(0);
Scalar b28_3(0);
Scalar b28_4(0);
Scalar b28_5(-0.0117949367100973814319755056031295775367961960590736150777613);
Scalar b28_6(-0.247299020568812652339473838743194598325992840353340132697498);
Scalar b28_7(0.0978080858367729012259313014081291665503740655476733940756599);
Scalar b28_8(0.217590689243420631360008651767860318344168120024782176879989);
Scalar b28_9(0);
Scalar b28_10(0.137585606763325224865659632196787746647447222975084865975440);
Scalar b28_11(0.0439870229715046685058790092341545026046103890294261359042581);
Scalar b28_12(0);
Scalar b28_13(-0.513700813768193341957004456618630303738757363641964030086972);
Scalar b28_14(0.826355691151315508644211308399153458701423158616168576922372);
Scalar b28_15(25.7018139719811832625873882972519939511136556341960074626615);
Scalar b28_16(0);
Scalar b28_17(0);
Scalar b28_18(0);
Scalar b28_19(0);
Scalar b28_20(0);
Scalar b28_21(0);
Scalar b28_22(0);
Scalar b28_23(-25.7018139719811832625873882972519939511136556341960074626615);
Scalar b28_24(-0.826355691151315508644211308399153458701423158616168576922372);
Scalar b28_25(0.513700813768193341957004456618630303738757363641964030086972);
Scalar b28_26(-0.0439870229715046685058790092341545026046103890294261359042581);
Scalar b28_27(-0.137585606763325224865659632196787746647447222975084865975440);
system->GetCurrDerivatives(k28, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b28_0 * k1[coordIndex]
+ b28_1 * k2[coordIndex]
+ b28_2 * k3[coordIndex]
+ b28_3 * k4[coordIndex]
+ b28_4 * k5[coordIndex]
+ b28_5 * k6[coordIndex]
+ b28_6 * k7[coordIndex]
+ b28_7 * k8[coordIndex]
+ b28_8 * k9[coordIndex]
+ b28_9 * k10[coordIndex]
+ b28_10 * k11[coordIndex]
+ b28_11 * k12[coordIndex]
+ b28_12 * k13[coordIndex]
+ b28_13 * k14[coordIndex]
+ b28_14 * k15[coordIndex]
+ b28_15 * k16[coordIndex]
+ b28_16 * k17[coordIndex]
+ b28_17 * k18[coordIndex]
+ b28_18 * k19[coordIndex]
+ b28_19 * k20[coordIndex]
+ b28_20 * k21[coordIndex]
+ b28_21 * k22[coordIndex]
+ b28_22 * k23[coordIndex]
+ b28_23 * k24[coordIndex]
+ b28_24 * k25[coordIndex]
+ b28_25 * k26[coordIndex]
+ b28_26 * k27[coordIndex]
+ b28_27 * k28[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a27, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 28:
{
Scalar a28(0.140152799042188765276187487966946717629806463082532936287323);
Scalar b29_0(0.124380526654094412881516420868799316268491466359671423163289);
Scalar b29_1(0);
Scalar b29_2(0);
Scalar b29_3(0);
Scalar b29_4(0.226120282197584301422238662979202901196752320742633143965145);
Scalar b29_5(0.0137885887618080880607695837016477814530969417491493385363543);
Scalar b29_6(-0.0672210133996684449749399507414305856950086341525382182856200);
Scalar b29_7(0);
Scalar b29_8(0);
Scalar b29_9(-0.856238975085428354755349769879501772112121597411563802855067);
Scalar b29_10(-1.96337522866858908928262850028093813988180440518267404553576);
Scalar b29_11(-0.232332822724119401237246257308921847250108199230419994978218);
Scalar b29_12(0);
Scalar b29_13(4.30660719086453349461668936876562947772432562053478092626764);
Scalar b29_14(-2.92722963249465482659787911202390446687687394950633612630592);
Scalar b29_15(-82.3131666397858944454492334105458707735761966428138676971041);
Scalar b29_16(0);
Scalar b29_17(0);
Scalar b29_18(0);
Scalar b29_19(0);
Scalar b29_20(0);
Scalar b29_21(0);
Scalar b29_22(0);
Scalar b29_23(82.3131666397858944454492334105458707735761966428138676971041);
Scalar b29_24(2.92722963249465482659787911202390446687687394950633612630592);
Scalar b29_25(-4.30660719086453349461668936876562947772432562053478092626764);
Scalar b29_26(0.232332822724119401237246257308921847250108199230419994978218);
Scalar b29_27(1.96337522866858908928262850028093813988180440518267404553576);
Scalar b29_28(0.856238975085428354755349769879501772112121597411563802855067);
system->GetCurrDerivatives(k29, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b29_0 * k1[coordIndex]
+ b29_1 * k2[coordIndex]
+ b29_2 * k3[coordIndex]
+ b29_3 * k4[coordIndex]
+ b29_4 * k5[coordIndex]
+ b29_5 * k6[coordIndex]
+ b29_6 * k7[coordIndex]
+ b29_7 * k8[coordIndex]
+ b29_8 * k9[coordIndex]
+ b29_9 * k10[coordIndex]
+ b29_10 * k11[coordIndex]
+ b29_11 * k12[coordIndex]
+ b29_12 * k13[coordIndex]
+ b29_13 * k14[coordIndex]
+ b29_14 * k15[coordIndex]
+ b29_15 * k16[coordIndex]
+ b29_16 * k17[coordIndex]
+ b29_17 * k18[coordIndex]
+ b29_18 * k19[coordIndex]
+ b29_19 * k20[coordIndex]
+ b29_20 * k21[coordIndex]
+ b29_21 * k22[coordIndex]
+ b29_22 * k23[coordIndex]
+ b29_23 * k24[coordIndex]
+ b29_24 * k25[coordIndex]
+ b29_25 * k26[coordIndex]
+ b29_26 * k27[coordIndex]
+ b29_27 * k28[coordIndex]
+ b29_28 * k29[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a28, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 29:
{
Scalar a29(0.297068384213818357389584716808219413223332094698915687379168);
Scalar b30_0(0.103484561636679776672993546511910344499744798201971316606663);
Scalar b30_1(0);
Scalar b30_2(0);
Scalar b30_3(0.122068887306407222589644082868962077139592714834162134741275);
Scalar b30_4(0.482574490331246622475134780125688112865919023850168049679402);
Scalar b30_5(-0.0381409600015606999730886240005620205664113072478411477421970);
Scalar b30_6(0);
Scalar b30_7(-0.550499525310802324138388507020508177411414311000037561712836);
Scalar b30_8(0);
Scalar b30_9(-0.711915811585189227887648262043794387578291882406745570495765);
Scalar b30_10(-0.584129605671551340432988730158480872095335329645227595707052);
Scalar b30_11(0);
Scalar b30_12(0);
Scalar b30_13(2.11046308125864932128717300046622750300375054278936987850718);
Scalar b30_14(-0.0837494736739572135525742023001037992695260175335123517729291);
Scalar b30_15(5.10021499072320914075295969043344113107545060862804249161191);
Scalar b30_16(0);
Scalar b30_17(0);
Scalar b30_18(0);
Scalar b30_19(0);
Scalar b30_20(0);
Scalar b30_21(0);
Scalar b30_22(0);
Scalar b30_23(-5.10021499072320914075295969043344113107545060862804249161191);
Scalar b30_24(0.0837494736739572135525742023001037992695260175335123517729291);
Scalar b30_25(-2.11046308125864932128717300046622750300375054278936987850718);
Scalar b30_26(0);
Scalar b30_27(0.584129605671551340432988730158480872095335329645227595707052);
Scalar b30_28(0.711915811585189227887648262043794387578291882406745570495765);
Scalar b30_29(0.550499525310802324138388507020508177411414311000037561712836);
system->GetCurrDerivatives(k30, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b30_0 * k1[coordIndex]
+ b30_1 * k2[coordIndex]
+ b30_2 * k3[coordIndex]
+ b30_3 * k4[coordIndex]
+ b30_4 * k5[coordIndex]
+ b30_5 * k6[coordIndex]
+ b30_6 * k7[coordIndex]
+ b30_7 * k8[coordIndex]
+ b30_8 * k9[coordIndex]
+ b30_9 * k10[coordIndex]
+ b30_10 * k11[coordIndex]
+ b30_11 * k12[coordIndex]
+ b30_12 * k13[coordIndex]
+ b30_13 * k14[coordIndex]
+ b30_14 * k15[coordIndex]
+ b30_15 * k16[coordIndex]
+ b30_16 * k17[coordIndex]
+ b30_17 * k18[coordIndex]
+ b30_18 * k19[coordIndex]
+ b30_19 * k20[coordIndex]
+ b30_20 * k21[coordIndex]
+ b30_21 * k22[coordIndex]
+ b30_22 * k23[coordIndex]
+ b30_23 * k24[coordIndex]
+ b30_24 * k25[coordIndex]
+ b30_25 * k26[coordIndex]
+ b30_26 * k27[coordIndex]
+ b30_27 * k28[coordIndex]
+ b30_28 * k29[coordIndex]
+ b30_29 * k30[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a29, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 30:
{
Scalar a30(0.669986979272772921764683785505998513938845229638460353285142);
Scalar b31_0(0.193333333333333333333333333333333333333333333333333333333333);
Scalar b31_1(0);
Scalar b31_2(0.220000000000000000000000000000000000000000000000000000000000);
Scalar b31_3(-0.0800000000000000000000000000000000000000000000000000000000000);
Scalar b31_4(0);
Scalar b31_5(0);
Scalar b31_6(0.109993425580724703919462404865068340845119058295846426463652);
Scalar b31_7(-0.254297048076270161384068506997153122141835626976703920846242);
Scalar b31_8(0);
Scalar b31_9(0.865570777116694254343770343821098281832847401233011859346737);
Scalar b31_10(3.32416449114093083106799552786572018336860092936986407160200);
Scalar b31_11(0);
Scalar b31_12(0);
Scalar b31_13(-12.0102223315977933882352385148661841260301942633996815127277);
Scalar b31_14(0.476601466242493239430442776862061899602963782003580209476163);
Scalar b31_15(-29.0243011221036390525802623213654099596251221332470910692353);
Scalar b31_16(0);
Scalar b31_17(0);
Scalar b31_18(0);
Scalar b31_19(0);
Scalar b31_20(0);
Scalar b31_21(0);
Scalar b31_22(0);
Scalar b31_23(29.0243011221036390525802623213654099596251221332470910692353);
Scalar b31_24(-0.476601466242493239430442776862061899602963782003580209476163);
Scalar b31_25(12.0102223315977933882352385148661841260301942633996815127277);
Scalar b31_26(0);
Scalar b31_27(-3.32416449114093083106799552786572018336860092936986407160200);
Scalar b31_28(-0.865570777116694254343770343821098281832847401233011859346737);
Scalar b31_29(0.254297048076270161384068506997153122141835626976703920846242);
Scalar b31_30(-0.109993425580724703919462404865068340845119058295846426463652);
system->GetCurrDerivatives(k31, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b31_0 * k1[coordIndex]
+ b31_1 * k2[coordIndex]
+ b31_2 * k3[coordIndex]
+ b31_3 * k4[coordIndex]
+ b31_4 * k5[coordIndex]
+ b31_5 * k6[coordIndex]
+ b31_6 * k7[coordIndex]
+ b31_7 * k8[coordIndex]
+ b31_8 * k9[coordIndex]
+ b31_9 * k10[coordIndex]
+ b31_10 * k11[coordIndex]
+ b31_11 * k12[coordIndex]
+ b31_12 * k13[coordIndex]
+ b31_13 * k14[coordIndex]
+ b31_14 * k15[coordIndex]
+ b31_15 * k16[coordIndex]
+ b31_16 * k17[coordIndex]
+ b31_17 * k18[coordIndex]
+ b31_18 * k19[coordIndex]
+ b31_19 * k20[coordIndex]
+ b31_20 * k21[coordIndex]
+ b31_21 * k22[coordIndex]
+ b31_22 * k23[coordIndex]
+ b31_23 * k24[coordIndex]
+ b31_24 * k25[coordIndex]
+ b31_25 * k26[coordIndex]
+ b31_26 * k27[coordIndex]
+ b31_27 * k28[coordIndex]
+ b31_28 * k29[coordIndex]
+ b31_29 * k30[coordIndex]
+ b31_30 * k31[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a30, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 31:
{
Scalar a31(0.333333333333333333333333333333333333333333333333333333333333);
Scalar b32_0(-0.833333333333333333333333333333333333333333333333333333333333);
Scalar b32_1(1.38888888888888888888888888888888888888888888888888888888889);
Scalar b32_2(0);
Scalar b32_3(0);
Scalar b32_4(-0.750000000000000000000000000000000000000000000000000000000000);
Scalar b32_5(0);
Scalar b32_6(-0.492529543718026304422682049114021320200214681580657784719074);
Scalar b32_7(0);
Scalar b32_8(0);
Scalar b32_9(0);
Scalar b32_10(0);
Scalar b32_11(0);
Scalar b32_12(0);
Scalar b32_13(0);
Scalar b32_14(0);
Scalar b32_15(0);
Scalar b32_16(0);
Scalar b32_17(0);
Scalar b32_18(0);
Scalar b32_19(0);
Scalar b32_20(0);
Scalar b32_21(0);
Scalar b32_22(0);
Scalar b32_23(0);
Scalar b32_24(0);
Scalar b32_25(0);
Scalar b32_26(0);
Scalar b32_27(0);
Scalar b32_28(0);
Scalar b32_29(0);
Scalar b32_30(0.492529543718026304422682049114021320200214681580657784719074);
Scalar b32_31(0.750000000000000000000000000000000000000000000000000000000000);
system->GetCurrDerivatives(k32, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b32_0 * k1[coordIndex]
+ b32_1 * k2[coordIndex]
+ b32_2 * k3[coordIndex]
+ b32_3 * k4[coordIndex]
+ b32_4 * k5[coordIndex]
+ b32_5 * k6[coordIndex]
+ b32_6 * k7[coordIndex]
+ b32_7 * k8[coordIndex]
+ b32_8 * k9[coordIndex]
+ b32_9 * k10[coordIndex]
+ b32_10 * k11[coordIndex]
+ b32_11 * k12[coordIndex]
+ b32_12 * k13[coordIndex]
+ b32_13 * k14[coordIndex]
+ b32_14 * k15[coordIndex]
+ b32_15 * k16[coordIndex]
+ b32_16 * k17[coordIndex]
+ b32_17 * k18[coordIndex]
+ b32_18 * k19[coordIndex]
+ b32_19 * k20[coordIndex]
+ b32_20 * k21[coordIndex]
+ b32_21 * k22[coordIndex]
+ b32_22 * k23[coordIndex]
+ b32_23 * k24[coordIndex]
+ b32_24 * k25[coordIndex]
+ b32_25 * k26[coordIndex]
+ b32_26 * k27[coordIndex]
+ b32_27 * k28[coordIndex]
+ b32_28 * k29[coordIndex]
+ b32_29 * k30[coordIndex]
+ b32_30 * k31[coordIndex]
+ b32_31 * k32[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a31, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 32:
{
Scalar a32(0.555555555555555555555555555555555555555555555555555555555556);
Scalar b33_0(0.111111111111111111111111111111111111111111111111111111111111);
Scalar b33_1(0);
Scalar b33_2(-0.222222222222222222222222222222222222222222222222222222222222);
Scalar b33_3(0);
Scalar b33_4(0);
Scalar b33_5(0);
Scalar b33_6(0);
Scalar b33_7(0);
Scalar b33_8(0);
Scalar b33_9(0);
Scalar b33_10(0);
Scalar b33_11(0);
Scalar b33_12(0);
Scalar b33_13(0);
Scalar b33_14(0);
Scalar b33_15(0);
Scalar b33_16(0);
Scalar b33_17(0);
Scalar b33_18(0);
Scalar b33_19(0);
Scalar b33_20(0);
Scalar b33_21(0);
Scalar b33_22(0);
Scalar b33_23(0);
Scalar b33_24(0);
Scalar b33_25(0);
Scalar b33_26(0);
Scalar b33_27(0);
Scalar b33_28(0);
Scalar b33_29(0);
Scalar b33_30(0);
Scalar b33_31(0);
Scalar b33_32(0.222222222222222222222222222222222222222222222222222222222222);
system->GetCurrDerivatives(k33, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b33_0 * k1[coordIndex]
+ b33_1 * k2[coordIndex]
+ b33_2 * k3[coordIndex]
+ b33_3 * k4[coordIndex]
+ b33_4 * k5[coordIndex]
+ b33_5 * k6[coordIndex]
+ b33_6 * k7[coordIndex]
+ b33_7 * k8[coordIndex]
+ b33_8 * k9[coordIndex]
+ b33_9 * k10[coordIndex]
+ b33_10 * k11[coordIndex]
+ b33_11 * k12[coordIndex]
+ b33_12 * k13[coordIndex]
+ b33_13 * k14[coordIndex]
+ b33_14 * k15[coordIndex]
+ b33_15 * k16[coordIndex]
+ b33_16 * k17[coordIndex]
+ b33_17 * k18[coordIndex]
+ b33_18 * k19[coordIndex]
+ b33_19 * k20[coordIndex]
+ b33_20 * k21[coordIndex]
+ b33_21 * k22[coordIndex]
+ b33_22 * k23[coordIndex]
+ b33_23 * k24[coordIndex]
+ b33_24 * k25[coordIndex]
+ b33_25 * k26[coordIndex]
+ b33_26 * k27[coordIndex]
+ b33_27 * k28[coordIndex]
+ b33_28 * k29[coordIndex]
+ b33_29 * k30[coordIndex]
+ b33_30 * k31[coordIndex]
+ b33_31 * k32[coordIndex]
+ b33_32 * k33[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a32, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 33:
{
Scalar a33(0.111111111111111111111111111111111111111111111111111111111111);
Scalar b34_0(0.285835140388971558796088842163836414852927537894596466840753);
Scalar b34_1(0.291666666666666666666666666666666666666666666666666666666667);
Scalar b34_2(0.218750000000000000000000000000000000000000000000000000000000);
Scalar b34_3(0);
Scalar b34_4(0.164062500000000000000000000000000000000000000000000000000000);
Scalar b34_5(0);
Scalar b34_6(0.218194354945556658327188241581352107093288824322187941141516);
Scalar b34_7(0.180392898478697766863635221946775437719620053641849228562435);
Scalar b34_8(0);
Scalar b34_9(0.205713839404845018859120755122929542277570094982808905393991);
Scalar b34_10(0.242715791581770239970282927959446515762745971386670541948576);
Scalar b34_11(0.246465780813629305833609291181891407799228103869305705137021);
Scalar b34_12(-3.44991940790890824979834154601622662060370460614931644223924);
Scalar b34_13(0.228875562160036081760729060738458584294220372552740218459295);
Scalar b34_14(0.283290599702151415321527419056733335978436595493855789831434);
Scalar b34_15(3.21085125837766640960131490544236787005557320332238705967955);
Scalar b34_16(-0.223538777364845699920233756214162507964125230083674032084065);
Scalar b34_17(-0.707121157204419073518727286207487212130091231955206160635271);
Scalar b34_18(3.21123345150287080408174729202856500893260034443022374267639);
Scalar b34_19(1.40954348309669766030414474301123175769045945573548986335553);
Scalar b34_20(-0.151362053443742613121602276742518111090963026203676055891793);
Scalar b34_21(0.372350574527014276454724080214619984397121028202148298716575);
Scalar b34_22(0.252978746406361336722199907762141285915775728129414319261111);
Scalar b34_23(-3.21085125837766640960131490544236787005557320332238705967955);
Scalar b34_24(-0.283290599702151415321527419056733335978436595493855789831434);
Scalar b34_25(-0.228875562160036081760729060738458584294220372552740218459295);
Scalar b34_26(-0.246465780813629305833609291181891407799228103869305705137021);
Scalar b34_27(-0.242715791581770239970282927959446515762745971386670541948576);
Scalar b34_28(-0.205713839404845018859120755122929542277570094982808905393991);
Scalar b34_29(-0.180392898478697766863635221946775437719620053641849228562435);
Scalar b34_30(-0.218194354945556658327188241581352107093288824322187941141516);
Scalar b34_31(-0.164062500000000000000000000000000000000000000000000000000000);
Scalar b34_32(-0.218750000000000000000000000000000000000000000000000000000000);
Scalar b34_33(-0.291666666666666666666666666666666666666666666666666666666667);
system->GetCurrDerivatives(k34, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
probeCoords[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b34_0 * k1[coordIndex]
+ b34_1 * k2[coordIndex]
+ b34_2 * k3[coordIndex]
+ b34_3 * k4[coordIndex]
+ b34_4 * k5[coordIndex]
+ b34_5 * k6[coordIndex]
+ b34_6 * k7[coordIndex]
+ b34_7 * k8[coordIndex]
+ b34_8 * k9[coordIndex]
+ b34_9 * k10[coordIndex]
+ b34_10 * k11[coordIndex]
+ b34_11 * k12[coordIndex]
+ b34_12 * k13[coordIndex]
+ b34_13 * k14[coordIndex]
+ b34_14 * k15[coordIndex]
+ b34_15 * k16[coordIndex]
+ b34_16 * k17[coordIndex]
+ b34_17 * k18[coordIndex]
+ b34_18 * k19[coordIndex]
+ b34_19 * k20[coordIndex]
+ b34_20 * k21[coordIndex]
+ b34_21 * k22[coordIndex]
+ b34_22 * k23[coordIndex]
+ b34_23 * k24[coordIndex]
+ b34_24 * k25[coordIndex]
+ b34_25 * k26[coordIndex]
+ b34_26 * k27[coordIndex]
+ b34_27 * k28[coordIndex]
+ b34_28 * k29[coordIndex]
+ b34_29 * k30[coordIndex]
+ b34_30 * k31[coordIndex]
+ b34_31 * k32[coordIndex]
+ b34_32 * k33[coordIndex]
+ b34_33 * k34[coordIndex]
);
}
system->SetCurrCoords(this->currTime + this->timeStep * a33, probeCoords, this->globalStepIndex, this->hierarchyPhase);
} break;
case 34:
{
Scalar a33(0.111111111111111111111111111111111111111111111111111111111111);
Scalar a34(1.00000000000000000000000000000000000000000000000000000000000);
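// Final stage: c0..c34 weight the second candidate solution (nextCoords2,
// the one AdvanceStep() later commits), while the b34 row, repeated below
// from case 33, weights the first candidate (nextCoords1). The difference
// between the two drives the local error estimate; a33 and a34 are reused
// further down as the step-controller exponent and reference value.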
Scalar c0(0.0178571428571428571428571428571428571428571428571428571428571);
Scalar c1(0.00585937500000000000000000000000000000000000000000000000000000);
Scalar c2(0.0117187500000000000000000000000000000000000000000000000000000);
Scalar c3(0);
Scalar c4(0.0175781250000000000000000000000000000000000000000000000000000);
Scalar c5(0);
Scalar c6(0.0234375000000000000000000000000000000000000000000000000000000);
Scalar c7(0.0292968750000000000000000000000000000000000000000000000000000);
Scalar c8(0);
Scalar c9(0.0351562500000000000000000000000000000000000000000000000000000);
Scalar c10(0.0410156250000000000000000000000000000000000000000000000000000);
Scalar c11(0.0468750000000000000000000000000000000000000000000000000000000);
Scalar c12(0);
Scalar c13(0.0527343750000000000000000000000000000000000000000000000000000);
Scalar c14(0.0585937500000000000000000000000000000000000000000000000000000);
Scalar c15(0.0644531250000000000000000000000000000000000000000000000000000);
Scalar c16(0);
Scalar c17(0.105352113571753019691496032887878162227673083080523884041670);
Scalar c18(0.170561346241752182382120338553874085887555487802790804737501);
Scalar c19(0.206229397329351940783526485701104894741914286259542454077972);
Scalar c20(0.206229397329351940783526485701104894741914286259542454077972);
Scalar c21(0.170561346241752182382120338553874085887555487802790804737501);
Scalar c22(0.105352113571753019691496032887878162227673083080523884041670);
Scalar c23(-0.0644531250000000000000000000000000000000000000000000000000000);
Scalar c24(-0.0585937500000000000000000000000000000000000000000000000000000);
Scalar c25(-0.0527343750000000000000000000000000000000000000000000000000000);
Scalar c26(-0.0468750000000000000000000000000000000000000000000000000000000);
Scalar c27(-0.0410156250000000000000000000000000000000000000000000000000000);
Scalar c28(-0.0351562500000000000000000000000000000000000000000000000000000);
Scalar c29(-0.0292968750000000000000000000000000000000000000000000000000000);
Scalar c30(-0.0234375000000000000000000000000000000000000000000000000000000);
Scalar c31(-0.0175781250000000000000000000000000000000000000000000000000000);
Scalar c32(-0.0117187500000000000000000000000000000000000000000000000000000);
Scalar c33(-0.00585937500000000000000000000000000000000000000000000000000000);
Scalar c34(0.0178571428571428571428571428571428571428571428571428571428571);
Scalar b34_0(0.285835140388971558796088842163836414852927537894596466840753);
Scalar b34_1(0.291666666666666666666666666666666666666666666666666666666667);
Scalar b34_2(0.218750000000000000000000000000000000000000000000000000000000);
Scalar b34_3(0);
Scalar b34_4(0.164062500000000000000000000000000000000000000000000000000000);
Scalar b34_5(0);
Scalar b34_6(0.218194354945556658327188241581352107093288824322187941141516);
Scalar b34_7(0.180392898478697766863635221946775437719620053641849228562435);
Scalar b34_8(0);
Scalar b34_9(0.205713839404845018859120755122929542277570094982808905393991);
Scalar b34_10(0.242715791581770239970282927959446515762745971386670541948576);
Scalar b34_11(0.246465780813629305833609291181891407799228103869305705137021);
Scalar b34_12(-3.44991940790890824979834154601622662060370460614931644223924);
Scalar b34_13(0.228875562160036081760729060738458584294220372552740218459295);
Scalar b34_14(0.283290599702151415321527419056733335978436595493855789831434);
Scalar b34_15(3.21085125837766640960131490544236787005557320332238705967955);
Scalar b34_16(-0.223538777364845699920233756214162507964125230083674032084065);
Scalar b34_17(-0.707121157204419073518727286207487212130091231955206160635271);
Scalar b34_18(3.21123345150287080408174729202856500893260034443022374267639);
Scalar b34_19(1.40954348309669766030414474301123175769045945573548986335553);
Scalar b34_20(-0.151362053443742613121602276742518111090963026203676055891793);
Scalar b34_21(0.372350574527014276454724080214619984397121028202148298716575);
Scalar b34_22(0.252978746406361336722199907762141285915775728129414319261111);
Scalar b34_23(-3.21085125837766640960131490544236787005557320332238705967955);
Scalar b34_24(-0.283290599702151415321527419056733335978436595493855789831434);
Scalar b34_25(-0.228875562160036081760729060738458584294220372552740218459295);
Scalar b34_26(-0.246465780813629305833609291181891407799228103869305705137021);
Scalar b34_27(-0.242715791581770239970282927959446515762745971386670541948576);
Scalar b34_28(-0.205713839404845018859120755122929542277570094982808905393991);
Scalar b34_29(-0.180392898478697766863635221946775437719620053641849228562435);
Scalar b34_30(-0.218194354945556658327188241581352107093288824322187941141516);
Scalar b34_31(-0.164062500000000000000000000000000000000000000000000000000000);
Scalar b34_32(-0.218750000000000000000000000000000000000000000000000000000000);
Scalar b34_33(-0.291666666666666666666666666666666666666666666666666666666667);
system->GetCurrDerivatives(k35, this->globalStepIndex, this->hierarchyPhase);
#pragma omp parallel for
for (int coordIndex = 0; coordIndex < system->GetDimentionsCount(this->globalStepIndex, this->hierarchyPhase); coordIndex++)
{
nextCoords1[coordIndex] = currCoords[coordIndex] + this->timeStep * (
b34_0 * k1[coordIndex]
+ b34_1 * k2[coordIndex]
+ b34_2 * k3[coordIndex]
+ b34_3 * k4[coordIndex]
+ b34_4 * k5[coordIndex]
+ b34_5 * k6[coordIndex]
+ b34_6 * k7[coordIndex]
+ b34_7 * k8[coordIndex]
+ b34_8 * k9[coordIndex]
+ b34_9 * k10[coordIndex]
+ b34_10 * k11[coordIndex]
+ b34_11 * k12[coordIndex]
+ b34_12 * k13[coordIndex]
+ b34_13 * k14[coordIndex]
+ b34_14 * k15[coordIndex]
+ b34_15 * k16[coordIndex]
+ b34_16 * k17[coordIndex]
+ b34_17 * k18[coordIndex]
+ b34_18 * k19[coordIndex]
+ b34_19 * k20[coordIndex]
+ b34_20 * k21[coordIndex]
+ b34_21 * k22[coordIndex]
+ b34_22 * k23[coordIndex]
+ b34_23 * k24[coordIndex]
+ b34_24 * k25[coordIndex]
+ b34_25 * k26[coordIndex]
+ b34_26 * k27[coordIndex]
+ b34_27 * k28[coordIndex]
+ b34_28 * k29[coordIndex]
+ b34_29 * k30[coordIndex]
+ b34_30 * k31[coordIndex]
+ b34_31 * k32[coordIndex]
+ b34_32 * k33[coordIndex]
+ b34_33 * k34[coordIndex]
);
nextCoords2[coordIndex] = currCoords[coordIndex] + this->timeStep * (
c0 * k1[coordIndex]
+ c1 * k2[coordIndex]
+ c2 * k3[coordIndex]
+ c3 * k4[coordIndex]
+ c4 * k5[coordIndex]
+ c5 * k6[coordIndex]
+ c6 * k7[coordIndex]
+ c7 * k8[coordIndex]
+ c8 * k9[coordIndex]
+ c9 * k10[coordIndex]
+ c10 * k11[coordIndex]
+ c11 * k12[coordIndex]
+ c12 * k13[coordIndex]
+ c13 * k14[coordIndex]
+ c14 * k15[coordIndex]
+ c15 * k16[coordIndex]
+ c16 * k17[coordIndex]
+ c17 * k18[coordIndex]
+ c18 * k19[coordIndex]
+ c19 * k20[coordIndex]
+ c20 * k21[coordIndex]
+ c21 * k22[coordIndex]
+ c22 * k23[coordIndex]
+ c23 * k24[coordIndex]
+ c24 * k25[coordIndex]
+ c25 * k26[coordIndex]
+ c26 * k27[coordIndex]
+ c27 * k28[coordIndex]
+ c28 * k29[coordIndex]
+ c29 * k30[coordIndex]
+ c30 * k31[coordIndex]
+ c31 * k32[coordIndex]
+ c32 * k33[coordIndex]
+ c33 * k34[coordIndex]
+ c34 * k35[coordIndex]
);
}
stepError = (system->GetErrorValue(this->currTime, nextCoords1, nextCoords2, this->globalStepIndex, this->hierarchyPhase) / this->tolerance) / this->timeStep;
predictedStep = this->timeStep * Scalar( pow(a34 / stepError, a33));
} break;
}
}
void AdvanceStep()
{
if (this->hierarchyPhase == 1)
{
this->currTime += this->timeStep;
}
system->SetCurrCoords(this->currTime, nextCoords2, this->globalStepIndex, this->hierarchyPhase);
}
void RevertStep()
{
system->SetCurrCoords(this->currTime, currCoords, this->globalStepIndex, this->hierarchyPhase);
}
Scalar GetLastStepError()
{
return stepError;
}
Scalar GetTimeStepPrediction()
{
return predictedStep;
}
private:
Scalar* currCoords;
Scalar* nextCoords1;
Scalar* nextCoords2;
Scalar* probeCoords;
Scalar* derivatives;
Scalar* k1;
Scalar* k2;
Scalar* k3;
Scalar* k4;
Scalar* k5;
Scalar* k6;
Scalar* k7;
Scalar* k8;
Scalar* k9;
Scalar* k10;
Scalar* k11;
Scalar* k12;
Scalar* k13;
Scalar* k14;
Scalar* k15;
Scalar* k16;
Scalar* k17;
Scalar* k18;
Scalar* k19;
Scalar* k20;
Scalar* k21;
Scalar* k22;
Scalar* k23;
Scalar* k24;
Scalar* k25;
Scalar* k26;
Scalar* k27;
Scalar* k28;
Scalar* k29;
Scalar* k30;
Scalar* k31;
Scalar* k32;
Scalar* k33;
Scalar* k34;
Scalar* k35;
Scalar stepError;
Scalar predictedStep;
DifferentialSystem<Scalar>* system;
};
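// A minimal sketch of the step-size controller evaluated above, under the
// assumption that a34 acts as the safety factor and a33 as the error exponent
// of the embedded pair; the clamp is a common safeguard added here, not taken
// from the original code.
#include <algorithm>
#include <cmath>
static double PredictStepSketch(double h, double scaledError,
                                double safety = 0.9, double exponent = 0.2)
{
    // scaledError > 1 means the step failed its tolerance test
    double factor = safety * std::pow(1.0 / scaledError, exponent);
    factor = std::max(0.2, std::min(factor, 5.0)); // keep step changes bounded
    return h * factor;
}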
|
AtomicOP.h | #ifndef ATOMICOP_H_
#define ATOMICOP_H_
/*
* AtomicOP.h:
* a list of atomic operations
*
* Created on: June 11, 2017
* Author: yue_zhang(suda), mszhang
*/
/*
ActivateNode
TanhNode
SigmoidNode
ReluNode
IndexNode
PSubNode
PDotNode
*/
#include "Param.h"
#include "MyLib.h"
#include "Node.h"
#include "Graph.h"
#include "ModelUpdate.h"
class ActivateNode :public Node {
public:
Node* in;
dtype(*activate)(const dtype&);
dtype(*derivate)(const dtype&, const dtype&);
ActivateNode() : Node("activate") {
in = nullptr;
activate = ftanh;
derivate = dtanh;
}
~ActivateNode() = default;
void setFunctions(dtype(*f)(const dtype&), dtype(*f_deri)(const dtype&, const dtype&)) {
activate = f;
derivate = f_deri;
}
void forward(Graph *cg, Node* x) {
in = x;
in->addParent(this);
cg->addNode(this);
}
void compute() {
val().vec() = in->val().vec().unaryExpr(ptr_fun(activate));
}
void backward() {
in->loss().vec() += loss().vec() * in->val().vec().binaryExpr(val().vec(),
ptr_fun(derivate));
}
PExecutor generate();
bool typeEqual(Node* other) {
bool result = Node::typeEqual(other);
return result;
}
};
class ActivateExecutor :public Executor {
};
PExecutor ActivateNode::generate() {
ActivateExecutor* exec = new ActivateExecutor();
exec->batch.push_back(this);
return exec;
};
class TanhNode :public Node {
public:
Node* in;
public:
TanhNode() : Node("tanh") {
in = nullptr;
}
~TanhNode() {
in = nullptr;
}
public:
void forward(Graph &graph, Node &input) {
this->forward(&graph, &input);
}
void forward(Graph *cg, Node* x) {
in = x;
in->addParent(this);
cg->addNode(this);
}
public:
void compute() {
val().vec() = in->val().vec().unaryExpr(ptr_fun(ftanh));
}
void backward() {
in->loss().vec() += loss().vec() * in->val().vec().binaryExpr(val().vec(), ptr_fun(dtanh));
}
public:
PExecutor generate();
// subclasses with extra state should override this for a deeper comparison
bool typeEqual(Node* other) {
bool result = Node::typeEqual(other);
return result;
}
};
class TanhExecutor :public Executor {
public:
int dim;
Tensor1D y, x;
int sumDim;
#if USE_GPU
void forward() {
int count = batch.size();
std::vector<dtype*> xs, ys;
xs.reserve(count);
ys.reserve(count);
for (Node *n : batch) {
TanhNode *tanh = static_cast<TanhNode*>(n);
#if TEST_CUDA
tanh->in->val().copyFromHostToDevice();
#endif
xs.push_back(tanh->in->val().value);
ys.push_back(tanh->val().value);
}
n3ldg_cuda::TanhForward(n3ldg_cuda::ActivatedEnum::TANH, xs, count, dim, ys);
#if TEST_CUDA
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
n3ldg_cuda::Assert(batch.at(idx)->getVal().verify("Tanh forward"));
}
#endif
}
#else
void forward() {
int count = batch.size();
//#pragma omp parallel for
sumDim = 0;
for (int idx = 0; idx < count; idx++) {
sumDim += batch[idx]->getDim();
}
x.init(sumDim);
y.init(sumDim);
int offset = 0;
for (int idx = 0; idx < count; idx++) {
TanhNode* ptr = (TanhNode*)batch[idx];
for (int idy = 0; idy < ptr->getDim(); idy++) {
x[offset + idy] = ptr->in->val()[idy];
}
offset += ptr->getDim();
}
y.vec() = x.vec().unaryExpr(ptr_fun(ftanh));
offset = 0;
for (int idx = 0; idx < count; idx++) {
TanhNode* ptr = (TanhNode*)batch[idx];
for (int idy = 0; idy < ptr->getDim(); idy++) {
ptr->val()[idy] = y[offset + idy];
}
offset += ptr->getDim();
}
}
#endif
#if USE_GPU
void backward() {
int count = batch.size();
std::vector<dtype*> vals, losses, in_losses;
vals.reserve(count);
losses.reserve(count);
in_losses.reserve(count);
for (Node *n : batch) {
TanhNode *tanh = static_cast<TanhNode*>(n);
#if TEST_CUDA
tanh->loss().copyFromHostToDevice();
tanh->in->loss().copyFromHostToDevice();
#endif
vals.push_back(tanh->val().value);
losses.push_back(tanh->loss().value);
in_losses.push_back(tanh->in->loss().value);
}
n3ldg_cuda::TanhBackward(n3ldg_cuda::ActivatedEnum::TANH, losses, vals, count, dim,
in_losses);
#if TEST_CUDA
for (Node *n : batch) {
n->backward();
}
for (Node *n : batch) {
TanhNode *tanh = static_cast<TanhNode*>(n);
n3ldg_cuda::Assert(tanh->in->getLoss().verify("TanhExecutor backward"));
}
#endif
}
#else
void backward() {
int count = batch.size();
//#pragma omp parallel for
Tensor1D lx, ly;
lx.init(sumDim);
ly.init(sumDim);
int offset = 0;
for (int idx = 0; idx < count; idx++) {
TanhNode* ptr = (TanhNode*)batch[idx];
for (int idy = 0; idy < ptr->getDim(); idy++) {
ly[offset + idy] = ptr->loss()[idy];
}
offset += ptr->getDim();
}
lx.vec() = ly.vec() * x.vec().binaryExpr(y.vec(), ptr_fun(dtanh));
offset = 0;
for (int idx = 0; idx < count; idx++) {
TanhNode* ptr = (TanhNode*)batch[idx];
for (int idy = 0; idy < ptr->getDim(); idy++) {
ptr->in->loss()[idy] += lx[offset + idy];
}
offset += ptr->getDim();
}
}
#endif
};
PExecutor TanhNode::generate() {
TanhExecutor* exec = new TanhExecutor();
exec->batch.push_back(this);
exec->dim = getDim();
return exec;
};
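// A self-contained sketch of the batching trick in TanhExecutor::forward,
// using Eigen as a stand-in for the Tensor1D wrapper (an assumption): all
// node inputs are concatenated so tanh runs in one vectorized pass.
#include <Eigen/Dense>
#include <vector>
static Eigen::ArrayXf BatchedTanhSketch(const std::vector<Eigen::ArrayXf>& inputs) {
    Eigen::Index sumDim = 0;
    for (const auto& v : inputs) sumDim += v.size();
    Eigen::ArrayXf packed(sumDim);
    Eigen::Index offset = 0;
    for (const auto& v : inputs) {              // pack every input
        packed.segment(offset, v.size()) = v;
        offset += v.size();
    }
    return packed.tanh();                       // one pass; caller unpacks
}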
class SigmoidNode :public Node {
public:
Node* in;
SigmoidNode() : Node("sigmoid") {
in = nullptr;
}
~SigmoidNode() {
in = nullptr;
}
void forward(Graph &graph, Node &input) {
this->forward(&graph, &input);
}
void forward(Graph *cg, Node* x) {
in = x;
in->addParent(this);
cg->addNode(this);
}
public:
void compute() {
val().vec() = in->val().vec().unaryExpr(ptr_fun(fsigmoid));
}
void backward() {
in->loss().vec() += loss().vec() * in->val().vec().binaryExpr(val().vec(),
ptr_fun(dsigmoid));
}
public:
PExecutor generate();
// subclasses with extra state should override this for a deeper comparison
bool typeEqual(Node* other) {
bool result = Node::typeEqual(other);
return result;
}
};
class SigmoidExecutor :public Executor {
public:
int dim;
public:
Tensor1D x, y;
int sumDim;
#if USE_GPU
void forward() {
int count = batch.size();
std::vector<dtype*> xs, ys;
xs.reserve(count);
ys.reserve(count);
for (Node *n : batch) {
SigmoidNode *sig = static_cast<SigmoidNode*>(n);
#if TEST_CUDA
sig->in->val().copyFromHostToDevice();
#endif
xs.push_back(sig->in->val().value);
ys.push_back(sig->val().value);
}
n3ldg_cuda::TanhForward(n3ldg_cuda::ActivatedEnum::SIGMOID, xs, count, dim, ys);
#if TEST_CUDA
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
n3ldg_cuda::Assert(batch.at(idx)->getVal().verify("Sigmoid forward"));
}
#endif
}
#else
#endif
#if USE_GPU
void backward() {
int count = batch.size();
std::vector<dtype*> vals, losses, in_losses;
vals.reserve(count);
losses.reserve(count);
in_losses.reserve(count);
for (Node *n : batch) {
SigmoidNode *sig = static_cast<SigmoidNode*>(n);
#if TEST_CUDA
sig->loss().copyFromHostToDevice();
sig->in->loss().copyFromHostToDevice();
#endif
vals.push_back(sig->val().value);
losses.push_back(sig->loss().value);
in_losses.push_back(sig->in->loss().value);
}
n3ldg_cuda::TanhBackward(n3ldg_cuda::ActivatedEnum::SIGMOID, losses, vals, count, dim,
in_losses);
#if TEST_CUDA
for (Node *n : batch) {
n->backward();
}
for (Node *n : batch) {
SigmoidNode *sig = static_cast<SigmoidNode*>(n);
n3ldg_cuda::Assert(sig->in->getLoss().verify("SigmoidExecutor backward"));
}
#endif
}
#else
#endif
};
PExecutor SigmoidNode::generate() {
SigmoidExecutor* exec = new SigmoidExecutor();
exec->batch.push_back(this);
exec->dim = getDim();
return exec;
};
class ReluNode :public Node {
public:
Node* in;
public:
ReluNode() : Node("relu") {
in = nullptr;
}
~ReluNode() {
in = nullptr;
}
void forward(Graph *cg, Node* x) {
in = x;
in->addParent(this);
cg->addNode(this);
}
public:
void compute() {
val().vec() = in->val().vec().unaryExpr(ptr_fun(frelu));
}
void backward() {
in->loss().vec() += loss().vec() * in->val().vec().binaryExpr(val().vec(), ptr_fun(drelu));
}
public:
PExecutor generate();
// subclasses with extra state should override this for a deeper comparison
bool typeEqual(Node* other) {
bool result = Node::typeEqual(other);
return result;
}
};
class ReluExecutor :public Executor {};
PExecutor ReluNode::generate() {
ReluExecutor* exec = new ReluExecutor();
exec->batch.push_back(this);
return exec;
};
class PDotNode : public Node {
public:
Node* in1, *in2;
PDotNode() : Node("point-dot", 1) {
in1 = nullptr;
in2 = nullptr;
}
void init(int dim = 1){
if (dim != 1) {
abort();
}
Node::init(dim);
}
void forward(Graph *cg, Node* x1, Node* x2) {
in1 = x1;
in2 = x2;
in1->addParent(this);
in2->addParent(this);
cg->addNode(this);
}
void compute() {
val()[0] = 0.0;
for (int idx = 0; idx < in1->getDim(); idx++) {
val()[0] += in1->val()[idx] * in2->val()[idx];
}
}
void backward() {
for (int idx = 0; idx < in1->getDim(); idx++) {
in1->loss()[idx] += loss()[0] * in2->val()[idx];
in2->loss()[idx] += loss()[0] * in1->val()[idx];
}
}
PExecutor generate();
};
#if USE_GPU
class PDotExecutor :public Executor {
public:
void forward() {
int count = batch.size();
std::vector<dtype*> vals;
ins1.reserve(count);
ins2.reserve(count);
vals.reserve(count);
for (Node *node : batch) {
PDotNode *dot = static_cast<PDotNode*>(node);
ins1.push_back(dot->in1->val().value);
ins2.push_back(dot->in2->val().value);
vals.push_back(dot->val().value);
}
n3ldg_cuda::PDotForward(ins1, ins2, count,
static_cast<PDotNode*>(batch.at(0))->in1->getDim(), vals);
#if TEST_CUDA
for (Node *node : batch) {
PDotNode *dot = static_cast<PDotNode*>(node);
n3ldg_cuda::Assert(dot->in1->getVal().verify("PDot in1"));
n3ldg_cuda::Assert(dot->in2->getVal().verify("PDot in2"));
node->compute();
n3ldg_cuda::Assert(node->getVal().verify("PDot forward"));
}
#endif
}
void backward() {
int count = batch.size();
std::vector<dtype*> losses, in_losses1, in_losses2;
losses.reserve(count);
in_losses1.reserve(count);
in_losses2.reserve(count);
for (Node *node : batch) {
PDotNode *dot = static_cast<PDotNode*>(node);
losses.push_back(dot->loss().value);
in_losses1.push_back(dot->in1->loss().value);
in_losses2.push_back(dot->in2->loss().value);
}
n3ldg_cuda::PDotBackward(losses, ins1, ins2, count,
static_cast<PDotNode*>(batch.at(0))->in1->getDim(), in_losses1,
in_losses2);
#if TEST_CUDA
for (int idx = 0; idx < count; idx++) {
batch[idx]->backward();
n3ldg_cuda::Assert(batch[idx]->getLoss().verify("PDotExecutor backward"));
}
for (Node *node : batch) {
PDotNode *dot = static_cast<PDotNode*>(node);
n3ldg_cuda::Assert(dot->in1->getLoss().verify("PDotExecutor backward in1"));
n3ldg_cuda::Assert(dot->in2->getLoss().verify("PDotExecutor backward in2"));
}
#endif
}
private:
std::vector<dtype*> ins1;
std::vector<dtype*> ins2;
};
#else
class PDotExecutor :public Executor {
};
#endif
PExecutor PDotNode::generate() {
PDotExecutor* exec = new PDotExecutor();
exec->batch.push_back(this);
return exec;
}
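// A minimal standalone sketch of the math PDotNode implements: y = <a, b>,
// with gradients dL/da = gy * b and dL/db = gy * a accumulated, matching the
// += in backward() above; names here are illustrative only.
#include <cstddef>
static double PDotForwardSketch(const double* a, const double* b, size_t n) {
    double y = 0.0;
    for (size_t i = 0; i < n; ++i) y += a[i] * b[i];
    return y;
}
static void PDotBackwardSketch(double gy, const double* a, const double* b,
                               double* ga, double* gb, size_t n) {
    for (size_t i = 0; i < n; ++i) {
        ga[i] += gy * b[i];
        gb[i] += gy * a[i];
    }
}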
class DropoutNode : public Node {
public:
DropoutNode(dtype dropout, bool is_training) : Node("dropout"), drop_value_(dropout),
is_training_(is_training) {}
void init(int dimm) override {
Node::init(dimm);
drop_mask_.init(dimm);
}
#if USE_GPU
void initOnHostAndDevice(int ndim) override {
Node::initOnHostAndDevice(ndim);
drop_mask_.init(ndim);
}
#endif
virtual void generate_dropmask() {
int dropNum = (int)(getDim() * drop_value_);
std::vector<int> tmp_masks(getDim());
for (int idx = 0; idx < getDim(); idx++) {
tmp_masks[idx] = idx < dropNum ? 0 : 1;
}
random_shuffle(tmp_masks.begin(), tmp_masks.end());
for (int idx = 0; idx < getDim(); idx++) {
drop_mask_[idx] = tmp_masks[idx];
}
}
void forward(Graph &graph, Node &x) {
in_ = &x;
in_->addParent(this);
graph.addNode(this);
}
void compute() override {
if (is_training_) {
#if !TEST_CUDA
generate_dropmask();
#endif
} else {
drop_mask_ = 1 - drop_value_;
}
// cout << boost::format("compute is_training:%1%\n") % is_training_;
// std::cout << "before compute:" << in_->val().toString() << std::endl;
val().vec() = in_->val().vec() * drop_mask_.vec();
// std::cout << "after compute:" << val().toString() << std::endl;
}
void backward() override {
// cout << boost::format("backward is_training:%1%\n") % is_training_;
// std::cout << "before backward:" << loss().toString() << std::endl;
in_->loss().vec() += loss().vec() * drop_mask_.vec();
// std::cout << "after backward:" << in_->loss().toString() << std::endl;
}
bool typeEqual(Node *other) override {
DropoutNode *o = static_cast<DropoutNode*>(other);
if (o->is_training_ != is_training_) {
std::cerr << "is_training not equal" << std::endl;
abort();
}
return Node::typeEqual(other) && abs(drop_value_ - o->drop_value_) < 0.001f;
}
size_t typeHashCode() const override {
return Node::typeHashCode() ^ (std::hash<int>{}((int)(10000 * drop_value_)) << 1);
}
PExecutor generate() override;
Node* in() {
return in_;
}
bool isTraining() {
return is_training_;
}
Tensor1D &dropMask() {
return drop_mask_;
}
private:
Node* in_ = nullptr;
Tensor1D drop_mask_;
dtype drop_value_ = 0.0f;
bool is_training_ = true;
};
class DropoutExecutor :public Executor {
public:
Tensor2D drop_mask;
dtype drop_value;
int dim;
bool is_training;
#if USE_GPU
void CalculateDropMask(int count, int dim, const Tensor2D &mask) {
if (is_training) {
n3ldg_cuda::CalculateDropoutMask(drop_value, count, dim, mask.value);
}
}
void forward() {
int count = batch.size();
std::vector<dtype*> xs, ys;
xs.reserve(count);
ys.reserve(count);
drop_mask.init(dim, count);
for (Node *n : batch) {
DropoutNode *drop = static_cast<DropoutNode*>(n);
#if TEST_CUDA
drop->in()->val().copyFromHostToDevice();
#endif
xs.push_back(drop->in()->getVal().value);
ys.push_back(drop->getVal().value);
}
CalculateDropMask(count, dim, drop_mask);
n3ldg_cuda::DropoutForward(xs, count, dim, is_training, drop_mask.value, drop_value, ys);
#if TEST_CUDA
drop_mask.copyFromDeviceToHost();
for (int i = 0; i < count; ++i) {
for (int j = 0; j < dim; ++j) {
dtype v = drop_mask[i][j];
static_cast<DropoutNode*>(batch.at(i))->dropMask()[j] = v <= drop_value ? 0 : 1;
}
}
for (int idx = 0; idx < count; idx++) {
batch[idx]->compute();
n3ldg_cuda::Assert(batch.at(idx)->val().verify("Dropout forward"));
}
#endif
}
void backward() {
int count = batch.size();
std::vector<dtype*> vals, losses, in_losses;
vals.reserve(count);
losses.reserve(count);
in_losses.reserve(count);
for (Node *n : batch) {
DropoutNode *drop = static_cast<DropoutNode*>(n);
#if TEST_CUDA
drop->loss().copyFromHostToDevice();
drop->in()->loss().copyFromHostToDevice();
#endif
vals.push_back(drop->val().value);
losses.push_back(drop->loss().value);
in_losses.push_back(drop->in()->loss().value);
}
n3ldg_cuda::DropoutBackward(losses, vals, count, dim, is_training, drop_mask.value,
drop_value, in_losses);
#if TEST_CUDA
for (Node *n : batch) {
n->backward();
}
for (Node *n : batch) {
DropoutNode *drop = static_cast<DropoutNode*>(n);
n3ldg_cuda::Assert(drop->in()->loss().verify("DropoutExecutor backward"));
}
#endif
}
#endif
};
PExecutor DropoutNode::generate() {
DropoutExecutor* exec = new DropoutExecutor();
exec->batch.push_back(this);
exec->is_training = isTraining();
exec->dim = getDim();
return exec;
}
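// A standalone sketch of the mask scheme in generate_dropmask above, written
// with <random> instead of the deprecated random_shuffle (an editorial
// substitution): exactly floor(dim * p) entries are zeroed while training,
// and at inference the mask degenerates to the constant expected scale 1 - p.
#include <algorithm>
#include <random>
#include <vector>
static std::vector<float> MakeDropMaskSketch(int dim, float p, bool training,
                                             std::mt19937& rng) {
    if (!training) return std::vector<float>(dim, 1.0f - p);
    std::vector<float> mask(dim, 1.0f);
    std::fill(mask.begin(), mask.begin() + (int)(dim * p), 0.0f); // drop slots
    std::shuffle(mask.begin(), mask.end(), rng);                  // randomize
    return mask;
}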
#endif
|
kmp_detach_tasks_t3.c | // RUN: %libomp-compile && env OMP_NUM_THREADS='3' %libomp-run
// RUN: %libomp-compile && env OMP_NUM_THREADS='1' %libomp-run
// The runtime currently does not get dependency information from GCC.
// UNSUPPORTED: gcc
// REQUIRES: !abt
#include <stdio.h>
#include <omp.h>
#include "omp_my_sleep.h"
// detached untied
#define PTASK_FLAG_DETACHABLE 0x40
// OpenMP RTL interfaces
typedef unsigned long long kmp_uint64;
typedef long long kmp_int64;
typedef struct ID {
int reserved_1;
int flags;
int reserved_2;
int reserved_3;
char *psource;
} id;
// Compiler-generated code (emulation)
typedef struct ident {
void* dummy; // not used in the library
} ident_t;
typedef enum kmp_event_type_t {
KMP_EVENT_UNINITIALIZED = 0,
KMP_EVENT_ALLOW_COMPLETION = 1
} kmp_event_type_t;
typedef struct {
kmp_event_type_t type;
union {
void *task;
} ed;
} kmp_event_t;
typedef struct shar { // shareds used in the task
} *pshareds;
typedef struct task {
pshareds shareds;
int(*routine)(int,struct task*);
int part_id;
// void *destructor_thunk; // optional, needs flag setting if provided
// int priority; // optional, needs flag setting if provided
// ------------------------------
// privates used in the task:
omp_event_handle_t evt;
} *ptask, kmp_task_t;
typedef struct DEP {
size_t addr;
size_t len;
int flags;
} dep;
typedef int(* task_entry_t)( int, ptask );
#ifdef __cplusplus
extern "C" {
#endif
extern int __kmpc_global_thread_num(void *id_ref);
extern int** __kmpc_omp_task_alloc(id *loc, int gtid, int flags,
size_t sz, size_t shar, task_entry_t rtn);
extern int __kmpc_omp_task_with_deps(id *loc, int gtid, ptask task, int nd,
dep *dep_lst, int nd_noalias, dep *noalias_dep_lst);
extern int __kmpc_omp_task(id *loc, int gtid, kmp_task_t *task);
extern omp_event_handle_t __kmpc_task_allow_completion_event(
ident_t *loc_ref, int gtid, kmp_task_t *task);
#ifdef __cplusplus
}
#endif
int volatile checker;
// User's code, outlined into task entry
int task_entry(int gtid, ptask task) {
checker = 1;
return 0;
}
int main() {
int i, j, gtid = __kmpc_global_thread_num(NULL);
int nt = omp_get_max_threads();
ptask task;
pshareds psh;
checker = 0;
omp_set_dynamic(0);
#pragma omp parallel //num_threads(N)
{
#pragma omp master
{
#pragma omp task depend(inout:nt)
{
my_sleep(2.0);
}
int gtid = __kmpc_global_thread_num(NULL);
omp_event_handle_t evt;
/*
#pragma omp task detach(evt)
{}
*/
task = (ptask)__kmpc_omp_task_alloc(NULL,gtid,PTASK_FLAG_DETACHABLE,
sizeof(struct task),sizeof(struct shar),&task_entry);
psh = task->shareds;
evt = (omp_event_handle_t)__kmpc_task_allow_completion_event(NULL,gtid,task);
task->evt = evt;
dep sdep;
sdep.addr = (size_t)&nt;
sdep.len = 0L;
sdep.flags = 3;
__kmpc_omp_task_with_deps(NULL,gtid,task,1,&sdep,0,0);
//__kmpc_omp_task(NULL, gtid, task);
omp_fulfill_event(evt);
#pragma omp taskwait
;
// printf("after tw %d\n", omp_get_thread_num());
} // end master
} // end parallel
// check results
if (checker == 1) {
printf("passed\n");
return 0;
} else {
printf("failed\n");
return 1;
}
}
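// For reference, a hedged sketch of the same pattern written with the
// standard OpenMP 5.0 detach clause (the commented-out form above) instead of
// the raw __kmpc entry points; assumes a compiler with detach support. Not
// called by the test.
int detach_clause_demo() {
  int done = 0;
  omp_event_handle_t evt;
  #pragma omp parallel
  #pragma omp master
  {
    #pragma omp task detach(evt) shared(done)
    { done = 1; }
    // the detached task only completes once its event is fulfilled
    omp_fulfill_event(evt);
    #pragma omp taskwait
  }
  return done == 1 ? 0 : 1;
}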
|
hoImageRegHomogenousTransformation.h | /** \file hoImageRegHomogenousTransformation.h
\brief Define the class for the homogeneous geometry transformation in Gadgetron registration
\author Hui Xue
*/
#ifndef hoImageRegHomogenousTransformation_H_
#define hoImageRegHomogenousTransformation_H_
#pragma once
#include "hoImageRegParametricTransformation.h"
#include "hoMatrix.h"
#include "hoNDArray_linalg.h"
namespace Gadgetron {
/// Homogeneous transformation
template<typename ValueType, unsigned int D>
class hoImageRegHomogenousTransformation : public hoImageRegParametricTransformation<ValueType, D, D>
{
public:
typedef hoImageRegParametricTransformation<ValueType, D, D> BaseClass;
typedef hoImageRegHomogenousTransformation<ValueType, D> Self;
typedef ValueType T;
typedef typename BaseClass::input_point_type input_point_type;
typedef typename BaseClass::output_point_type output_point_type;
typedef typename BaseClass::jacobian_parameter_type jacobian_parameter_type;
typedef typename BaseClass::jacobian_position_type jacobian_position_type;
typedef typename BaseClass::ParaStatus ParaStatus;
typedef typename BaseClass::ParaStatusType ParaStatusType;
hoImageRegHomogenousTransformation();
virtual ~hoImageRegHomogenousTransformation();
// get/set the ith parameter
virtual ValueType get_parameter(size_t i) const;
virtual void set_parameter(size_t i, ValueType v);
virtual bool invertTransformation();
virtual bool setIdentity();
virtual bool transform(const T* pt_in, T* pt_out) const;
virtual bool transform(const T& xi, const T& yi, T& xo, T& yo) const;
virtual bool transform(const T& xi, const T& yi, const T& zi, T& xo, T& yo, T& zo) const;
virtual bool transform(const size_t* pt_in, T* pt_out) const;
virtual bool transform(const size_t* pt_in, size_t N, T* pt_out) const;
virtual bool transform(const size_t& xi, const size_t& yi, T& xo, T& yo) const;
virtual bool transform(const size_t* xi, const size_t* yi, size_t N, T* xo, T* yo) const;
virtual bool transform(const size_t& xi, const size_t& yi, const size_t& zi, T& xo, T& yo, T& zo) const;
virtual bool transform(const size_t* xi, const size_t* yi, const size_t* zi, size_t N, T* xo, T* yo, T* zo) const;
/// compute jacobian matrix to parameters
/// D*num_parameters_ matrix
virtual bool jacobianParameter(const input_point_type& pos, jacobian_parameter_type& jac);
/// compute jacobian matrix to spatial position
/// D*D matrix
virtual bool jacobianPosition(const input_point_type& pos, jacobian_position_type& jac);
virtual void print(std::ostream& os) const;
virtual void printTransform(std::ostream& os) const;
virtual std::string transformationName() const
{
return std::string("hoImageRegHomogenousTransformation");
}
using BaseClass::gt_timer1_;
using BaseClass::gt_timer2_;
using BaseClass::gt_timer3_;
using BaseClass::performTiming_;
using BaseClass::gt_exporter_;
using BaseClass::debugFolder_;
protected:
using BaseClass::num_parameters_;
using BaseClass::para_status_;
/// transformation matrix
hoMatrix<ValueType> matrix_;
};
template <typename ValueType, unsigned int D>
hoImageRegHomogenousTransformation<ValueType, D>::hoImageRegHomogenousTransformation() : BaseClass()
{
num_parameters_ = D*(D+1);
para_status_.resize(num_parameters_, BaseClass::Active);
GADGET_CHECK_THROW(matrix_.createMatrix(D+1, D+1));
GADGET_CHECK_THROW(matrix_.setIdentity());
}
template <typename ValueType, unsigned int D>
hoImageRegHomogenousTransformation<ValueType, D>::~hoImageRegHomogenousTransformation()
{
}
template <typename ValueType, unsigned int D>
inline ValueType hoImageRegHomogenousTransformation<ValueType, D>::get_parameter(size_t i) const
{
GADGET_DEBUG_CHECK_THROW(i<num_parameters_);
return matrix_( i/(D+1), i%(D+1) );
}
template <typename ValueType, unsigned int D>
inline void hoImageRegHomogenousTransformation<ValueType, D>::set_parameter(size_t i, ValueType v)
{
GADGET_DEBUG_CHECK_THROW(i<num_parameters_);
matrix_( i/(D+1), i%(D+1) ) = v;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::invertTransformation()
{
GADGET_CHECK_EXCEPTION_RETURN_FALSE(Gadgetron::invert(matrix_) );
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::setIdentity()
{
GADGET_CHECK_RETURN_FALSE( matrix_.setIdentity() );
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const T* pt_in, T* pt_out) const
{
try
{
unsigned int ii, jj;
for ( ii=0; ii<D; ii++ )
{
pt_out[ii] = 0;
for ( jj=0; jj<D; jj++ )
{
pt_out[ii] += matrix_(ii, jj) * pt_in[jj];
}
pt_out[ii] += matrix_(ii, D);
}
}
catch(...)
{
GERROR_STREAM("Error happened in hoImageRegHomogenousTransformation<ValueType, D>::transform(const T* pt_in, T* pt_out) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const T& xi, const T& yi, T& xo, T& yo) const
{
try
{
xo = matrix_(0, 0)*xi + matrix_(0, 1)*yi + matrix_(0, 2);
yo = matrix_(1, 0)*xi + matrix_(1, 1)*yi + matrix_(1, 2);
}
catch(...)
{
GERROR_STREAM("Error happened in hoImageRegHomogenousTransformation<ValueType, D>::transform(const T& xi, const T& yi, T& xo, T& yo) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const T& xi, const T& yi, const T& zi, T& xo, T& yo, T& zo) const
{
try
{
xo = matrix_(0, 0)*xi + matrix_(0, 1)*yi + matrix_(0, 2)*zi + matrix_(0, 3);
yo = matrix_(1, 0)*xi + matrix_(1, 1)*yi + matrix_(1, 2)*zi + matrix_(1, 3);
zo = matrix_(2, 0)*xi + matrix_(2, 1)*yi + matrix_(2, 2)*zi + matrix_(2, 3);
}
catch(...)
{
GERROR_STREAM("Error happened in hoImageRegHomogenousTransformation<ValueType, D>::transform(const T& xi, const T& yi, const T& zi, T& xo, T& yo, T& zo) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* pt_in, T* pt_out) const
{
try
{
unsigned int ii, jj;
for ( ii=0; ii<D; ii++ )
{
pt_out[ii] = 0;
for ( jj=0; jj<D; jj++ )
{
pt_out[ii] += matrix_(ii, jj) * pt_in[jj];
}
pt_out[ii] += matrix_(ii, D);
}
}
catch(...)
{
GERROR_STREAM("Error happened in hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* pt_in, T* pt_out) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* pt_in, size_t N, T* pt_out) const
{
try
{
long long ii;
#pragma omp parallel for default(none) private(ii) shared(pt_in, pt_out, N)
for ( ii=0; ii<(long long)N; ii++ )
{
this->transform(pt_in+ii*D, pt_out+ii*D);
}
}
catch(...)
{
GERROR_STREAM("Errors happen in hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* pt_in, size_t N, T* pt_out) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t& xi, const size_t& yi, T& xo, T& yo) const
{
try
{
xo = matrix_(0, 0)*xi + matrix_(0, 1)*yi + matrix_(0, 2);
yo = matrix_(1, 0)*xi + matrix_(1, 1)*yi + matrix_(1, 2);
}
catch(...)
{
GERROR_STREAM("Error happened in hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t& xi, const size_t& yi, T& xo, T& yo) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* xi, const size_t* yi, size_t N, T* xo, T* yo) const
{
try
{
long long ii;
#pragma omp parallel for default(none) private(ii) shared(xi, yi, xo, yo, N)
for ( ii=0; ii<(long long)N; ii++ )
{
this->transform(xi[ii], yi[ii], xo[ii], yo[ii]);
}
}
catch(...)
{
GERROR_STREAM("Errors happen in hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* xi, const size_t* yi, size_t N, T* xo, T* yo) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t& xi, const size_t& yi, const size_t& zi, T& xo, T& yo, T& zo) const
{
try
{
xo = matrix_(0, 0)*xi + matrix_(0, 1)*yi + matrix_(0, 2)*zi + matrix_(0, 3);
yo = matrix_(1, 0)*xi + matrix_(1, 1)*yi + matrix_(1, 2)*zi + matrix_(1, 3);
zo = matrix_(2, 0)*xi + matrix_(2, 1)*yi + matrix_(2, 2)*zi + matrix_(2, 3);
}
catch(...)
{
GERROR_STREAM("Error happened in hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t& xi, const size_t& yi, const size_t& zi, T& xo, T& yo, T& zo) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* xi, const size_t* yi, const size_t* zi, size_t N, T* xo, T* yo, T* zo) const
{
try
{
long long ii;
#pragma omp parallel for default(none) private(ii) shared(xi, yi, zi, xo, yo, zo, N)
for ( ii=0; ii<(long long)N; ii++ )
{
this->transform(xi[ii], yi[ii], zi[ii], xo[ii], yo[ii], zo[ii]);
}
}
catch(...)
{
GERROR_STREAM("Errors happen in hoImageRegHomogenousTransformation<ValueType, D>::transform(const size_t* xi, const size_t* yi, const size_t* zi, size_t N, T* xo, T* yo, T* zo) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::jacobianParameter(const input_point_type& pos, jacobian_parameter_type& jac)
{
try
{
jac.createMatrix(D, num_parameters_);
Gadgetron::clear(jac);
if ( D == 2 )
{
jac(0, 0) = pos(0);
jac(0, 1) = pos(1);
jac(0, 2) = 1;
jac(1, 3) = pos(0);
jac(1, 4) = pos(1);
jac(1, 5) = 1;
}
else if ( D == 3 )
{
jac(0, 0) = pos(0);
jac(0, 1) = pos(1);
jac(0, 2) = pos(2);
jac(0, 3) = 1;
jac(1, 4) = pos(0);
jac(1, 5) = pos(1);
jac(1, 6) = pos(2);
jac(1, 7) = 1;
jac(2, 8) = pos(0);
jac(2, 9) = pos(1);
jac(2, 10) = pos(2);
jac(2, 11) = 1;
}
else
{
unsigned int ii, jj;
for ( ii=0; ii<D; ii++ )
{
for ( jj=0; jj<D; jj++ )
{
jac(ii, ii*(D+1)+jj) = pos(jj);
}
jac(ii, ii*(D+1)+D) = 1;
}
}
}
catch(...)
{
GERROR_STREAM("Errors happen in hoImageRegHomogenousTransformation<ValueType, D>::jacobianParameter(const input_point_type& pos, jacobian_parameter_type& jac) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
bool hoImageRegHomogenousTransformation<ValueType, D>::jacobianPosition(const input_point_type& pos, jacobian_position_type& jac)
{
try
{
jac.createMatrix(D, D);
Gadgetron::clear(jac);
if ( D == 2 )
{
jac(0, 0) = matrix_(0, 0);
jac(0, 1) = matrix_(0, 1);
jac(1, 0) = matrix_(1, 0);
jac(1, 1) = matrix_(1, 1);
}
else if ( D == 3 )
{
jac(0, 0) = matrix_(0, 0);
jac(0, 1) = matrix_(0, 1);
jac(0, 2) = matrix_(0, 2);
jac(1, 0) = matrix_(1, 0);
jac(1, 1) = matrix_(1, 1);
jac(1, 2) = matrix_(1, 2);
jac(2, 0) = matrix_(2, 0);
jac(2, 1) = matrix_(2, 1);
jac(2, 2) = matrix_(2, 2);
}
else
{
unsigned int ii, jj;
for ( ii=0; ii<D; ii++ )
{
for ( jj=0; jj<D; jj++ )
{
jac(ii, jj) = matrix_(ii, jj);
}
}
}
}
catch(...)
{
GERROR_STREAM("Errors happen in hoImageRegHomogenousTransformation<ValueType, D>::jacobianPosition(const input_point_type& pos, jacobian_position_type& jac) ... ");
return false;
}
return true;
}
template <typename ValueType, unsigned int D>
void hoImageRegHomogenousTransformation<ValueType, D>::print(std::ostream& os) const
{
using namespace std;
os << "--------------Gagdgetron homogenous transformation -------------" << endl;
os << "Input dimension is : " << D << endl;
os << "Output dimension is : " << D << endl;
std::string elemTypeName = std::string(typeid(T).name());
os << "Transformation data type is : " << elemTypeName << std::endl;
os << "Number of parameters is : " << num_parameters_ << endl;
size_t i;
os << "Status of parameters: " << endl;
for ( i=0; i<this->num_parameters_; i++ )
{
os << "Para " << i << " : \t";
if ( para_status_[i] == BaseClass::Active )
{
os << "Active";
}
else if ( para_status_[i] == BaseClass::Inactive )
{
os << "Inactive";
}
else
{
os << "Unknown";
}
os << endl;
}
os << "Transformation: " << endl;
this->printTransform(os);
}
template <typename ValueType, unsigned int D>
void hoImageRegHomogenousTransformation<ValueType, D>::printTransform(std::ostream& os) const
{
using namespace std;
size_t i;
os << "[ ";
for ( i=0; i<this->num_parameters_; i++ )
{
os << this->get_parameter(i) << " \t";
}
os << " ]" << endl;
}
}
#endif // hoImageRegHomogenousTransformation_H_
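// A hypothetical usage sketch of the header above: for D = 2 the D*(D+1) = 6
// active parameters map row-major onto the top D rows of the (D+1)x(D+1)
// matrix, so parameter i addresses matrix_(i/(D+1), i%(D+1)).
#include "hoImageRegHomogenousTransformation.h"
static void homogenous_usage_sketch()
{
    Gadgetron::hoImageRegHomogenousTransformation<float, 2> T;
    T.setIdentity();
    T.set_parameter(2, 10.0f);        // matrix_(0, 2): x translation
    T.set_parameter(5, -4.0f);        // matrix_(1, 2): y translation
    float xo = 0, yo = 0;
    T.transform(3.0f, 5.0f, xo, yo);  // xo == 13, yo == 1
}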
|
Determanager.h | /*****************************************************************************************[Cooperation.h]
Copyright (c) 2008-2011, Youssef Hamadi, Saïd Jabbour and Lakhdar Saïs
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
associated documentation files (the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge, publish, distribute,
sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or
substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*******************************************************************************************/
/* importClauseSwitchMode : (Cooperation* coop)
Description :
In the deterministic case, the two barriers guarantee that no other thread can return to search while the import process is running.
Otherwise, any thread that has found a solution exits immediately.*/
//=================================================================================================
using namespace Minisat;
lbool Solver::importClauses(Cooperation* coop) {
//Control the limit size clause export
coop->updateLimitExportClauses(this);
switch(deterministic_mode){
case 0: // non deterministic case
{
for(int t = 0; t < coop->nThreads(); t++)
if(coop->answer(t) != l_Undef)
return coop->answer(t);
coop->importExtraClauses(this);
coop->importExtraUnits(this, extraUnits);
break;
}
case 1: // deterministic case static frequency
{
if((int) conflicts % coop->initFreq == 0 || coop->answer(threadId) != l_Undef){
#pragma omp barrier
for(int t = 0; t < coop->nThreads(); t++)
if(coop->answer(t) != l_Undef) return coop->answer(t);
coop->importExtraClauses(this);
coop->importExtraUnits(this, extraUnits);
#pragma omp barrier
}
break;
}
case 2: // deterministic case dynamic frequency
{
if(((int) conflicts % coop->deterministic_freq[threadId] == 0) || (coop->answer(threadId) != l_Undef)){
coop->learntsz[threadId] = nLearnts();
#pragma omp barrier
// each thread re-estimates its own barrier-synchronization frequency
coop->deterministic_freq[threadId] = updateFrequency(coop);
for(int t = 0; t < coop->nThreads(); t++)
if(coop->answer(t) != l_Undef) return coop->answer(t);
coop->importExtraClauses(this);
coop->importExtraUnits(this, extraUnits);
#pragma omp barrier
}
break;
}
}
return l_Undef;
}
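/* A minimal sketch of the two-barrier exchange used by the deterministic
modes above: the first barrier stops every thread's search before any import
begins, and the second keeps threads from re-entering search while others are
still importing; names are illustrative. */
static void deterministicExchangeSketch() {
#pragma omp parallel
{
// ... search until the synchronization point ...
#pragma omp barrier
// import the clauses exported by the other threads
#pragma omp barrier
// search resumes only after every thread has finished importing
}
}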
/*_________________________________________________________________________________________________
updateFrequency : (Cooperation* coop)
Description :
when det=2, each thread tries to estimate the number of conflicts after which it must join the barrier.
The estimate is based on the sizes of all threads' learnt-clause databases, on the assumption that
the larger the learnt base, the slower unit propagation becomes, which remains a reasonable estimate.
*/
int Solver::updateFrequency(Cooperation* coop){
double freq = 0;
int maxLearnts = 0;
for(int t = 0; t < coop->nThreads(); t++)
if((int)coop->learntsz[t] > maxLearnts)
maxLearnts = (int)coop->learntsz[t];
freq = coop->initFreq + (double)coop->initFreq * (maxLearnts - learnts.size()) / maxLearnts;
return (int) freq;
}
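/* Worked example of the estimate above, with assumed numbers: initFreq = 100,
maxLearnts = 2000 and a local base of 500 learnt clauses give
freq = 100 + 100 * (2000 - 500) / 2000 = 175 conflicts between barriers,
so threads with smaller learnt bases (faster unit propagation) are allotted
more conflicts before they must synchronize. */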
/*_________________________________________________________________________________________________
exportClause : (Cooperation* coop)
Description :
At level 0, propagated unit literals are exported to the other threads
*/
void Solver::exportClause(Cooperation* coop, vec<Lit>& learnt_clause) {
if(coop->limitszClauses() < 1)
return;
if(decisionLevel() == 0){
for(int i = tailUnitLit; i < trail.size(); i++)
coop->exportExtraUnit(this, trail[i]) ;
tailUnitLit = trail.size();
}else
coop->exportExtraClause(this, learnt_clause);
}
//=================================================================================================
// add Clauses received from others threads
CRef Solver::addExtraClause(vec<Lit>& lits){
CRef cr = ca.alloc(lits, true);
learnts.push(cr);
attachClause(cr);
claBumpActivity(ca[cr]);
return cr;
}
//=================================================================================================
// at level 0, unit extra clauses stored are propagated
void Solver::propagateExtraUnits(){
for(int i = 0; i < extraUnits.size(); i++)
if(value(extraUnits[i]) == l_Undef)
uncheckedEnqueue(extraUnits[i]);
}
|
atomic_messages.c | // RUN: %clang_cc1 -verify -fopenmp -ferror-limit 100 %s
int foo() {
L1:
foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
{
foo();
goto L1; // expected-error {{use of undeclared label 'L1'}}
}
goto L2; // expected-error {{use of undeclared label 'L2'}}
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
{
foo();
L2:
foo();
}
return 0;
}
struct S {
int a;
};
int readint() {
int a = 0, b = 0;
// Test for atomic read
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
foo();
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
a += b;
#pragma omp atomic read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected lvalue expression}}
a = 0;
#pragma omp atomic read
a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
a = b;
return 0;
}
int readS() {
struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'read' clause}}
#pragma omp atomic read read
// expected-error@+2 {{the statement for 'atomic read' must be an expression statement of form 'v = x;', where v and x are both lvalue expressions with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
a = b;
return a.a;
}
int writeint() {
int a = 0, b = 0;
// Test for atomic write
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
foo();
#pragma omp atomic write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected built-in assignment operator}}
a += b;
#pragma omp atomic write
a = 0;
#pragma omp atomic write
a = b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
a = b;
return 0;
}
int writeS() {
struct S a, b;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'write' clause}}
#pragma omp atomic write write
// expected-error@+2 {{the statement for 'atomic write' must be an expression statement of form 'x = expr;', where x is a lvalue expression with scalar type}}
// expected-note@+1 {{expected expression of scalar type}}
a = b;
return a.a;
}
int updateint() {
int a = 0, b = 0;
// Test for atomic update
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected an expression statement}}
;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
foo();
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected built-in binary operator}}
a = b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
a = b || a;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
a = a && b;
#pragma omp atomic update
// expected-error@+2 {{the statement for 'atomic update' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
a = (float)a + b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
a = 2 * b;
#pragma omp atomic
// expected-error@+2 {{the statement for 'atomic' must be an expression statement of form '++x;', '--x;', 'x++;', 'x--;', 'x binop= expr;', 'x = x binop expr' or 'x = expr binop x', where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
a = b + *&a;
#pragma omp atomic update
*&a = *&a + 2;
#pragma omp atomic update
a++;
#pragma omp atomic
++a;
#pragma omp atomic update
a--;
#pragma omp atomic
--a;
#pragma omp atomic update
a += b;
#pragma omp atomic
a %= b;
#pragma omp atomic update
a *= b;
#pragma omp atomic
a -= b;
#pragma omp atomic update
a /= b;
#pragma omp atomic
a &= b;
#pragma omp atomic update
a ^= b;
#pragma omp atomic
a |= b;
#pragma omp atomic update
a <<= b;
#pragma omp atomic
a >>= b;
#pragma omp atomic update
a = b + a;
#pragma omp atomic
a = a * b;
#pragma omp atomic update
a = b - a;
#pragma omp atomic
a = a / b;
#pragma omp atomic update
a = b & a;
#pragma omp atomic
a = a ^ b;
#pragma omp atomic update
a = b | a;
#pragma omp atomic
a = a << b;
#pragma omp atomic
a = b >> a;
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'update' clause}}
#pragma omp atomic update update
a /= b;
return 0;
}
int captureint() {
int a = 0, b = 0, c = 0;
// Test for atomic capture
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected compound statement}}
;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
foo();
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected built-in binary or unary operator}}
a = b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = b || a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected one of '+', '*', '-', '/', '&', '^', '|', '<<', or '>>' built-in operations}}
b = a = a && b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected assignment expression}}
a = b + *&a;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
{ a = b; }
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected exactly two expression statements}}
{}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
{a = b;a = b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be a compound statement of form '{v = x; x binop= expr;}', '{x binop= expr; v = x;}', '{v = x; x = x binop expr;}', '{v = x; x = expr binop x;}', '{x = x binop expr; v = x;}', '{x = expr binop x; v = x;}' or '{v = x; x = expr;}', '{v = x; x++;}', '{v = x; ++x;}', '{++x; v = x;}', '{x++; v = x;}', '{v = x; x--;}', '{v = x; --x;}', '{--x; v = x;}', '{x--; v = x;}' where x is an l-value expression with scalar type}}
// expected-note@+1 {{expected in right hand side of the first expression}}
{a = b; a = b || a;}
#pragma omp atomic capture
{b = a; a = a && b;}
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
b = a = (float)a + b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
b = a = 2 * b;
#pragma omp atomic capture
// expected-error@+2 {{the statement for 'atomic capture' must be an expression statement of form 'v = ++x;', 'v = --x;', 'v = x++;', 'v = x--;', 'v = x binop= expr;', 'v = x = x binop expr' or 'v = x = expr binop x', where x and v are both l-value expressions with scalar type}}
// expected-note@+1 {{expected in right hand side of expression}}
b = a = b + *&a;
#pragma omp atomic capture
c = *&a = *&a + 2;
#pragma omp atomic capture
c = a++;
#pragma omp atomic capture
c = ++a;
#pragma omp atomic capture
c = a--;
#pragma omp atomic capture
c = --a;
#pragma omp atomic capture
c = a += b;
#pragma omp atomic capture
c = a %= b;
#pragma omp atomic capture
c = a *= b;
#pragma omp atomic capture
c = a -= b;
#pragma omp atomic capture
c = a /= b;
#pragma omp atomic capture
c = a &= b;
#pragma omp atomic capture
c = a ^= b;
#pragma omp atomic capture
c = a |= b;
#pragma omp atomic capture
c = a <<= b;
#pragma omp atomic capture
c = a >>= b;
#pragma omp atomic capture
c = a = b + a;
#pragma omp atomic capture
c = a = a * b;
#pragma omp atomic capture
c = a = b - a;
#pragma omp atomic capture
c = a = a / b;
#pragma omp atomic capture
c = a = b & a;
#pragma omp atomic capture
c = a = a ^ b;
#pragma omp atomic capture
c = a = b | a;
#pragma omp atomic capture
c = a = a << b;
#pragma omp atomic capture
c = a = b >> a;
#pragma omp atomic capture
{ c = *&a; *&a = *&a + 2;}
#pragma omp atomic capture
{ *&a = *&a + 2; c = *&a;}
#pragma omp atomic capture
{c = a; a++;}
#pragma omp atomic capture
{c = a; (a)++;}
#pragma omp atomic capture
{++a;c = a;}
#pragma omp atomic capture
{c = a;a--;}
#pragma omp atomic capture
{--a;c = a;}
#pragma omp atomic capture
{c = a; a += b;}
#pragma omp atomic capture
{c = a; (a) += b;}
#pragma omp atomic capture
{a %= b; c = a;}
#pragma omp atomic capture
{c = a; a *= b;}
#pragma omp atomic capture
{a -= b;c = a;}
#pragma omp atomic capture
{c = a; a /= b;}
#pragma omp atomic capture
{a &= b; c = a;}
#pragma omp atomic capture
{c = a; a ^= b;}
#pragma omp atomic capture
{a |= b; c = a;}
#pragma omp atomic capture
{c = a; a <<= b;}
#pragma omp atomic capture
{a >>= b; c = a;}
#pragma omp atomic capture
{c = a; a = b + a;}
#pragma omp atomic capture
{a = a * b; c = a;}
#pragma omp atomic capture
{c = a; a = b - a;}
#pragma omp atomic capture
{a = a / b; c = a;}
#pragma omp atomic capture
{c = a; a = b & a;}
#pragma omp atomic capture
{a = a ^ b; c = a;}
#pragma omp atomic capture
{c = a; a = b | a;}
#pragma omp atomic capture
{a = a << b; c = a;}
#pragma omp atomic capture
{c = a; a = b >> a;}
#pragma omp atomic capture
{c = a; a = foo();}
// expected-error@+1 {{directive '#pragma omp atomic' cannot contain more than one 'capture' clause}}
#pragma omp atomic capture capture
b = a /= b;
return 0;
}
|
GB_binop__bset_uint64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bset_uint64)
// A.*B function (eWiseMult): GB (_AemultB_08__bset_uint64)
// A.*B function (eWiseMult): GB (_AemultB_02__bset_uint64)
// A.*B function (eWiseMult): GB (_AemultB_04__bset_uint64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bset_uint64)
// A*D function (colscale): GB ((none))
// D*A function (rowscale): GB ((none))
// C+=B function (dense accum): GB (_Cdense_accumB__bset_uint64)
// C+=b function (dense accum): GB (_Cdense_accumb__bset_uint64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bset_uint64)
// C=scalar+B GB (_bind1st__bset_uint64)
// C=scalar+B' GB (_bind1st_tran__bset_uint64)
// C=A+scalar GB (_bind2nd__bset_uint64)
// C=A'+scalar GB (_bind2nd_tran__bset_uint64)
// C type: uint64_t
// A type: uint64_t
// A pattern? 0
// B type: uint64_t
// B pattern? 0
// BinaryOp: cij = GB_BITSET (aij, bij, uint64_t, 64)
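// (Sketch only, not the shipped macro: GB_BITSET follows MATLAB-style
// 1-based bitset semantics, so a minimal definition would look like
//   #define GB_BITSET(x,k,type,nbits) (((k) < 1 || (k) > (nbits)) ? (x) : ((x) | (((type) 1) << ((k)-1))))
// see GB.h for the authoritative definition.)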
#define GB_ATYPE \
uint64_t
#define GB_BTYPE \
uint64_t
#define GB_CTYPE \
uint64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_BITSET (x, y, uint64_t, 64) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
1
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSET || GxB_NO_UINT64 || GxB_NO_BSET_UINT64)
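// (For example, uncommenting the corresponding "#define GxB_NO_BSET 1" line
// in GB_control.h, or passing -DGxB_NO_BSET=1 when compiling, disables this
// kernel and routes BSET through the generic case.)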
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bset_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bset_uint64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bset_uint64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint64_t
uint64_t bwork = (*((uint64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info GB ((none))
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *restrict Cx = (uint64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bset_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint64_t alpha_scalar ;
uint64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint64_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bset_uint64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bset_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
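// (With GB_FLIPPED set, the template computes z = fmult (y,x); for BSET this
// means setting bit aij of bij rather than bit bij of aij.)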
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bset_uint64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bset_uint64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bset_uint64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t x = (*((uint64_t *) x_input)) ;
uint64_t *Bx = (uint64_t *) Bx_input ;
int64_t p ;
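// GBB (Bb,p) is true when entry p is present (a NULL bitmap means all entries
// are present), and GBX (Bx,p,iso) reads Bx [iso ? 0 : p]; described here
// from the GB.h conventions, which remain authoritative.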
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_BITSET (x, bij, uint64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bset_uint64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint64_t *Cx = (uint64_t *) Cx_output ;
uint64_t *Ax = (uint64_t *) Ax_input ;
uint64_t y = (*((uint64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_BITSET (aij, y, uint64_t, 64) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (x, aij, uint64_t, 64) ; \
}
GrB_Info GB (_bind1st_tran__bset_uint64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t x = (*((const uint64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_BITSET (aij, y, uint64_t, 64) ; \
}
GrB_Info GB (_bind2nd_tran__bset_uint64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint64_t y = (*((const uint64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__bxnor_uint16.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCUDA_DEV
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_08__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_02__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_04__bxnor_uint16)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__bxnor_uint16)
// A*D function (colscale): GB (_AxD__bxnor_uint16)
// D*A function (rowscale): GB (_DxB__bxnor_uint16)
// C+=B function (dense accum): GB (_Cdense_accumB__bxnor_uint16)
// C+=b function (dense accum): GB (_Cdense_accumb__bxnor_uint16)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__bxnor_uint16)
// C=scalar+B GB (_bind1st__bxnor_uint16)
// C=scalar+B' GB (_bind1st_tran__bxnor_uint16)
// C=A+scalar GB (_bind2nd__bxnor_uint16)
// C=A'+scalar GB (_bind2nd_tran__bxnor_uint16)
// C type: uint16_t
// A type: uint16_t
// A pattern? 0
// B type: uint16_t
// B pattern? 0
// BinaryOp: cij = ~((aij) ^ (bij))
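// Worked example (uint16_t): aij = 0x00FF and bij = 0x0F0F give
// aij ^ bij = 0x0FF0, so cij = ~0x0FF0 = 0xF00F (truncated to 16 bits).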
#define GB_ATYPE \
uint16_t
#define GB_BTYPE \
uint16_t
#define GB_CTYPE \
uint16_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
uint16_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
uint16_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint16_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = ~((x) ^ (y)) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BXNOR || GxB_NO_UINT16 || GxB_NO_BXNOR_UINT16)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__bxnor_uint16)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint16_t
uint16_t bwork = (*((uint16_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *restrict Cx = (uint16_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__bxnor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
uint16_t alpha_scalar ;
uint16_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((uint16_t *) alpha_scalar_in)) ;
beta_scalar = (*((uint16_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<!M>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__bxnor_uint16)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__bxnor_uint16)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__bxnor_uint16)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t x = (*((uint16_t *) x_input)) ;
uint16_t *Bx = (uint16_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
uint16_t bij = GBX (Bx, p, false) ;
Cx [p] = ~((x) ^ (bij)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__bxnor_uint16)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint16_t *Cx = (uint16_t *) Cx_output ;
uint16_t *Ax = (uint16_t *) Ax_input ;
uint16_t y = (*((uint16_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
uint16_t aij = GBX (Ax, p, false) ;
Cx [p] = ~((aij) ^ (y)) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((x) ^ (aij)) ; \
}
GrB_Info GB (_bind1st_tran__bxnor_uint16)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t x = (*((const uint16_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint16_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint16_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = ~((aij) ^ (y)) ; \
}
GrB_Info GB (_bind2nd_tran__bxnor_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint16_t y = (*((const uint16_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
transform.h | /*!
* Copyright 2018 XGBoost contributors
*/
#ifndef XGBOOST_COMMON_TRANSFORM_H_
#define XGBOOST_COMMON_TRANSFORM_H_
#include <dmlc/omp.h>
#include <dmlc/common.h>
#include <xgboost/data.h>
#include <utility>
#include <vector>
#include <type_traits> // enable_if
#include "xgboost/host_device_vector.h"
#include "xgboost/span.h"
#include "common.h"
#if defined (__CUDACC__)
#include "device_helpers.cuh"
#endif // defined (__CUDACC__)
namespace xgboost {
namespace common {
constexpr size_t kBlockThreads = 256;
namespace detail {
#if defined(__CUDACC__)
template <typename Functor, typename... SpanType>
__global__ void LaunchCUDAKernel(Functor _func, Range _range,
SpanType... _spans) {
for (auto i : dh::GridStrideRange(*_range.begin(), *_range.end())) {
_func(i, _spans...);
}
}
#endif // defined(__CUDACC__)
} // namespace detail
/*! \brief Do Transformation on HostDeviceVectors.
*
* \tparam CompiledWithCuda A bool parameter used to distinguish compilation
* trajectories; users do not need to use it.
*
* Note: Using Transform is a VERY tricky thing to do. Transform uses a
* template argument to duplicate itself into two different types, one for
* CPU and another for CUDA. The trick is not without its flaw:
*
* If you use it in a function that can be compiled by both nvcc and the host
* compiler, the behaviour is undefined! Your function is NOT duplicated by
* `CompiledWithCuda`, so at link time the CUDA compiler's resolution will
* merge functions with the same signature.
*/
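/*
* A minimal usage sketch (hypothetical functor; the exact HostDeviceVector
* constructor signature may differ across versions):
*
*   struct Square {
*     XGBOOST_DEVICE void operator()(std::size_t i, Span<float> data) const {
*       data[i] *= data[i];
*     }
*   };
*   HostDeviceVector<float> values(16, 2.0f);
*   // device = -1 selects the CPU path inside Eval().
*   Transform<>::Init(Square{}, Range{0, 16}, -1).Eval(&values);
*/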
template <bool CompiledWithCuda = WITH_CUDA()>
class Transform {
private:
template <typename Functor>
struct Evaluator {
public:
Evaluator(Functor func, Range range, int device, bool shard) :
func_(func), range_{std::move(range)},
shard_{shard},
device_{device} {}
/*!
* \brief Evaluate the functor with input pointers to HostDeviceVector.
*
* \tparam HDV... HostDeviceVectors type.
* \param vectors Pointers to HostDeviceVector.
*/
template <typename... HDV>
void Eval(HDV... vectors) const {
bool on_device = device_ >= 0;
if (on_device) {
LaunchCUDA(func_, vectors...);
} else {
LaunchCPU(func_, vectors...);
}
}
private:
// CUDA UnpackHDV
template <typename T>
Span<T> UnpackHDVOnDevice(HostDeviceVector<T>* _vec) const {
auto span = _vec->DeviceSpan();
return span;
}
template <typename T>
Span<T const> UnpackHDVOnDevice(const HostDeviceVector<T>* _vec) const {
auto span = _vec->ConstDeviceSpan();
return span;
}
// CPU UnpackHDV
template <typename T>
Span<T> UnpackHDV(HostDeviceVector<T>* _vec) const {
return Span<T> {_vec->HostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
template <typename T>
Span<T const> UnpackHDV(const HostDeviceVector<T>* _vec) const {
return Span<T const> {_vec->ConstHostPointer(),
static_cast<typename Span<T>::index_type>(_vec->Size())};
}
// Recursive unpack for Shard.
template <typename T>
void UnpackShard(int device, const HostDeviceVector<T> *vector) const {
vector->SetDevice(device);
}
template <typename Head, typename... Rest>
void UnpackShard(int device,
const HostDeviceVector<Head> *_vector,
const HostDeviceVector<Rest> *... _vectors) const {
_vector->SetDevice(device);
UnpackShard(device, _vectors...);
}
#if defined(__CUDACC__)
template <typename std::enable_if<CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
if (shard_)
UnpackShard(device_, _vectors...);
size_t range_size = *range_.end() - *range_.begin();
// Extract the index to cope with possibly old OpenMP implementations.
// This handles situations like the multi-class setting, where
// granularity is used in the data vector.
size_t shard_size = range_size;
Range shard_range {0, static_cast<Range::DifferenceType>(shard_size)};
dh::safe_cuda(cudaSetDevice(device_));
const int kGrids =
static_cast<int>(DivRoundUp(*(range_.end()), kBlockThreads));
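// DivRoundUp gives ceil(range_end / kBlockThreads); an empty range yields
// zero grids, and a CUDA launch with zero blocks would be a configuration
// error, hence the early return below.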
if (kGrids == 0) {
return;
}
detail::LaunchCUDAKernel<<<kGrids, kBlockThreads>>>( // NOLINT
_func, shard_range, UnpackHDVOnDevice(_vectors)...);
}
#else
/*! \brief Dummy function defined when compiling for CPU. */
template <typename std::enable_if<!CompiledWithCuda>::type* = nullptr,
typename... HDV>
void LaunchCUDA(Functor _func, HDV*... _vectors) const {
LOG(FATAL) << "Not part of device code. WITH_CUDA: " << WITH_CUDA();
}
#endif // defined(__CUDACC__)
template <typename... HDV>
void LaunchCPU(Functor func, HDV*... vectors) const {
omp_ulong end = static_cast<omp_ulong>(*(range_.end()));
dmlc::OMPException omp_exc;
#pragma omp parallel for schedule(static)
for (omp_ulong idx = 0; idx < end; ++idx) {
omp_exc.Run(func, idx, UnpackHDV(vectors)...);
}
omp_exc.Rethrow();
}
private:
/*! \brief Callable object. */
Functor func_;
/*! \brief Range object specifying parallel threads index range. */
Range range_;
/*! \brief Whether sharding for vectors is required. */
bool shard_;
int device_;
};
public:
/*!
* \brief Initialize a Transform object.
*
* \tparam Functor A callable object type.
* \return An Evaluator having one method Eval.
*
* \param func A callable object, accepting a size_t thread index,
* followed by a set of Span classes.
* \param range Range object specifying parallel threads index range.
* \param device Specify GPU to use.
* \param shard Whether Shard for HostDeviceVector is needed.
*/
template <typename Functor>
static Evaluator<Functor> Init(Functor func, Range const range,
int device,
bool const shard = true) {
return Evaluator<Functor> {func, std::move(range), device, shard};
}
};
} // namespace common
} // namespace xgboost
#endif // XGBOOST_COMMON_TRANSFORM_H_
|
GB_binop__bshift_int64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__bshift_int64
// A.*B function (eWiseMult): GB_AemultB__bshift_int64
// A*D function (colscale): (none)
// D*A function (rowscale): (none)
// C+=B function (dense accum): GB_Cdense_accumB__bshift_int64
// C+=b function (dense accum): GB_Cdense_accumb__bshift_int64
// C+=A+B function (dense ewise3): (none)
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__bshift_int64
// C=scalar+B GB_bind1st__bshift_int64
// C=scalar+B' GB_bind1st_tran__bshift_int64
// C=A+scalar GB_bind2nd__bshift_int64
// C=A'+scalar GB_bind2nd_tran__bshift_int64
// C type: int64_t
// A type: int64_t
// B,b type: int8_t
// BinaryOp: cij = GB_bitshift_int64 (aij, bij)
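// (Described from the GxB_BSHIFT specification: GB_bitshift_int64 (aij, bij)
// shifts aij left by bij bits when bij > 0 and right when bij < 0, with shift
// counts beyond the word size clamped; right shifts of signed values are
// arithmetic. See the GraphBLAS sources for the exact definition.)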
#define GB_ATYPE \
int64_t
#define GB_BTYPE \
int8_t
#define GB_CTYPE \
int64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
0
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
0
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int64_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
int8_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
int64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y, i, j) \
z = GB_bitshift_int64 (x, y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_BSHIFT || GxB_NO_INT64 || GxB_NO_BSHIFT_INT64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void (none)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__bshift_int64
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type int8_t
int8_t bwork = (*((int8_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *GB_RESTRICT Cx = (int64_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
#undef GB_FREE_ALL
#define GB_FREE_ALL \
{ \
GB_ek_slice_free (&pstart_Mslice, &kfirst_Mslice, &klast_Mslice) ; \
GB_ek_slice_free (&pstart_Aslice, &kfirst_Aslice, &klast_Aslice) ; \
GB_ek_slice_free (&pstart_Bslice, &kfirst_Bslice, &klast_Bslice) ; \
}
GrB_Info GB_AaddB__bshift_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_add_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__bshift_int64
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *pstart_Mslice = NULL, *kfirst_Mslice = NULL, *klast_Mslice = NULL ;
int64_t *pstart_Aslice = NULL, *kfirst_Aslice = NULL, *klast_Aslice = NULL ;
int64_t *pstart_Bslice = NULL, *kfirst_Bslice = NULL, *klast_Bslice = NULL ;
#include "GB_emult_template.c"
GB_FREE_ALL ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__bshift_int64
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *GB_RESTRICT Bb,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t *Cx = (int64_t *) Cx_output ;
int64_t x = (*((int64_t *) x_input)) ;
int8_t *Bx = (int8_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Bb, p)) continue ;
int8_t bij = Bx [p] ;
Cx [p] = GB_bitshift_int64 (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__bshift_int64
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *GB_RESTRICT Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
int64_t *Cx = (int64_t *) Cx_output ;
int64_t *Ax = (int64_t *) Ax_input ;
int8_t y = (*((int8_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
int64_t aij = Ax [p] ;
Cx [p] = GB_bitshift_int64 (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int8_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int64 (x, aij) ; \
}
GrB_Info GB_bind1st_tran__bshift_int64
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
int8_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t x = (*((const int64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
int64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
int64_t aij = Ax [pA] ; \
Cx [pC] = GB_bitshift_int64 (aij, y) ; \
}
GrB_Info GB_bind2nd_tran__bshift_int64
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int8_t y = (*((const int8_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
target_enter_data_map_messages.c | // RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - %s
// RUN: %clang_cc1 -triple x86_64-apple-macos10.7.0 -verify -fopenmp -ferror-limit 100 -o - -x c++ %s
int main(int argc, char **argv) {
int r;
#pragma omp target enter data // expected-error {{expected at least one 'map' clause for '#pragma omp target enter data'}}
#pragma omp target enter data map(r) // expected-error {{map type must be specified for '#pragma omp target enter data'}}
#pragma omp target enter data map(tofrom: r) // expected-error {{map type 'tofrom' is not allowed for '#pragma omp target enter data'}}
#pragma omp target enter data map(always, to: r)
#pragma omp target enter data map(always, alloc: r)
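// 'to' and 'alloc' (optionally combined with 'always') are the map types
// accepted by '#pragma omp target enter data'.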
#pragma omp target enter data map(always, from: r) // expected-error {{map type 'from' is not allowed for '#pragma omp target enter data'}}
#pragma omp target enter data map(release: r) // expected-error {{map type 'release' is not allowed for '#pragma omp target enter data'}}
#pragma omp target enter data map(delete: r) // expected-error {{map type 'delete' is not allowed for '#pragma omp target enter data'}}
return 0;
}
|
modal_analysis_builder_and_solver.h | /*
==============================================================================
Kratos
A General Purpose Software for Multi-Physics Finite Element Analysis
Version 1.0 (Released on march 05, 2007).
Copyright 2007
Pooyan Dadvand, Riccardo Rossi
pooyan@cimne.upc.edu
rrossi@cimne.upc.edu
CIMNE (International Center for Numerical Methods in Engineering),
Gran Capita' s/n, 08034 Barcelona, Spain
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following condition:
Distribution of this code for any commercial purpose is permissible
ONLY BY DIRECT ARRANGEMENT WITH THE COPYRIGHT OWNER.
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
==============================================================================
*/
/* *********************************************************
*
* Last Modified by: $Author: janosch $
* Date: $Date: 2008-04-29 12:23:09 $
* Revision: $Revision: 1.1 $
*
* ***********************************************************/
#if !defined(KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER )
#define KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER
/* System includes */
#include <set>
#include <omp.h>
/* External includes */
#include "boost/smart_ptr.hpp"
/* Project includes */
#include "includes/define.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "linear_solvers/power_iteration_eigenvalue_solver.h"
namespace Kratos
{
/**@name Kratos Globals */
/*@{ */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
/**@name Enum's */
/*@{ */
/*@} */
/**@name Functions */
/*@{ */
/*@} */
/**@name Kratos Classes */
/*@{ */
/** Short class definition.
Detail class definition.
The current class provides an implementation for standard builder and solver operations.
The RHS is constituted by the unbalanced loads (residual).
Degrees of freedom are reordered so that the restrained degrees of freedom appear at
the end of the system, in reverse order with respect to the DofSet.
Imposition of the Dirichlet conditions is naturally dealt with, as the residual already contains
this information.
Calculation of the reactions involves a cost very similar to the calculation of the total residual.
\URL[Example of use html]{ extended_documentation/no_ex_of_use.html}
\URL[Example of use pdf]{ extended_documentation/no_ex_of_use.pdf}
\URL[Example of use doc]{ extended_documentation/no_ex_of_use.doc}
\URL[Example of use ps]{ extended_documentation/no_ex_of_use.ps}
\URL[Extended documentation html]{ extended_documentation/no_ext_doc.html}
\URL[Extended documentation pdf]{ extended_documentation/no_ext_doc.pdf}
\URL[Extended documentation doc]{ extended_documentation/no_ext_doc.doc}
\URL[Extended documentation ps]{ extended_documentation/no_ext_doc.ps}
*/
template<class TSparseSpace,
class TDenseSpace , //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class ModalAnalysisBuilderAndSolver
: public BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >
{
public:
/**@name Type Definitions */
/*@{ */
KRATOS_CLASS_POINTER_DEFINITION( ModalAnalysisBuilderAndSolver );
typedef BuilderAndSolver<TSparseSpace,TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
/*@} */
/**@name Life Cycle
*/
/*@{ */
/** Constructor.
*/
ModalAnalysisBuilderAndSolver(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver< TSparseSpace,TDenseSpace,TLinearSolver >(pNewLinearSystemSolver)
{
/* std::cout << "using the standard builder and solver " << std::endl; */
}
/** Destructor.
*/
virtual ~ModalAnalysisBuilderAndSolver() {}
/*@} */
/**@name Operators
*/
/*@{ */
//**************************************************************************
//**************************************************************************
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& b)
{
KRATOS_TRY
if(!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
std::vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
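//element_partition has number_of_threads+1 entries; thread k processes the
//half-open slice [element_partition[k], element_partition[k+1]) of pElements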
double start_prod = omp_get_wtime();
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator it_end=pElements.ptr_begin()+element_partition[k+1];
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
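//the critical section serializes assembly, since AssembleLHS and AssembleRHS
//write into the shared system matrix A and vector b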
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
}
}
std::vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
}
}
}
double stop_prod = omp_get_wtime();
std::cout << "time: " << stop_prod - start_prod << std::endl;
KRATOS_WATCH("finished parallel building");
/* LHS_Contribution.resize(0,0,false);
RHS_Contribution.resize(0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_CalculateSystemContributions(*it,LHS_Contribution,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
AssembleRHS(b,RHS_Contribution,EquationId);
}
*/
//for( int i=0; i<A.size1(); i++ )
//{
// for( int j=0; j<A.size2(); j++ )
// {
// std::cout << A(i,j);
// }
// std::cout << std::endl;
//}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildLHS(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A)
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
LHS_Contribution.resize(0,0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS(A,LHS_Contribution,EquationId);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildLHS_CompleteOnFreeRows(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A)
{
KRATOS_TRY
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
LHS_Contribution.resize(0,0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate elemental contribution
pScheme->Condition_Calculate_LHS_Contribution(*it,LHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleLHS_CompleteOnFreeRows(A,LHS_Contribution,EquationId);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void SystemSolve(
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b
)
{
KRATOS_TRY
double start_solve = omp_get_wtime();
double norm_b;
if(b.size() != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if(norm_b != 0.00)
BaseType::mpLinearSystemSolver->Solve(A,Dx,b);
else
TSparseSpace::SetToZero(Dx);
//prints information about the linear solver used
if (BaseType::GetEchoLevel()>1)
{
std::cout << *(BaseType::mpLinearSystemSolver) << std::endl;
}
double stop_solve= omp_get_wtime();
std::cout << "time: " << stop_solve - start_solve << std::endl;
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildAndSolve( typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b )
{
KRATOS_TRY
boost::timer building_time;
//construct mass matrix structure
TSystemMatrixType M = TSystemMatrixType( A.size1(), A.size2() );
//build matrices
BuildSystemMatrices( pScheme, r_model_part, A, M );
//elapsed time
if(BaseType::GetEchoLevel()>0)
{
std::cout << "Building Time : " << building_time.elapsed() << std::endl;
}
if (BaseType::GetEchoLevel()== 3)
{
std::cout << "before the solution of the system" << std::endl;
std::cout << "stiffness Matrix = " << A << std::endl;
std::cout << "mass Matrix = " << M << std::endl;
std::cout << "unknowns vector = " << Dx << std::endl;
std::cout << "RHS vector = " << b << std::endl;
}
boost::timer solve_time;
// SystemSolve(A,Dx,b);
PowerIterationEigenvalueSolver<TSparseSpace, TDenseSpace, TLinearSolver>
eigenvalue_solver( 1.0e-8, 1000, 1, BaseType::mpLinearSystemSolver );
LocalSystemVectorType Eigenvalues(1);
LocalSystemMatrixType Eigenvectors(1,1);
eigenvalue_solver.Solve( A, M, Eigenvalues, Eigenvectors);
if(BaseType::GetEchoLevel()>0)
{
std::cout << "System Solve Time : " << solve_time.elapsed() << std::endl;
}
if (BaseType::GetEchoLevel()== 3)
{
std::cout << "after the solution of the system" << std::endl;
std::cout << "Eigenvalues = " << Eigenvalues << std::endl;
std::cout << "Eigenvectors = " << Eigenvectors << std::endl;
}
KRATOS_CATCH("")
}
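//
// Illustrative note (not in the original source): BuildAndSolve above performs a
// modal analysis, i.e. it solves the generalized eigenproblem K * phi = lambda * M * phi
// for the stiffness matrix K (assembled into A) and the mass matrix M. Assuming the
// solver follows standard inverse power iteration, each step solves
// K * x_{k+1} = M * x_k with mpLinearSystemSolver and normalizes, converging to the
// lowest eigenpair; the constructor arguments are the tolerance (1.0e-8), the maximum
// number of iterations (1000), and the number of requested eigenvalues (1).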
//**************************************************************************
//**************************************************************************
void BuildRHSAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
BuildRHS(pScheme,r_model_part,b);
SystemSolve(A,Dx,b);
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void BuildRHS(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemVectorType& b)
{
KRATOS_TRY
//Getting the Elements
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin(); it!=pElements.ptr_end(); ++it)
{
//calculate elemental Right Hand Side Contribution
pScheme->Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the elemental contribution
AssembleRHS(b,RHS_Contribution,EquationId);
}
LHS_Contribution.resize(0,0,false);
RHS_Contribution.resize(0,false);
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=ConditionsArray.ptr_begin(); it!=ConditionsArray.ptr_end(); ++it)
{
//calculate the condition contribution
pScheme->Condition_Calculate_RHS_Contribution(*it,RHS_Contribution,EquationId,CurrentProcessInfo);
//assemble the condition contribution
AssembleRHS(b,RHS_Contribution,EquationId);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void SetUpDofSet( typename TSchemeType::Pointer pScheme, ModelPart& r_model_part )
{
KRATOS_TRY
KRATOS_WATCH("setting up the dofs");
//Gets the array of elements from the modeler
ElementsArrayType& pElements = r_model_part.Elements();
Element::DofsVectorType ElementalDofList;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
//mDofSet.clear();
//double StartTime = GetTickCount();
for (typename ElementsArrayType::ptr_iterator it=pElements.ptr_begin();
it!=pElements.ptr_end(); ++it)
{
// gets the list of DOFs involved in each element
pScheme->GetElementalDofList(*it,ElementalDofList,CurrentProcessInfo);
for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ;
i != ElementalDofList.end() ; ++i)
{
Doftemp.push_back(*i);
//mDofSet.push_back(*i);
}
}
//taking conditions into account
ConditionsArrayType& pConditions = r_model_part.Conditions();
for (typename ConditionsArrayType::ptr_iterator it=pConditions.ptr_begin();
it!=pConditions.ptr_end(); ++it)
{
// gets the list of DOFs involved in each condition
pScheme->GetConditionDofList(*it,ElementalDofList,CurrentProcessInfo);
for(typename Element::DofsVectorType::iterator i = ElementalDofList.begin() ;
i != ElementalDofList.end() ; ++i)
{
//mDofSet.push_back(*i);
Doftemp.push_back(*i);
}
}
Doftemp.Unique();
BaseType::mDofSet = Doftemp;
//throws an exception if there are no degrees of freedom involved in the analysis
if (BaseType::mDofSet.size()==0)
KRATOS_THROW_ERROR(std::logic_error, "No degrees of freedom!", "");
BaseType::mDofSetIsInitialized = true;
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void SetUpSystem(
ModelPart& r_model_part
)
{
// Set equation id for degrees of freedom
// the free degrees of freedom are positioned at the beginning of the system,
// while the fixed ones are at the end (in reverse order).
//
// that means that if the EquationId is greater than or equal to "mEquationSystemSize"
// the corresponding degree of freedom is restrained
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
if (dof_iterator->IsFixed())
dof_iterator->SetEquationId(--fix_id);
else
dof_iterator->SetEquationId(free_id++);
BaseType::mEquationSystemSize = fix_id;
}
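//
// Illustrative example (not in the original source): for a DOF set of size 5 in
// which the 2nd and 4th DOFs are fixed, the loop above assigns
//   free DOFs  -> EquationIds 0, 1, 2   (free_id counting up)
//   fixed DOFs -> EquationIds 4, 3      (fix_id counting down from 5)
// so mEquationSystemSize == 3 and any EquationId >= 3 denotes a restrained DOF.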
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors( typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType& pA,
TSystemVectorPointerType& pDx,
TSystemVectorPointerType& pb,
ModelPart& rModelPart
)
{
KRATOS_TRY
if(pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0,0) );
pA.swap(pNewA);
}
if(pDx == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0) );
pDx.swap(pNewDx);
}
if(pb == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0) );
pb.swap(pNewb);
}
if(BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0) );
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType& A = *pA;
TSystemVectorType& Dx = *pDx;
TSystemVectorType& b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,false);
ConstructMatrixStructure(pScheme, A,rModelPart.Elements(),rModelPart.Conditions(),rModelPart.GetProcessInfo());
}
else
{
if(A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
A.resize(BaseType::mEquationSystemSize,BaseType::mEquationSystemSize,true);
ConstructMatrixStructure(pScheme, A,rModelPart.Elements(),rModelPart.Conditions(),rModelPart.GetProcessInfo());
}
}
if(Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize,false);
if(b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize,false);
//
//if needed resize the vector for the calculation of reactions
if(BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size()-BaseType::mEquationSystemSize;
if(BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize,false);
}
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void InitializeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
KRATOS_TRY
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
void FinalizeSolutionStep(
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
}
//**************************************************************************
//**************************************************************************
void CalculateReactions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{
//refresh RHS to have the correct reactions
BuildRHS(pScheme,r_model_part,b);
int i;
int systemsize = BaseType::mDofSet.size() - BaseType::mpReactionsVector->size();
typename DofsArrayType::ptr_iterator it2;
//std::set<Dof::Pointer,ComparePDof>::iterator it2;
//updating variables
TSystemVectorType& ReactionsVector = *(BaseType::mpReactionsVector);
for (it2=BaseType::mDofSet.ptr_begin(); it2 != BaseType::mDofSet.ptr_end(); ++it2)
{
if ( (*it2)->IsFixed() )
{
i=(*it2)->EquationId();
i-=systemsize;
(*it2)->GetSolutionStepReactionValue() = ReactionsVector[i];
}
}
}
//**************************************************************************
//**************************************************************************
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& A,
TSystemVectorType& Dx,
TSystemVectorType& b)
{}
//**************************************************************************
//**************************************************************************
void ApplyPointLoads(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemVectorType& b)
{}
/**
this function is intended to be called at the end of the solution step to clean up
memory storage that is no longer needed
*/
void Clear()
{
this->mDofSet = DofsArrayType();
if(this->mpReactionsVector != NULL)
{
TSparseSpace::Clear( (this->mpReactionsVector) );
}
if (this->GetEchoLevel()>0)
{
KRATOS_WATCH("ModalAnalysisBuilderAndSolver Clear Function called");
}
}
/*@} */
/**@name Operations */
/*@{ */
/*@} */
/**@name Access */
/*@{ */
/*@} */
/**@name Inquiry */
/*@{ */
/*@} */
/**@name Friends */
/*@{ */
/*@} */
protected:
/**@name Protected static Member Variables */
/*@{ */
/*@} */
/**@name Protected member Variables */
/*@{ */
/*@} */
/**@name Protected Operators*/
/*@{ */
//**************************************************************************
virtual void ConstructMatrixStructure( typename TSchemeType::Pointer pScheme,
TSystemMatrixType& A,
ElementsContainerType& rElements,
ConditionsArrayType& rConditions,
ProcessInfo& CurrentProcessInfo)
{
std::size_t equation_size = A.size1();
std::vector<std::vector<std::size_t> > indices(equation_size);
// std::vector<std::vector<std::size_t> > dirichlet_indices(TSystemSpaceType::Size1(mDirichletMatrix));
Element::EquationIdVectorType ids(3,0);
for(typename ElementsContainerType::iterator i_element = rElements.begin() ; i_element != rElements.end() ; i_element++)
{
pScheme->EquationId( *(i_element.base()) , ids, CurrentProcessInfo);
for(std::size_t i = 0 ; i < ids.size() ; i++)
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; j++)
if(ids[j] < equation_size)
{
AddUnique(row_indices,ids[j]);
//indices[ids[i]].push_back(ids[j]);
}
}
}
for(typename ConditionsArrayType::iterator i_condition = rConditions.begin() ; i_condition != rConditions.end() ; i_condition++)
{
pScheme->Condition_EquationId( *(i_condition.base()), ids, CurrentProcessInfo);
for(std::size_t i = 0 ; i < ids.size() ; i++)
if(ids[i] < equation_size)
{
std::vector<std::size_t>& row_indices = indices[ids[i]];
for(std::size_t j = 0 ; j < ids.size() ; j++)
if(ids[j] < equation_size)
{
AddUnique(row_indices,ids[j]);
// indices[ids[i]].push_back(ids[j]);
}
}
}
//allocating the memory needed
int data_size = 0;
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
data_size += indices[i].size();
}
A.reserve(data_size,false);
//filling with zero the matrix (creating the structure)
for(std::size_t i = 0 ; i < indices.size() ; i++)
{
std::vector<std::size_t>& row_indices = indices[i];
std::sort(row_indices.begin(), row_indices.end());
for(std::vector<std::size_t>::iterator it= row_indices.begin(); it != row_indices.end() ; it++)
{
A.push_back(i,*it,0.00);
// A()(i,*it) = 0.00;
}
//row_indices = std::vector<std::size_t>();
row_indices.clear();
}
}
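//
// Illustrative note (not in the original source): the routine above first builds
// the sparsity pattern as per-row adjacency lists -- every pair (i, j) of equation
// ids below the system size appearing in the same element or condition marks A(i, j)
// as potentially nonzero -- then sorts and deduplicates each row and pushes explicit
// zeros into A, so that the later assembly passes never have to reallocate.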
//**************************************************************************
void AssembleLHS(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize )
{
for (unsigned int j_local=0; j_local<local_size; j_local++)
{
unsigned int j_global=EquationId[j_local];
if ( j_global < BaseType::mEquationSystemSize )
{
A(i_global,j_global) += LHS_Contribution(i_local,j_local);
}
}
}
}
}
//**************************************************************************
void AssembleRHS(
TSystemVectorType& b,
LocalSystemVectorType& RHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag==false) //if we don't need to calculate reactions
{
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs
{
// ASSEMBLING THE SYSTEM VECTOR
b[i_global] += RHS_Contribution[i_local];
}
}
}
else //when the calculation of reactions is needed
{
TSystemVectorType& ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize ) //on "free" DOFs
{
// ASSEMBLING THE SYSTEM VECTOR
b[i_global] += RHS_Contribution[i_local];
}
else //on "fixed" DOFs
{
// Assembling the Vector of REACTIONS
ReactionsVector[i_global-BaseType::mEquationSystemSize] -= RHS_Contribution[i_local];
}
}
}
}
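//
// Illustrative example (not in the original source): with mEquationSystemSize == 3
// and a local contribution whose EquationIds are {1, 4}, the first entry is added
// to b[1] while the second lands in ReactionsVector[4 - 3]; the negative sign makes
// the stored reactions oppose the assembled right hand side contributions.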
/*@} */
/**@name Protected Operations*/
/*@{ */
/*@} */
/**@name Protected Access */
/*@{ */
/*@} */
/**@name Protected Inquiry */
/*@{ */
/*@} */
/**@name Protected LifeCycle */
/*@{ */
/*@} */
private:
/**@name Static Member Variables */
/*@{ */
/*@} */
/**@name Member Variables */
/*@{ */
/*@} */
/**@name Private Operators*/
/*@{ */
/*@} */
/**@name Private Operations*/
/*@{ */
//**************************************************************************
//**************************************************************************
void BuildSystemMatrices(
typename TSchemeType::Pointer pScheme,
ModelPart& r_model_part,
TSystemMatrixType& K,
TSystemMatrixType& M )
{
KRATOS_TRY
if(!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType& pElements = r_model_part.Elements();
//getting the array of the conditions
ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero( *(BaseType::mpReactionsVector) );
//create a partition of the element array
int number_of_threads = omp_get_max_threads();
std::vector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
double start_prod = omp_get_wtime();
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType K_Contribution = LocalSystemMatrixType(0,0);
LocalSystemMatrixType M_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different terms
Element::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator
it_begin=pElements.ptr_begin()+element_partition[k];
typename ElementsArrayType::ptr_iterator
it_end=pElements.ptr_begin()+element_partition[k+1];
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate elemental contribution
pScheme->CalculateSystemContributions(*it, K_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
(*it)->CalculateMassMatrix( M_Contribution, CurrentProcessInfo );
//(*it)->CalculateLocalSystem( K_Contribution,RHS_Contribution,CurrentProcessInfo );
#pragma omp critical
{
//assemble the elemental contribution
AssembleLHS(K,K_Contribution,EquationId);
AssembleLHS(M,M_Contribution,EquationId);
// clean local elemental memory
pScheme->CleanMemory(*it);
}
}
}
std::vector<unsigned int> condition_partition;
CreatePartition(number_of_threads, ConditionsArray.size(), condition_partition);
#pragma omp parallel for
for(int k=0; k<number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType K_Contribution = LocalSystemMatrixType(0,0);
LocalSystemMatrixType M_Contribution = LocalSystemMatrixType(0,0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Condition::EquationIdVectorType EquationId;
ProcessInfo& CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ConditionsArrayType::ptr_iterator
it_begin=ConditionsArray.ptr_begin()+condition_partition[k];
typename ConditionsArrayType::ptr_iterator
it_end=ConditionsArray.ptr_begin()+condition_partition[k+1];
// assemble all conditions
for (typename ConditionsArrayType::ptr_iterator it=it_begin; it!=it_end; ++it)
{
//calculate the condition contribution
pScheme->Condition_CalculateSystemContributions(*it, K_Contribution, RHS_Contribution, EquationId, CurrentProcessInfo);
(*it)->CalculateMassMatrix( M_Contribution, CurrentProcessInfo );
//(*it)->CalculateLocalSystem( K_Contribution,RHS_Contribution,CurrentProcessInfo );
#pragma omp critical
{
//assemble the condition contribution
AssembleLHS(K,K_Contribution,EquationId);
AssembleLHS(M,M_Contribution,EquationId);
}
}
}
double stop_prod = omp_get_wtime();
std::cout << "time: " << stop_prod - start_prod << std::endl;
KRATOS_WATCH("finished parallel building");
KRATOS_CATCH("")
}
//**************************************************************************
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType& A,
LocalSystemMatrixType& LHS_Contribution,
Element::EquationIdVectorType& EquationId
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local=0; i_local<local_size; i_local++)
{
unsigned int i_global=EquationId[i_local];
if ( i_global < BaseType::mEquationSystemSize )
{
for (unsigned int j_local=0; j_local<local_size; j_local++)
{
unsigned int j_global=EquationId[j_local];
A(i_global,j_global) += LHS_Contribution(i_local,j_local);
}
}
}
}
//******************************************************************************************
//******************************************************************************************
inline void AddUnique(std::vector<std::size_t>& v, const std::size_t& candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while ( i != endit && (*i) != candidate)
{
i++;
}
if( i == endit )
{
v.push_back(candidate);
}
}
//******************************************************************************************
//******************************************************************************************
inline void CreatePartition(unsigned int number_of_threads,const int number_of_rows, std::vector<unsigned int>& partitions)
{
partitions.resize(number_of_threads+1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for(unsigned int i = 1; i<number_of_threads; i++)
partitions[i] = partitions[i-1] + partition_size ;
}
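//
// Illustrative example (not in the original source): with number_of_threads == 3
// and number_of_rows == 10, partition_size == 10 / 3 == 3 and the boundaries
// become {0, 3, 6, 10}, so the last thread absorbs the division remainder.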
/*@} */
/**@name Private Access */
/*@{ */
/*@} */
/**@name Private Inquiry */
/*@{ */
/*@} */
/**@name Un accessible methods */
/*@{ */
/*@} */
}; /* Class ModalAnalysisBuilderAndSolver */
/*@} */
/**@name Type Definitions */
/*@{ */
/*@} */
} /* namespace Kratos.*/
#endif /* KRATOS_MODAL_ANALYSIS_BUILDER_AND_SOLVER defined */
|
CostModel.h | #ifndef pixelbridge_CostModel_h
#define pixelbridge_CostModel_h
/**
* \file CostModel.h
*
* \brief This file holds the implementation of the CostModel.
*
* This file holds the implementation of the CostModel. The CostModel is used
* by nDDI implementations to register the cost of the various operations triggered
* by nDDI commands.
*/
#include <cassert>
#include <iostream>
#include <fstream>
#include <stdlib.h>
#include <sys/types.h>
#include <unistd.h>
#include <time.h>
///\cond
// TODO(CDE): Sort out a clean way of including this, which is normally in NDimensionalDisplayInterface.h.
#define COEFFICIENT_UNCHANGED INT16_MAX
///\endcond
/*
* Definitions for widths for the various
* data in the NDDI display. Modify to perform
* various cost experiments.
*/
#ifdef USE_NARROW_DATA_FIELDS
/// \brief Configures the number of bytes for a pixel.
///
/// Configures the number of bytes for a pixel.
#ifdef USE_ALPHA_CHANNEL
#define BYTES_PER_PIXEL 4
#else
#define BYTES_PER_PIXEL 3
#endif
/// \brief Configures the number of bytes for a frame volume coordinate.
///
/// Configures the number of bytes for a frame volume coordinate.
#define BYTES_PER_FV_COORD 4
/// \brief Configures the number of bytes for a coefficient plane coordinate.
///
/// Configures the number of bytes for a coefficient plane coordinate.
#define BYTES_PER_CP_COORD 2
/// \brief Configures the number of bytes for coefficient matrix coordinate.
///
/// Configures the number of bytes for a coefficient matrix coordinate.
#define BYTES_PER_CM_COORD 1
/// \brief Configures the number of bytes for an input vector value.
///
/// Configures the number of bytes for an input vector value.
#define BYTES_PER_IV_VALUE 4
/// \brief Configures the number of bytes for a coefficient.
///
/// Configures the number of bytes for a coefficient.
#define BYTES_PER_COEFF 4
/// \brief Configures the number of bytes for a scaler.
///
/// Configures the number of bytes for a scaler.
#define BYTES_PER_SCALER 6
#else
#define BYTES_PER_PIXEL 4
#define BYTES_PER_FV_COORD 4
#define BYTES_PER_CP_COORD 4
#define BYTES_PER_CM_COORD 4
#define BYTES_PER_IV_VALUE 4
#define BYTES_PER_COEFF 4
#define BYTES_PER_SCALER 8
#endif
/*
* Helper macros to be used when registering
* cost charges.
*/
/// \brief Helper macro that calculates the bytes used to encode c pixels.
///
/// Helper macro that calculates the bytes used to encode c pixels.
#define CALC_BYTES_FOR_PIXELS(c) (BYTES_PER_PIXEL * c)
/// \brief Helper macro that calculates the bytes used to encode c frame volume coordinate tuples.
///
/// Helper macro that calculates the bytes used to encode c frame volume coordinate tuples.
#define CALC_BYTES_FOR_FV_COORD_TUPLES(c) (BYTES_PER_FV_COORD * frameVolumeDimensionalSizes_.size() * c)
/// \brief Helper macro that calculates the bytes used to encode c tile coordinate tuples.
///
/// Helper macro that calculates the bytes used to encode c tile coordinate tuples.
#define CALC_BYTES_FOR_TILE_COORD_DOUBLES(c) (BYTES_PER_CP_COORD * 2 * c)
/// \brief Helper macro that calculates the bytes used to encode an input vector update.
///
/// Helper macro that calculates the bytes used to encode an input vector update (the first two values are excluded).
#define CALC_BYTES_FOR_IV_UPDATE() (BYTES_PER_IV_VALUE * (inputVector_->getSize() - 2))
/// \brief Helper macro that calculates the bytes used to encode c coefficient matrices.
///
/// Helper macro that calculates the bytes used to encode c coefficient matrices.
#define CALC_BYTES_FOR_CMS(c) (BYTES_PER_COEFF * inputVector_->getSize() * frameVolumeDimensionalSizes_.size() * c)
/// \brief Helper macro that calculates the bytes used to encode c coefficient matrix coordinate tuples
///
/// Helper macro that calculates the bytes used to encode c coefficient matrix coordinate tuples
#define CALC_BYTES_FOR_CM_COORD_DOUBLES(c) (BYTES_PER_CM_COORD * 2 * c)
/// \brief Helper macro that calculates the bytes used to encode c coefficient plane coordinate tuples
///
/// Helper macro that calculates the bytes used to encode c coefficient plane coordinate tuples
#define CALC_BYTES_FOR_CP_COORD_TRIPLES(c) (BYTES_PER_CP_COORD * 3 * c)
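/*
 * Illustrative example (not in the original header): with USE_NARROW_DATA_FIELDS
 * defined, no alpha channel, and a 3-dimensional frame volume (so that
 * frameVolumeDimensionalSizes_.size() == 3 in the display class using these macros):
 *   CALC_BYTES_FOR_PIXELS(2)           == 3 * 2     == 6 bytes
 *   CALC_BYTES_FOR_FV_COORD_TUPLES(2)  == 4 * 3 * 2 == 24 bytes
 *   CALC_BYTES_FOR_CP_COORD_TRIPLES(1) == 2 * 3 * 1 == 6 bytes
 */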
using namespace std;
/**
* \brief Namespace for the entire nDDI API.
*
* Namespace for the entire nDDI API.
*/
namespace nddi {
/**
* \brief Enumerates the five different NDDI components for which charges are tracked.
*
* Enumerates the five different NDDI components for which charges are tracked: nDDI
* link, input vector, coefficient matrix, scaler, frame volume. The coefficient matrix
* and scaler components of the coefficient planes are tracked separately.
*/
typedef enum {
NDDI_LINK_COMPONENT,
INPUT_VECTOR_COMPONENT,
COEFFICIENT_MATRIX_COMPONENT,
SCALER_COMPONENT,
FRAME_VOLUME_COMPONENT
} component_t;
/**
* \brief Each charge is either a READ_ACCESS or WRITE_ACCESS to the component.
*
* Each charge is either a READ_ACCESS or WRITE_ACCESS to the component.
*/
typedef enum {
READ_ACCESS,
WRITE_ACCESS
} memory_access_t;
///\cond
static const char* memory_access_text[] = {"\"READ_ACCESS\"", "\"WRITE_ACCESS\""};
///\endcond
/**
* \brief Used to designate which, if any, of the detailed memory component
* charges to record.
*
* The bits of the logcosts argument to the CostModel constructor indicate
* which of the memory components to log detailed charges for. The default is
* NO_CHARGES, which will still log simple count-based charges such as bytes
* transmitted and commands sent. Setting any of these memory area charges
* will generate very detailed memory accesses which can be expensive.
*/
enum log_charges {
NO_CHARGES = 0,
IV_CHARGES = 1 << 0,
CM_CHARGES = 1 << 1,
SC_CHARGES = 1 << 2,
FV_CHARGES = 1 << 3,
ALL_CHARGES = IV_CHARGES | CM_CHARGES | SC_CHARGES | FV_CHARGES
};
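// Illustrative usage sketch (not in the original header; fvDims and ivSize are
// hypothetical): the flags combine bitwise, e.g.
//   CostModel costModel(fvDims, ivSize, false, IV_CHARGES | FV_CHARGES);
// logs detailed input vector and frame volume charges while the coefficient
// matrix and scaler components still contribute only to the global counts.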
/**
* \brief Base class for a memory component charge.
*
* Base class for a memory component charge.
*/
class Charge {
private:
public:
/// \brief Unique sequence number for the charge.
///
/// Unique sequence number for the charge.
unsigned int sequenceNumber;
/// \brief Empty base constructor.
///
/// Empty base constructor.
Charge(unsigned int sequenceNumber) : sequenceNumber(sequenceNumber) {}
/// \brief Empty base printer.
///
/// Empty base printer. Each subclass will implement a specific printer that
/// encodes the charge as JSON.
virtual void print(ofstream &file) {}
};
/**
* \brief Represents the details of data read from or written to the Input Vector.
*
* Represents the details of data read from or written to the Input Vector.
*/
class InputVectorCharge : public Charge {
public:
/// \brief The type of memory component access.
///
/// The type of memory component access.
memory_access_t access;
/// \brief The start position of the access.
///
/// The start position of the access.
unsigned int start;
/// \brief The end position of the access.
///
/// The end position of the access.
unsigned int end;
/**
* \brief InputVectorCharge constructor.
*
* InputVectorCharge constructor.
*
* @param sequenceNumber The unique sequence number to use for this new charge.
* @param access The type of memory component access.
* @param start The start position of the access.
* @param end The end position of the access.
*/
InputVectorCharge(unsigned int sequenceNumber, memory_access_t access, unsigned int start, unsigned int end)
: Charge(sequenceNumber), access(access), start(start), end(end) {}
/**
* \brief Print the JSON for this memory component charge.
*
* Print the JSON for this memory component charge.
*
* @param file Output file stream for the JSON.
*/
void print(ofstream &file) {
file << "{" << endl;
file << " \"sequenceNumber\" : " << sequenceNumber << "," << endl;
file << " \"inputVectorCharge\" : {" << endl;
file << " \"access\" : " << memory_access_text[access] << "," << endl;
file << " \"start\" : " << start << "," << endl;
file << " \"end\" : " << end << endl;
file << " }" << endl;
file << "}" << endl;
}
};
/**
* \brief Represents the details of data read from or written to single Coefficients.
*
* Represents the details of data read from or written to single Coefficients.
*/
class CoefficientCharge : public Charge {
public:
/// \brief The type of memory component access.
///
/// The type of memory component access.
memory_access_t access;
/// \brief The start coordinate of the access.
///
/// The start coordinate of the access.
vector<unsigned int> start;
/// \brief The end coordinate of the access.
///
/// The end coordinate of the access.
vector<unsigned int> end;
/// \brief The row of the coefficient being accessed.
///
/// The row of the coefficient being accessed.
int row;
/// \brief The column of the coefficient being accessed.
///
/// The column of the coefficient being accessed.
int col;
/**
* \brief CoefficientCharge constructor.
*
* CoefficientCharge constructor.
*
* @param sequenceNumber The unique sequence number to use for this new charge.
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
* @param row The row of the coefficient being accessed.
* @param col The column of the coefficient being accessed.
*/
CoefficientCharge(unsigned int sequenceNumber, memory_access_t access, vector<unsigned int> start, vector<unsigned int> end, int row, int col)
: Charge(sequenceNumber), access(access), start(start), end(end), row(row), col(col) {}
/**
* \brief Print the JSON for this memory component charge.
*
* Print the JSON for this memory component charge.
*
* @param file Output file stream for the JSON.
*/
void print(ofstream &file) {
file << "{" << endl;
file << " \"sequenceNumber\" : " << sequenceNumber << "," << endl;
file << " \"coefficientCharge\" : {" << endl;
file << " \"access\" : " << memory_access_text[access] << "," << endl;
file << " \"start\" : " << "[" << start[0];
for (int i = 1; i < start.size(); i++) { file << "," << start[i]; }
file << "]," << endl;
file << " \"end\" : " << "[" << end[0];
for (int i = 1; i < end.size(); i++) { file << "," << end[i]; }
file << "]," << endl;
file << " \"row\" : " << row << "," << endl;
file << " \"col\" : " << col << endl;
file << " }" << endl;
file << "}" << endl;
}
};
/**
* \brief Represents the details of data read from or written to Coefficient Matrices.
*
* Represents the details of data read from or written to Coefficient Matrices.
*/
class CoefficientMatrixCharge : public Charge {
public:
/// \brief The type of memory component access.
///
/// The type of memory component access.
memory_access_t access;
/// \brief The start coordinate of the access.
///
/// The start coordinate of the access.
vector<unsigned int> start;
/// \brief The end coordinate of the access.
///
/// The end coordinate of the access.
vector<unsigned int> end;
/// \brief Holds a coefficient matrix with unchanged values masked out, used to indicate
/// which coefficients within the matrices were updated.
///
/// Holds a coefficient matrix with unchanged values masked out, used to indicate
/// which coefficients within the matrices were updated.
vector< vector<int> > cm;
/**
* \brief CoefficientMatrixCharge constructor.
*
* CoefficientMatrixCharge constructor.
*
* @param sequenceNumber The unique sequence number to use for this new charge.
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
* @param cm Holds a coefficient matrix with values masked out used to indicate which coefficients
* within the matrices were updated.
*/
CoefficientMatrixCharge(unsigned int sequenceNumber, memory_access_t access, vector<unsigned int> start, vector<unsigned int> end, vector< vector<int> > cm)
: Charge(sequenceNumber), access(access), start(start), end(end), cm(cm) {}
/**
* \brief Print the JSON for this memory component charge.
*
* Print the JSON for this memory component charge.
*
* @param file Output file stream for the JSON.
*/
void print(ofstream &file) {
file << "{" << endl;
file << " \"sequenceNumber\" : " << sequenceNumber << "," << endl;
file << " \"coefficientMatrixCharge\" : {" << endl;
file << " \"access\" : " << memory_access_text[access] << "," << endl;
file << " \"start\" : " << "[" << start[0];
for (int i = 1; i < start.size(); i++) { file << "," << start[i]; }
file << "]," << endl;
file << " \"end\" : " << "[" << end[0];
for (int i = 1; i < end.size(); i++) { file << "," << end[i]; }
file << "]," << endl;
file << " \"cm\" : " << "[";
for (int row = 0; row < cm.size(); row++) {
if (row != 0) { file << ","; }
file << "[";
for (int col = 0; col < cm[row].size(); col++) {
if (col != 0) { file << ","; }
file << cm[row][col];
}
file << "]";
}
file << "]" << endl;
file << " }" << endl;
file << "}" << endl;
}
};
/**
* \brief Represents the details of data read from or written to Scalers.
*
* Represents the details of data read from or written to Scalers.
*/
class ScalerCharge : public Charge {
public:
/// \brief The type of memory component access.
///
/// The type of memory component access.
memory_access_t access;
/// \brief The start coordinate of the access.
///
/// The start coordinate of the access.
vector<unsigned int> start;
/// \brief The end coordinate of the access.
///
/// The end coordinate of the access.
vector<unsigned int> end;
/**
* \brief ScalerCharge constructor.
*
* ScalerCharge constructor.
*
* @param sequenceNumber The unique sequence number to use for this new charge.
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
*/
ScalerCharge(unsigned int sequenceNumber, memory_access_t access, vector<unsigned int> start, vector<unsigned int> end)
: Charge(sequenceNumber), access(access), start(start), end(end) {}
/**
* \brief Print the JSON for this memory component charge.
*
* Print the JSON for this memory component charge.
*
* @param file Output file stream for the JSON.
*/
void print(ofstream &file) {
file << "{" << endl;
file << " \"sequenceNumber\" : " << sequenceNumber << "," << endl;
file << " \"scalerCharge\" : {" << endl;
file << " \"access\" : " << memory_access_text[access] << "," << endl;
file << " \"start\" : " << "[" << start[0];
for (int i = 1; i < start.size(); i++) { file << "," << start[i]; }
file << "]," << endl;
file << " \"end\" : " << "[" << end[0];
for (int i = 1; i < end.size(); i++) { file << "," << end[i]; }
file << "]" << endl;
file << " }" << endl;
file << "}" << endl;
}
};
/**
* \brief Represents the details of data read from or written to the Frame Volume.
*
* Represents the details of data read from or written to the Frame Volume.
*/
class FrameVolumeCharge : public Charge {
public:
/// \brief The type of memory component access.
///
/// The type of memory component access.
memory_access_t access;
/// \brief The start coordinate of the access.
///
/// The start coordinate of the access.
vector<unsigned int> start;
/// \brief The end coordinate of the access.
///
/// The end coordinate of the access.
vector<unsigned int> end;
/**
* \brief FrameVolumeCharge constructor.
*
* FrameVolumeCharge constructor.
*
* @param sequenceNumber The unique sequence number to use for this new charge.
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
*/
FrameVolumeCharge(unsigned int sequenceNumber, memory_access_t access, vector<unsigned int> start, vector<unsigned int> end)
: Charge(sequenceNumber), access(access), start(start), end(end) {}
/**
* \brief Print the JSON for this memory component charge.
*
* Print the JSON for this memory component charge.
*
* @param file Output file stream for the JSON.
*/
void print(ofstream &file) {
file << "{" << endl;
file << " \"sequenceNumber\" : " << sequenceNumber << "," << endl;
file << " \"frameVolumeCharge\" : {" << endl;
file << " \"access\" : " << memory_access_text[access] << "," << endl;
file << " \"start\" : " << "[" << start[0];
for (int i = 1; i < start.size(); i++) { file << "," << start[i]; }
file << "]," << endl;
file << " \"end\" : " << "[" << end[0];
for (int i = 1; i < end.size(); i++) { file << "," << end[i]; }
file << "]" << endl;
file << " }" << endl;
file << "}" << endl;
}
};
/**
* \brief The main CostModel class.
*
* The CostModel provides a means to log a variety of NDDI charges, including commands sent,
* bytes transmitted, pixels blended, pixels mapped, and memory component accesses.
*/
class CostModel {
private:
unsigned long linkCommandsSent;
unsigned long linkBytesSent;
unsigned long pixelsBlended;
unsigned long pixelsMapped;
unsigned long inputVectorReads;
unsigned long inputVectorWrites;
unsigned long inputVectorBytesRead;
unsigned long inputVectorBytesWritten;
unsigned long coefficientPlaneReads;
unsigned long coefficientPlaneWrites;
unsigned long coefficientPlaneBytesRead;
unsigned long coefficientPlaneBytesWritten;
unsigned long frameVolumeReads;
unsigned long frameVolumeWrites;
unsigned long frameVolumeBytesRead;
unsigned long frameVolumeBytesWritten;
vector<unsigned int> fvDimensions;
unsigned int inputVectorSize = 0;
vector<Charge*> charges;
bool headless = false;
unsigned char logcosts = NO_CHARGES;
ofstream logfile;
public:
/**
* \brief Basic constructor that does not include memory component sizes.
*
* Basic constructor that does not include memory component sizes.
*
* @param headless Indicates if the display is headless, passed here for convenient retrieval by the application later.
* @param logcosts 8-bit field holding values from the log_charges enumeration used to indicate which memory component charges should be logged in detail.
*/
CostModel(bool headless, unsigned char logcosts)
: headless(headless), logcosts(logcosts) {
clearCosts();
}
/**
* \brief Main constructor which includes memory component sizes.
*
* Main constructor which includes memory component sizes.
*
* @param fvDimensions Vector holding the dimensions of the frame volume.
* @param inputVectorSize Integer holding the size of the input vector.
* @param headless Indicates if the display is headless, passed here for convenient retrieval by the application later.
* @param logcosts 8-bit field holding values from the log_charges enumeration used to indicate which memory component charges should be logged in detail.
*/
CostModel(vector<unsigned int> &fvDimensions, unsigned int inputVectorSize, bool headless, unsigned char logcosts)
: fvDimensions(fvDimensions), inputVectorSize(inputVectorSize), headless(headless), logcosts(logcosts) {
clearCosts();
}
/**
* \brief Destructor.
*
* Destructor.
*/
~CostModel() {
for (int i = 0; i < charges.size(); i++) {
delete(charges[i]);
}
}
/**
* \brief Zeroes the various counts and deletes any detailed memory component charges.
*
* Zeroes the various counts and deletes any detailed memory component charges. Used
* primarily after initial display setup to exclude those costs.
*/
void clearCosts() {
linkCommandsSent = 0;
linkBytesSent = 0;
pixelsBlended = 0;
pixelsMapped = 0;
inputVectorReads = 0;
inputVectorWrites = 0;
inputVectorBytesRead = 0;
inputVectorBytesWritten = 0;
coefficientPlaneReads = 0;
coefficientPlaneWrites = 0;
coefficientPlaneBytesRead = 0;
coefficientPlaneBytesWritten = 0;
frameVolumeReads = 0;
frameVolumeWrites = 0;
frameVolumeBytesRead = 0;
frameVolumeBytesWritten = 0;
for (int i = 0; i < charges.size(); i++) {
delete(charges[i]);
}
charges.clear();
}
/**
* \brief Registers a charge for the input vector memory component.
*
* Registers a charge for the input vector memory component.
* Will update the global counts and optionally log a detailed charge if
* the cost model was constructed with the proper log_charges.
*
* @param access The type of memory component access.
* @param start The start position of the access.
* @param end The end position of the access.
* @param count The number of identical charges to register. Default is 1.
*/
void registerInputVectorMemoryCharge(
memory_access_t access,
unsigned int start,
unsigned int end,
unsigned int count = 1) {
unsigned int bytes = (end - start + 1) * BYTES_PER_IV_VALUE * count;
if (access == READ_ACCESS) {
#pragma omp atomic
inputVectorReads++;
#pragma omp atomic
inputVectorBytesRead += bytes;
} else {
#pragma omp atomic
inputVectorWrites++;
#pragma omp atomic
inputVectorBytesWritten += bytes;
}
if (logcosts & IV_CHARGES) {
InputVectorCharge* c = new InputVectorCharge(charges.size(), access, start, end);
charges.push_back(c);
}
}
/**
* \brief Registers a single coefficient charge for the coefficient planes memory component.
*
* Registers a single coefficient charge for the coefficient planes memory component.
* Will update the global counts and optionally log a detailed charge if
* the cost model was constructed with the proper log_charges.
*
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
* @param cmRow The row of the coefficient being accessed.
* @param cmCol The column of the coefficient being accessed.
*/
void registerCoefficientMemoryCharge(
memory_access_t access,
vector<unsigned int> &start,
vector<unsigned int> &end,
int cmRow, int cmCol) {
assert(start.size() == end.size());
unsigned int bytes = BYTES_PER_COEFF;
for (int i = 0; i < start.size(); i++) {
bytes *= end[i] - start[i] + 1;
}
if (access == READ_ACCESS) {
#pragma omp atomic
coefficientPlaneReads++;
#pragma omp atomic
coefficientPlaneBytesRead += bytes;
} else {
#pragma omp atomic
coefficientPlaneWrites++;
#pragma omp atomic
coefficientPlaneBytesWritten += bytes;
}
if (logcosts & CM_CHARGES) {
CoefficientCharge* c = new CoefficientCharge(charges.size(), access, start, end, cmRow, cmCol);
charges.push_back(c);
}
}
/**
* \brief Registers a coefficient matrix charge for the coefficient planes memory component.
*
* Registers a coefficient matrix charge for the coefficient planes memory component.
* Will update the global counts and optionally log a detailed charge if
* the cost model was constructed with the proper log_charges.
*
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
* @param coefficientMatrix Holds a coefficient matrix with values masked out used to indicate
* which coefficients within the matrices were updated.
*/
void registerCoefficientMatrixMemoryCharge(
memory_access_t access,
vector<unsigned int> &start,
vector<unsigned int> &end,
vector< vector<int> > &coefficientMatrix) {
assert(start.size() == end.size());
unsigned int bytes = 0;
for (int row = 0; row < coefficientMatrix.size(); row++) {
for (int col = 0; col < coefficientMatrix[row].size(); col++) {
if (coefficientMatrix[row][col] != COEFFICIENT_UNCHANGED) {
bytes += BYTES_PER_COEFF;
}
}
}
for (int i = 0; i < start.size(); i++) {
bytes *= end[i] - start[i] + 1;
}
if (access == READ_ACCESS) {
#pragma omp atomic
coefficientPlaneReads++;
#pragma omp atomic
coefficientPlaneBytesRead += bytes;
} else {
#pragma omp atomic
coefficientPlaneWrites++;
#pragma omp atomic
coefficientPlaneBytesWritten += bytes;
}
if (logcosts & CM_CHARGES) {
CoefficientMatrixCharge* c = new CoefficientMatrixCharge(charges.size(), access, start, end, coefficientMatrix);
charges.push_back(c);
}
}
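// Illustrative example (not in the original header): for a single-matrix update
// (start == end elementwise) of a 2x2 coefficientMatrix in which only
// coefficientMatrix[0][1] differs from COEFFICIENT_UNCHANGED, the loops above
// charge BYTES_PER_COEFF bytes exactly once; a fully masked matrix registers a
// zero-byte charge.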
/**
* \brief Registers a scaler charge for the coefficient planes memory component.
*
* Registers a scaler charge for the coefficient planes memory component.
* Will update the global counts and optionally log a detailed charge if
* the cost model was constructed with the proper log_charges.
*
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
*/
void registerScalerMemoryCharge(
memory_access_t access,
vector<unsigned int> &start,
vector<unsigned int> &end) {
assert(start.size() == end.size());
unsigned int bytes = BYTES_PER_SCALER;
for (int i = 0; i < start.size(); i++) {
bytes *= end[i] - start[i] + 1;
}
// TODO(CDE): Update to use specific scaler counts. PixelBridge statistics and csv will need to be updated.
if (access == READ_ACCESS) {
#pragma omp atomic
coefficientPlaneReads++;
#pragma omp atomic
coefficientPlaneBytesRead += bytes;
} else {
#pragma omp atomic
coefficientPlaneWrites++;
#pragma omp atomic
coefficientPlaneBytesWritten += bytes;
}
if (logcosts & SC_CHARGES) {
ScalerCharge* c = new ScalerCharge(charges.size(), access, start, end);
charges.push_back(c);
}
}
/**
* \brief Registers a charge for the frame volume memory component.
*
* Registers a charge for the frame volume memory component.
* Will update the global counts and optionally log a detailed charge if
* the cost model was constructed with the proper log_charges.
*
* @param access The type of memory component access.
* @param start The start coordinate of the access.
* @param end The end coordinate of the access.
*/
void registerFrameVolumeMemoryCharge(
memory_access_t access,
vector<unsigned int> &start,
vector<unsigned int> &end) {
assert(start.size() == end.size());
unsigned int bytes = BYTES_PER_PIXEL;
for (int i = 0; i < start.size(); i++) {
bytes *= end[i] - start[i] + 1;
}
if (access == READ_ACCESS) {
#pragma omp atomic
frameVolumeReads++;
#pragma omp atomic
frameVolumeBytesRead += bytes;
} else {
#pragma omp atomic
frameVolumeWrites++;
#pragma omp atomic
frameVolumeBytesWritten += bytes;
}
if (logcosts & FV_CHARGES) {
FrameVolumeCharge* c = new FrameVolumeCharge(charges.size(), access, start, end);
charges.push_back(c);
}
}
/**
* \brief Registers a transmission charge.
*
* Registers a transmission charge, updating both the command count by one and the
* number of bytes transmitted.
*
* @param numBytes The number of bytes to charge.
* @param time Deprecated.
*/
void registerTransmissionCharge(unsigned long numBytes, unsigned long time) {
#pragma omp atomic
linkCommandsSent++;
#pragma omp atomic
linkBytesSent += numBytes;
}
/**
* \brief Registers a number of pixel blend charges.
*
* Registers a number of pixel blend charges.
*
* @param numBlends The number of blends to charge.
*/
void registerPixelBlendCharge(unsigned long numBlends) {
#pragma omp atomic
pixelsBlended += numBlends;
}
/**
* \brief Registers a number of pixel mapping charges.
*
* Registers a number of pixel mapping charges. A pixel mapping
* is the set of calculations to determine which pixel to map
* from the frame volume to a location on the display panel.
*
* @param numMappings The number of mappings to charge.
*/
void registerPixelMappingCharge(unsigned long numMappings) {
#pragma omp atomic
pixelsMapped += numMappings;
}
/**
* \brief Used to get the number of commands sent over the nDDI link.
*
* Used to get the number of commands sent over the nDDI link.
*
* @return Count of commands sent over the nDDI link.
*/
unsigned long getLinkCommandsSent() {
return linkCommandsSent;
}
/**
* \brief Used to get the number of bytes sent over the nDDI link.
*
* Used to get the number of bytes sent over the nDDI link.
*
* @return Count of bytes sent over the nDDI link.
*/
unsigned long getLinkBytesTransmitted() {
return linkBytesSent;
}
/**
* \brief Used to get the count of read accesses to a memory component.
*
* Used to get the count of read accesses to a memory component.
*
* @param component Specifies which component to get the read access count for.
* @return Count of read accesses to a memory component.
*/
unsigned long getReadAccessCount(component_t component) {
unsigned long count = 0;
switch (component) {
case INPUT_VECTOR_COMPONENT:
count = inputVectorReads;
break;
case COEFFICIENT_MATRIX_COMPONENT:
count = coefficientPlaneReads;
break;
case FRAME_VOLUME_COMPONENT:
count = frameVolumeReads;
break;
default:
break;
}
return count;
}
/**
* \brief Used to get the count of write accesses to a memory component.
*
* Used to get the count of write accesses to a memory component.
*
* @param component Specifies which component to get the write access count for.
* @return Count of write accesses to a memory component.
*/
unsigned long getWriteAccessCount(component_t component) {
unsigned long count = 0;
switch (component) {
case INPUT_VECTOR_COMPONENT:
count = inputVectorWrites;
break;
case COEFFICIENT_MATRIX_COMPONENT:
count = coefficientPlaneWrites;
break;
case FRAME_VOLUME_COMPONENT:
count = frameVolumeWrites;
break;
default:
break;
}
return count;
}
/**
* \brief Used to get the count of bytes read from a memory component.
*
* Used to get the count of bytes read from a memory component.
*
* @param component Specifies which component to get the bytes read for.
* @return Count of bytes read from a memory component.
*/
unsigned long getBytesRead(component_t component) {
unsigned long count = 0;
switch (component) {
case INPUT_VECTOR_COMPONENT:
count = inputVectorBytesRead;
break;
case COEFFICIENT_MATRIX_COMPONENT:
count = coefficientPlaneBytesRead;
break;
case FRAME_VOLUME_COMPONENT:
count = frameVolumeBytesRead;
break;
default:
break;
}
return count;
}
/**
* \brief Used to get the count of bytes written to a memory component.
*
* Used to get the count of bytes written to a memory component.
*
* @param component Specifies which component to get the bytes written for.
* @return Count of bytes written to a memory component.
*/
unsigned long getBytesWritten(component_t component) {
unsigned long count = 0;
switch (component) {
case INPUT_VECTOR_COMPONENT:
count = inputVectorBytesWritten;
break;
case COEFFICIENT_MATRIX_COMPONENT:
count = coefficientPlaneBytesWritten;
break;
case FRAME_VOLUME_COMPONENT:
count = frameVolumeBytesWritten;
break;
default:
break;
}
return count;
}
/**
* \brief Used to get the count of pixels blended.
*
* Used to get the count of pixels blended.
*
* @return Count of pixels blended.
*/
unsigned long getPixelsBlended() {
return pixelsBlended;
}
/**
* \brief Used to get the count of pixels mapped.
*
* Used to get the count of pixels mapped.
*
* @return Count of pixels mapped.
*/
unsigned long getPixelsMapped() {
return pixelsMapped;
}
/**
* \brief Used to determine if the display owning this CostModel is headless.
*
* Used to determine if the display owning this CostModel is headless.
*
* @return True if headless, false otherwise.
*/
bool isHeadless() {
return headless;
}
/**
* \brief Prints the JSON for all of the charges registered.
*
* Prints the JSON for all of the charges registered. Prints the configuration
* and then any detailed memory component charge that was logged. If an application
* wants to print the global counts, then the individual getters can be used to get
* the counts and the application can display them in the desired format.
*/
void printCharges() {
assert(fvDimensions.size() > 0);
assert(inputVectorSize > 0);
char filename[24];
srand(time(NULL) * getpid());
snprintf(filename, sizeof(filename), "costlog-%x.json", rand() % 0xffffff + 1);
logfile.open(filename);
cout << filename;
logfile << "{\n" << endl;
logfile << "\"config\" : {" << endl;
logfile << " \"bytePerPixel\": " << BYTES_PER_PIXEL << "," << endl;
logfile << " \"bytePerIvValue\": " << BYTES_PER_IV_VALUE << "," << endl;
logfile << " \"bytePerCoefficient\": " << BYTES_PER_COEFF << "," << endl;
logfile << " \"bytePerScaler\": " << BYTES_PER_SCALER << "," << endl;
logfile << " \"inputVectorSize\": " << inputVectorSize << "," << endl;
logfile << " \"fvDimensions\": [";
for (int i = 0; i < fvDimensions.size(); i++) {
if (i > 0)
logfile << ",";
logfile << fvDimensions[i];
}
logfile << "]" << endl;
logfile << "}," << endl;
logfile << "\"charges\": [" << endl;
for (int i = 0; i < charges.size(); i++) {
if (i > 0)
logfile << "," << endl;
charges[i]->print(logfile);
}
logfile << "]" << endl;
logfile << "\n}" << endl;
logfile.close();
}
};
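/**
 * \brief Illustrative usage sketch.
 *
 * Illustrative usage sketch (not part of the original header; all values are
 * hypothetical). Shows the expected lifecycle: construct the CostModel with the
 * memory component sizes, register charges as nDDI commands are processed, then
 * dump any detailed charges as JSON with printCharges().
 */
inline void exampleCostModelUsage() {
    vector<unsigned int> fvDims;
    fvDims.push_back(640);
    fvDims.push_back(480);
    fvDims.push_back(4);
    CostModel costModel(fvDims, 4, /* headless */ true, IV_CHARGES | FV_CHARGES);
    // One command transmitting 1024 bytes over the link (the time argument is deprecated).
    costModel.registerTransmissionCharge(1024, 0);
    // A write covering one full 640x480 plane of the frame volume.
    vector<unsigned int> start(3, 0);
    vector<unsigned int> end(3, 0);
    end[0] = 639;
    end[1] = 479;
    costModel.registerFrameVolumeMemoryCharge(WRITE_ACCESS, start, end);
    costModel.printCharges(); // writes costlog-<hex>.json and prints the filename
}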
}
#endif
|
CPUMatrixImpl.h | //
// Copyright (c) Microsoft. All rights reserved.
// Licensed under the MIT license. See LICENSE.md file in the project root for full license information.
//
// CPUMatrix.h : template implementation of all matrix functions on the CPU side
//
#pragma once
#include "Basics.h"
#include "File.h"
#include "CPUMatrix.h"
#include "TensorOps.h"
#include <assert.h>
#include <stdexcept>
#include <omp.h>
#include <math.h>
#include <random>
#include <chrono>
#include <exception>
#include <thread>
#include <iostream>
#include <algorithm>
#pragma warning(push)
#pragma warning(disable:4244) // 'conversion' conversion from 'type1' to 'type2', possible loss of data
#include <boost/random/normal_distribution.hpp>
#pragma warning(pop)
#include <boost/random/uniform_real_distribution.hpp>
#ifdef _WIN32
#define NOMINMAX
#include "Windows.h"
#else
#include <cfloat>
#endif
#ifdef LEAKDETECT
#include <vld.h>
#endif
#pragma warning(disable : 4100) // unreferenced formal parameter; "struct TensorOpReduction<ElemType, OPFN, typename ReductionOp, N, -1>" trigger this
#pragma warning(disable : 4127) // conditional expression is constant; "if (sizeof(ElemType)==sizeof(float))" triggers this
#pragma warning(disable : 4244) // conversion from 'double' to 'float', possible loss of data
#pragma warning(disable : 4702) // unreachable code; triggered for unknown reasons
#ifdef USE_MKL
// requires MKL 10.0 and above
#include <mkl.h>
#else
#ifdef _MSC_VER
// Visual Studio doesn't define standard complex types properly
#define HAVE_LAPACK_CONFIG_H
#define LAPACK_COMPLEX_STRUCTURE
#endif
#include <cblas.h>
#include <lapacke.h>
#endif
#define SWAP(a, b) \
{ \
(a) ^= (b); \
(b) ^= (a); \
(a) ^= (b); \
}
#define IDX2C(i, j, ld) (((j) * (ld)) + (i)) // 0 based indexing
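// Illustrative note (not in the original source): IDX2C maps a (row, column) pair
// to a linear offset in column-major storage, e.g. for a matrix with ld == 4 rows,
// IDX2C(2, 1, 4) == 1 * 4 + 2 == 6. Note that the XOR-based SWAP above is only
// safe for two distinct objects; SWAP(x, x) would zero x.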
namespace Microsoft { namespace MSR { namespace CNTK {
#pragma region Helpful Enum Definitions
enum class MatrixOrder
{
RowMajor = 101, // row-major arrays
ColMajor = 102 // column-major arrays
};
enum class MatrixTranspose : char
{
NoTrans = 'N', // trans='N'
Trans = 'T', // trans='T'
ConjTrans = 'C' // trans='C'
};
enum class SymMatrixType : char
{
Up = 'U', // symmetric matrix is stored in the upper part
Low = 'L', // symmetric matrix is stored in the lower part
Full = 'F', // full populated
NotSymmetric = 'N' // not a symmetric matrix
};
enum class MatrixOpSide : char
{
Left = 'L', // left multiply
Right = 'R', // right multiply
};
#pragma endregion Helpful Enum Definitions
#pragma region Constructors and Destructor
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix()
{
ZeroInit();
}
// helper to allocate an array of ElemType
// Use this instead of new[] to get NaN initialization for debugging.
template <class ElemType>
static ElemType* NewArray(size_t n)
{
ElemType* p = new ElemType[n]();
#if 0 // _DEBUG
ElemType nan = Matrix<ElemType>::MakeNan(__LINE__);
for (size_t i = 0; i < n; i++)
p[i] = nan;
#endif
return p;
}
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols)
{
ZeroInit();
m_numRows = numRows;
m_numCols = numCols;
SetSizeAllocated(GetNumElements());
if (GetNumElements() != 0)
{
SetBuffer(NewArray<ElemType>(GetNumElements()), GetNumElements() * sizeof(ElemType));
}
}
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
ZeroInit();
SetValue(numRows, numCols, pArray, matrixFlags);
}
//copy constructor, deep copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& deepCopyFrom)
{
ZeroInit();
SetValue(deepCopyFrom);
}
//assignment operator, deep copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(const CPUMatrix<ElemType>& deepCopyFrom)
{
SetValue(deepCopyFrom);
return *this;
}
//move constructor, shallow copy
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(CPUMatrix<ElemType>&& moveFrom)
: Base(/* shallow */ true)
{
ShallowCopyFrom(moveFrom);
moveFrom.ZeroValues();
}
// Shortcut of default constructor + shallow copy, to avoid one initialization
template <class ElemType>
CPUMatrix<ElemType>::CPUMatrix(const CPUMatrix<ElemType>& shallowCopyFrom, bool shallow)
: Base(shallow)
{
ShallowCopyFrom(shallowCopyFrom);
}
//move assignment operator, shallow copy
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator=(CPUMatrix<ElemType>&& moveFrom)
{
if (this != &moveFrom)
{
ShallowCopyFrom(moveFrom);
// release the pointer from the source object so that the destructor won't release it twice
moveFrom.ZeroValues();
}
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::Clear()
{
ZeroInit();
}
#pragma endregion Constructors and Destructor
#pragma region Basic Operators
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::ColumnSlice(size_t startColumn, size_t numCols) const
{
if (startColumn + numCols > m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) m_numCols);
CPUMatrix<ElemType> slice(*this, /* shallow= */ true);
slice.m_numCols = numCols;
slice.m_sliceViewOffset = m_sliceViewOffset + startColumn * m_numRows;
return slice;
}
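// Usage sketch for ColumnSlice (hypothetical matrix M): the slice is a shallow
// view over the same buffer, so writes through it are visible in the original:
//   CPUMatrix<float> M(4, 10);
//   CPUMatrix<float> view = M.ColumnSlice(2, 3); // columns 2..4 of M, no copy
//   view.SetValue(1.0f);                         // also writes into M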
// set this(:, 0:numCols-1) = fromMatrix(:, startColumn : startColumn+numCols-1)
// TODO: why not say *this = ColumnSlice()?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > fromMatrix.m_numCols)
InvalidArgument("The slice (%d+%d) is out of range of the source matrix (%d).", (int) startColumn, (int) numCols, (int) fromMatrix.m_numCols);
Clear();
ShallowCopyFrom(fromMatrix);
m_numCols = numCols;
m_sliceViewOffset = fromMatrix.m_sliceViewOffset + startColumn * m_numRows;
return *this;
}
// set this(: , startColumn:startColumn+numCols-1)= fromMatrix;
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetColumnSlice(const CPUMatrix<ElemType>& fromMatrix, size_t startColumn, size_t numCols)
{
if (startColumn + numCols > m_numCols)
LogicError("The slice is out of range of the destination matrix.");
if (numCols > fromMatrix.GetNumCols())
InvalidArgument("The slice (%d) is out of range of the source matrix (%d).", (int) numCols, (int) fromMatrix.GetNumCols());
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
memcpy(Data() + startColumn * m_numRows, fromMatrix.Data(), numCols * m_numRows * sizeof(ElemType));
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::CopyColumnsStrided(const CPUMatrix<ElemType>& fromMatrix, size_t numCols, size_t srcNumColsStride, size_t destNumColsStride)
{
if ((((numCols - 1) * srcNumColsStride) + 1) > fromMatrix.m_numCols)
LogicError("The numCols to copy and srcNumColsStride specified is out of range of the source matrix.");
if ((((numCols - 1) * destNumColsStride) + 1) > m_numCols)
LogicError("The numCols to copy and srcNumColsStride specified is out of range of the destination matrix.");
if (m_numRows != fromMatrix.m_numRows)
LogicError("The number of rows in source and destination matrices do not match");
long n = (long) numCols, m = (long) m_numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (size_t i = 0; i < (m & ~3); i += 4)
{
us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
us(i + 1, j * destNumColsStride) = fromMatrix(i + 1, j * srcNumColsStride);
us(i + 2, j * destNumColsStride) = fromMatrix(i + 2, j * srcNumColsStride);
us(i + 3, j * destNumColsStride) = fromMatrix(i + 3, j * srcNumColsStride);
}
// handle remaining
for (size_t i = m & ~3; i < m; i++)
{
us(i, j * destNumColsStride) = fromMatrix(i, j * srcNumColsStride);
}
}
}
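// (The "four-way unrolling" pattern above, used throughout this file, rounds
// the row count down to a multiple of 4 via m & ~3, processes rows in groups
// of four to help the compiler vectorize, and mops up the remainder in a
// scalar loop.)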
//for each column of a, we assign all rows of a to this starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.GetNumRows() != numRows)
LogicError("AssignToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AssignToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AssignToRowSliceValuesOf: columns do not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (size_t i = 0, startRow = startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) = a(i, j);
us(startRow + 1, j) = a(i + 1, j);
us(startRow + 2, j) = a(i + 2, j);
us(startRow + 3, j) = a(i + 3, j);
}
// handle remaining stuffs
for (size_t i = m & ~3, startRow = startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) = a(i, j);
}
}
return *this;
}
//for each column of a, we assign numRows rows of a starting from startIndex to this
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (startIndex + numRows > a.GetNumRows())
LogicError("AssignRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
RequireSize(numRows, a.GetNumCols());
long n = (long) a.GetNumCols(); // note: OpenMP requires loop indices to be long, not size_t
long k = (long) a.GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// memory copy might be faster?
memcpy(Data() + j * numRows, a.Data() + j * k + startIndex, sizeof(ElemType) * numRows);
// //four-way unrolling
// for (long i=0, startRow = startIndex; i<(m & ~3); i+=4, startRow+=4)
// {
// us(i,j) = a(startRow,j);
// us(i+1,j) = a(startRow+1,j);
// us(i+2,j) = a(startRow+2,j);
// us(i+3,j) = a(startRow+3,j);
// }
// //handle remaining stuffs
// for (long i=m & ~3, startRow = startIndex+(m & ~3); i<m; i++, startRow++)
// {
// us(i,j) = a(startRow,j);
// }
}
return *this;
}
//for the row slice of this starting from startIndex we add a to it.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddToRowSliceValuesOf: input matrix a is empty.");
if (a.GetNumRows() != numRows)
LogicError("AddToRowSliceValuesOf: a.GetNumRows() != numRows.");
if (startIndex + numRows > GetNumRows())
LogicError("AddToRowSliceValuesOf: startIndex + numRows exceeds GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddToRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(startRow, j) += a(i, j);
us(startRow + 1, j) += a(i + 1, j);
us(startRow + 2, j) += a(i + 2, j);
us(startRow + 3, j) += a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(startRow, j) += a(i, j);
}
}
return *this;
}
//for each column of this, we add row slice of a starting from startIndex
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithRowSliceValuesOf(const CPUMatrix<ElemType>& a, const size_t startIndex, const size_t numRows)
{
if (a.IsEmpty())
LogicError("AddWithRowSliceValuesOf: input matrix a is empty.");
if (GetNumRows() != numRows)
LogicError("AddWithRowSliceValuesOf: GetNumRows() != numRows.");
if (startIndex + numRows > a.GetNumRows())
LogicError("AddWithRowSliceValuesOf: startIndex + numRows exceeds a.GetNumRows().");
if (a.GetNumCols() != GetNumCols())
LogicError("AddWithRowSliceValuesOf: columns does not match.");
long n = (long) a.GetNumCols(), m = (long) numRows;
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0, startRow = (long) startIndex; i < (m & ~3); i += 4, startRow += 4)
{
us(i, j) += a(startRow, j);
us(i + 1, j) += a(startRow + 1, j);
us(i + 2, j) += a(startRow + 2, j);
us(i + 3, j) += a(startRow + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3, startRow = (long) startIndex + (m & ~3); i < m; i++, startRow++)
{
us(i, j) += a(startRow, j);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Diagonal() const
{
if (m_numRows != m_numCols)
LogicError("Diagonal can be called only for square matrix. (rows=%d, cols=%d)", (int) m_numRows, (int) m_numCols);
CPUMatrix<ElemType> diag(1, m_numCols);
auto& us = *this;
#pragma omp parallel for
for (long i = 0; i < m_numRows; i++)
{
diag(0, (size_t) i) = us(i, i);
}
return diag;
}
template <class ElemType>
void CPUMatrix<ElemType>::MinusOneAt(CPUMatrix<ElemType>& c, const size_t position)
{
if (position < c.GetNumElements())
c.Data()[position] -= 1.0;
else
RuntimeError("MinusOneAt: position is out of CPU matrix size");
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignRepeatOf(const CPUMatrix<ElemType>& a, const size_t numRowRepeats, const size_t numColRepeats)
{
if (this == &a)
LogicError("AssignRepeatOf: a is the same as [this]. Does not support inplace repeat.");
if (a.IsEmpty())
LogicError("AssignRepeatOf: Matrix a is empty.");
RequireSize(a.GetNumRows() * numRowRepeats, a.GetNumCols() * numColRepeats);
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long q = 0; q < numColRepeats; q++)
{
for (long p = 0; p < numRowRepeats; p++)
{
long colOffset = q * n;
for (long j = 0; j < n; j++, colOffset++)
{
long rowOffset = p * m;
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4, rowOffset += 4)
{
us(rowOffset, colOffset) = a(i, j);
us(rowOffset + 1, colOffset) = a(i + 1, j);
us(rowOffset + 2, colOffset) = a(i + 2, j);
us(rowOffset + 3, colOffset) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++, rowOffset++)
{
us(rowOffset, colOffset) = a(i, j);
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddToRowRepeatValuesOf(const CPUMatrix<ElemType>& a, const size_t numRepeats)
{
if (a.IsEmpty())
LogicError("AddToRowRepeatValuesOf: input matrix a is empty.");
if (a.GetNumRows() != GetNumRows() * numRepeats)
LogicError("AddToRowRepeatValuesOf: a.GetNumRows() != GetNumRows() * numRepeats.");
long n = (long) a.GetNumCols(), m = (long) GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
us(i + 1, j) += a(k * m + i + 1, j);
us(i + 2, j) += a(k * m + i + 2, j);
us(i + 3, j) += a(k * m + i + 3, j);
}
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
for (long k = 0; k < numRepeats; k++)
{
us(i, j) += a(k * m + i, j);
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
UNUSED(a);
UNUSED(posNumber);
UNUSED(negNumber);
UNUSED(shiftNumber);
NOT_IMPLEMENTED;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddFoldedPositiveAndShiftedNegSample(const CPUMatrix<ElemType>& a, const size_t posNumber, const size_t negNumber, const size_t shiftNumber)
{
UNUSED(a);
UNUSED(posNumber);
UNUSED(negNumber);
UNUSED(shiftNumber);
NOT_IMPLEMENTED;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Transpose()
{
if (IsEmpty())
LogicError("Transpose: Matrix is empty.");
CPUMatrix<ElemType> c;
c.AssignTransposeOf(*this);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTransposeOf(const CPUMatrix<ElemType>& a)
{
if (this == &a)
LogicError("AssignTransposeOf: a is the same as [this]. Does not support inplace transpose.");
if (a.IsEmpty())
LogicError("AssignTransposeOf: Matrix a is empty.");
RequireSize(a.GetNumCols(), a.GetNumRows());
long n = (long) a.GetNumCols(), m = (long) a.GetNumRows();
auto& us = *this;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(j, i) = a(i, j);
us(j, i + 1) = a(i + 1, j);
us(j, i + 2) = a(i + 2, j);
us(j, i + 3) = a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(j, i) = a(i, j);
}
}
return *this;
}
// dst[i] = src[i] * alpha + dst[i] * beta
// scale a column vector and add it to another
// The usual special case: If beta = 0, then dst[] is not read, and may be uninitialized or NaN.
template <class ElemType>
static void ScaleAndAddColumn(ElemType beta, ElemType* dst, const ElemType* src, size_t numRows, ElemType alpha)
{
if (alpha != 1) // rare case: just do the full thing
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + alpha * src[i];
else if (beta == 1) // used in backprop
for (size_t i = 0; i < numRows; i++)
dst[i] += src[i];
else if (beta == 0) // plain assignment
memcpy(dst, src, sizeof(ElemType) * numRows);
else // alpha=1, arbitrary beta: also rare case
for (size_t i = 0; i < numRows; i++)
dst[i] = beta * dst[i] + src[i];
}
// *this[:,j] = a[:,idx[j]] * alpha + *this[:,j] * beta
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoGatherColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoGatherColumnsOf: Map must be a row vector.");
if (beta)
VerifySize(a.GetNumRows(), idx.GetNumCols());
else
Resize(a.GetNumRows(), idx.GetNumCols());
auto& us = *this;
// race-condition consideration: Since this loops over independent output columns, this has no race condition. Cf. DoScatterColumnsOf().
#pragma omp parallel for // TODO: Depending on circumstances, it may be more efficient to parallelize over rows.
foreach_column(jOut, us)
{
auto jInF = idx(0, jOut); // this is the column we need to get
if (std::isnan(jInF) || jInF < 0) // negative index means gap
continue;
size_t jIn = (size_t)jInF;
if (jIn >= a.GetNumCols())
InvalidArgument("DoGatherColumnsOf: Map out of bounds. %ld >= %ld", (long int)jIn, (long int)a.GetNumCols());
ScaleAndAddColumn(beta, &us(0,jOut), &a(0,jIn), us.GetNumRows(), alpha);
}
return *this;
}
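// Usage sketch (hypothetical values): with a = [c0 c1 c2] and idx = [2 0],
// DoGatherColumnsOf(0, idx, a, 1) yields [c2 c0]. A NaN or negative entry in
// idx marks a gap: the corresponding output column is simply left unwritten.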
// *this[:,idx[j]] = a[:,j] * alpha + *this[:,idx[j]] * beta
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DoScatterColumnsOf(ElemType beta, const CPUMatrix<ElemType>& idx, const CPUMatrix<ElemType>& a, ElemType alpha)
{
if (idx.GetNumRows() != 1) // index is 1-dimensional only
InvalidArgument("DoScatterColumnsOf: Map must be a row vector.");
if (idx.GetNumCols() != a.GetNumCols())
InvalidArgument("DoScatterColumnsOf: Map must have width of input vector.");
if (a.GetNumRows() != GetNumRows())
InvalidArgument("DoScatterColumnsOf: Output must have same height as input vector.");
auto& us = *this;
// pre-scale with beta upfront
// Scatter may add more than one source column to the same target, so we must pre-scale with beta, and then just keep adding.
Scale(beta, us); // if beta is 0, then this will be a memset()
// race-condition consideration: If idx[] references the same target column multiple times, this can have a race condition,
// and hence cannot use parallelism.
//#pragma omp parallel for // TODO: Depending on circumstances, it may be more efficient to parallelize over rows.
foreach_column(jIn, a)
{
auto jOutF = idx(0, jIn); // this is the column we copy/add into
if (std::isnan(jOutF) || jOutF < 0) // negative index means gap
continue;
size_t jOut = (size_t)jOutF;
if (jOut >= GetNumCols())
InvalidArgument("DoGatherColumnsOf: Map out of bounds.");
ScaleAndAddColumn(/*beta=*/(ElemType)1, &us(0, jOut), &a(0, jIn), us.GetNumRows(), alpha);
}
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const ElemType v)
{
if (IsEmpty())
LogicError("SetValue: Matrix is empty.");
bool isFinite = std::numeric_limits<ElemType>::is_integer || std::isfinite((double) v);
if (isFinite && v == 0)
{
memset(Data(), 0, sizeof(ElemType) * GetNumElements());
}
else
{
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// 2-way thread parallelism is sufficient for the memory bound
// operation of just setting the values of an array.
const unsigned SETVALUE_NUM_THREADS = 2;
UNUSED(SETVALUE_NUM_THREADS); // in case OMP is turned off.
#pragma omp parallel for num_threads(SETVALUE_NUM_THREADS)
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = v;
bufPtr[i + 1] = v;
bufPtr[i + 2] = v;
bufPtr[i + 3] = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = v;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaskColumnsValue(const CPUMatrix<char>& columnsMask, ElemType val, size_t numColsPerMaskEntry)
{
if (GetNumCols() != (columnsMask.GetNumCols() * numColsPerMaskEntry))
RuntimeError("MaskColumnsValue: Matrix number of columns must equal 'column mask number of columns * numColsPerMaskEntry'.");
auto& us = *this;
long n = (long)columnsMask.GetNumCols(), m = (long) GetNumRows();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
if (columnsMask(0, j) == 1)
continue;
for (long k = 0; k < numColsPerMaskEntry; ++k)
{
// four-way unrolling
for (size_t i = 0; i < (m & ~3); i += 4)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
us(i + 1, (j * numColsPerMaskEntry) + k) = val;
us(i + 2, (j * numColsPerMaskEntry) + k) = val;
us(i + 3, (j * numColsPerMaskEntry) + k) = val;
}
// handle remaining
for (size_t i = m & ~3; i < m; i++)
{
us(i, (j * numColsPerMaskEntry) + k) = val;
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType* colPointer, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (colPointer == NULL)
return;
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = colPointer[i];
us(i + 1, j) = colPointer[i + 1];
us(i + 2, j) = colPointer[i + 2];
us(i + 3, j) = colPointer[i + 3];
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = colPointer[i];
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const ElemType val, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = val;
us(i + 1, j) = val;
us(i + 2, j) = val;
us(i + 3, j) = val;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = val;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetColumn(const CPUMatrix<ElemType>& valMat, size_t j)
{
if (IsEmpty())
LogicError("SetColumn: Matrix is empty.");
if (valMat.GetNumRows() != GetNumRows() || valMat.GetNumCols() != 1)
LogicError("The valMat matrix has incorrect number of rows or columns.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = valMat(i, 0);
us(i + 1, j) = valMat(i + 1, 0);
us(i + 2, j) = valMat(i + 2, 0);
us(i + 3, j) = valMat(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = valMat(i, 0);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUMatrix<ElemType>& deepCopyFrom)
{
if (this == &deepCopyFrom)
return;
SetValue(deepCopyFrom.GetNumRows(), deepCopyFrom.GetNumCols(), deepCopyFrom.Data(), 0);
}
#if 0
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const CPUSparseMatrix<ElemType>& deepCopyFrom)
{
deepCopyFrom.AssignColumnSliceToDense(*this, 0, deepCopyFrom.GetNumCols());
}
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const GPUSparseMatrix<ElemType>& /*deepCopyFrom*/)
{
NOT_IMPLEMENTED;
}
#endif
template <class ElemType>
void CPUMatrix<ElemType>::SetValue(const size_t numRows, const size_t numCols, ElemType* pArray, const size_t matrixFlags)
{
if (pArray == nullptr && numRows * numCols > 0)
InvalidArgument("Invalid pArray. pArray == nullptr, but matrix is of size %d * %d = %d.", (int)numRows, (int)numCols, (int)(numRows * numCols));
SetFormat(matrixFormatDense);
SetComputeDeviceId(CPUDEVICE);
// if it's externally managed, then populate the structure
if (matrixFlags & matrixFlagDontOwnBuffer)
{
// free previous array allocation if any before overwriting
delete[] Buffer();
m_numRows = numRows;
m_numCols = numCols;
SetBuffer(pArray, GetNumElements() * sizeof(ElemType), true);
SetSizeAllocated(GetNumElements());
}
else
{
RequireSize(numRows, numCols);
if (!IsEmpty())
{
if (!(matrixFlags & matrixFormatRowMajor)) // compatible to internal structure
memcpy(Data(), pArray, GetNumElements() * sizeof(ElemType));
else // need to transpose
{
ElemType* bufPtr = Data();
auto& us = *this;
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, us)
{
cblas_dcopy((int) numRows, reinterpret_cast<double*>(pArray + j), (int) numCols, reinterpret_cast<double*>(bufPtr + LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, us)
{
{
#pragma warning(suppress : 4244)
cblas_scopy((int) numRows, reinterpret_cast<float*>(pArray + j), (int) numCols, reinterpret_cast<float*>(bufPtr + LocateColumn(j)), 1);
}
}
}
}
}
}
}
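// (In the row-major branch above, the cblas_?copy calls read the source with
// stride numCols and write each destination column contiguously, i.e. they
// transpose the layout on the fly.)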
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const ElemType v)
{
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
auto& us = *this;
long m = (long) GetNumRows();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = v;
us(i + 1, i + 1) = v;
us(i + 2, i + 2) = v;
us(i + 3, i + 3) = v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = v;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetDiagonalValue(const CPUMatrix<ElemType>& vector)
{
if (IsEmpty() || vector.IsEmpty())
LogicError("SetDiagonalValue: Matrix is empty.");
if (GetNumRows() != GetNumCols())
LogicError("SetDiagonalValue: NumRows and NumCols do not agree.");
if (vector.GetNumRows() != 1 && vector.GetNumCols() != 1)
LogicError("SetDiagonalValue: input must be a row or column vector.");
if (vector.GetNumElements() == 1) // reduce to simple form
SetDiagonalValue(vector(0, 0));
else if (vector.GetNumRows() != GetNumRows() && vector.GetNumCols() != GetNumRows())
LogicError("SetDiagonalValue: input vector's dimension does not agree with [this].");
else
{
auto& us = *this;
long m = (long) GetNumRows();
if (vector.GetNumRows() == 1) // row vector
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(0, i);
us(i + 1, i + 1) = vector(0, i + 1);
us(i + 2, i + 2) = vector(0, i + 2);
us(i + 3, i + 3) = vector(0, i + 3);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(0, i);
}
}
else
{
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, i) = vector(i, 0);
us(i + 1, i + 1) = vector(i + 1, 0);
us(i + 2, i + 2) = vector(i + 2, 0);
us(i + 3, i + 3) = vector(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, i) = vector(i, 0);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomValue(const ElemType low, const ElemType high, unsigned long seed)
{
if (IsEmpty())
LogicError("SetUniformRandomValue: Matrix is empty.");
std::mt19937_64 generator;
generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::uniform_real_distribution<ElemType> r(low, high);
ElemType* bufPtr = Data();
long m = (long) GetNumElements();
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
bufPtr[i] = r(generator);
bufPtr[i + 1] = r(generator);
bufPtr[i + 2] = r(generator);
bufPtr[i + 3] = r(generator);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
bufPtr[i] = r(generator);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::SetGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("SetGaussianRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("SetGaussianRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::normal_distribution<ElemType> r(mean, sigma);
// #pragma omp parallel for // is it thread safe?
foreach_coord (i, j, us)
{
us(i, j) = r(generator);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AddGaussianRandomValue(const ElemType mean, const ElemType sigma, unsigned long seed)
{
if (sigma <= 0)
InvalidArgument("AddGaussianRandomValue: sigma must be a positive value.");
if (IsEmpty())
LogicError("AddGaussianRandomValue: Matrix is empty.");
auto& us = *this;
std::mt19937_64 generator;
generator.seed(seed == USE_TIME_BASED_SEED ? (unsigned long) time(NULL) : seed);
boost::random::normal_distribution<ElemType> r(mean, sigma);
long m = (long) GetNumRows(), n = (long) GetNumCols();
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = r(generator);
us(i + 1, j) = r(generator);
us(i + 2, j) = r(generator);
us(i + 3, j) = r(generator);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = r(generator);
}
}
}
//maskRate: percentage of values masked out (similar to dropout rate)
//scaleValue: scale applied to the remaining (unmasked) items
template <class ElemType>
void CPUMatrix<ElemType>::SetUniformRandomMask(const ElemType maskRate, const ElemType scaleValue, RNGHandle& rngHandle)
{
if (IsEmpty())
LogicError("SetUniformRandomMask: Matrix is empty.");
CPURNGHandle* cpuRNGHandle = dynamic_cast<CPURNGHandle*>(&rngHandle);
if (cpuRNGHandle == nullptr)
LogicError("rngHandle must be a CPURNGHandle.");
auto& us = *this;
boost::random::uniform_real_distribution<ElemType> r(0, 1);
long m = (long) GetNumRows(), n = (long) GetNumCols();
ElemType v;
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
v = r(cpuRNGHandle->Generator());
us(i, j) = v <= maskRate ? 0 : scaleValue;
v = r(cpuRNGHandle->Generator());
us(i + 1, j) = v <= maskRate ? 0 : scaleValue;
v = r(cpuRNGHandle->Generator());
us(i + 2, j) = v <= maskRate ? 0 : scaleValue;
v = r(cpuRNGHandle->Generator());
us(i + 3, j) = v <= maskRate ? 0 : scaleValue;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
v = r(cpuRNGHandle->Generator());
us(i, j) = v <= maskRate ? 0 : scaleValue;
}
}
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::Adagrad(CPUMatrix<ElemType>& gradients, const bool needAveMultiplier)
{
ElemType aveMultiplier = 0;
if (IsEmpty() || gradients.GetNumCols() != GetNumCols() || gradients.GetNumRows() != GetNumRows())
{
RequireSize(gradients.GetNumRows(), gradients.GetNumCols());
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols())
LogicError("The matrix gradients must have the same rows and columns as this matrix.");
ElemType *a = Data(), *d_v = gradients.Data();
size_t n = GetNumElements();
const ElemType floor = 1e-16f;
ElemType a0, a1, a2, a3;
// disable omp here because aveMultiplier needs to be accumulated atomically; the result is incorrect even with omp atomic / omp critical.
// #pragma omp parallel for
for (long i = 0; i < (n & ~3); i += 4) // four-way unrolling
{
a[i] += d_v[i] * d_v[i];
a[i + 1] += d_v[i + 1] * d_v[i + 1];
a[i + 2] += d_v[i + 2] * d_v[i + 2];
a[i + 3] += d_v[i + 3] * d_v[i + 3];
a0 = sqrt(a[i] + floor);
a1 = sqrt(a[i + 1] + floor);
a2 = sqrt(a[i + 2] + floor);
a3 = sqrt(a[i + 3] + floor);
d_v[i] /= a0;
d_v[i + 1] /= a1;
d_v[i + 2] /= a2;
d_v[i + 3] /= a3;
if (needAveMultiplier)
{
aveMultiplier += 1 / a0 + 1 / a1 + 1 / a2 + 1 / a3;
}
}
// get the last few elements if any
for (long i = n & ~3; i < n; i++)
{
a[i] += d_v[i] * d_v[i];
a0 = sqrt(a[i] + floor);
d_v[i] /= a0;
if (needAveMultiplier)
{
aveMultiplier += 1 / a0;
}
}
if (needAveMultiplier && n > 0)
return aveMultiplier / n;
else
return 1;
}
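// Per-element AdaGrad step performed above, in math form:
//   accum_i += g_i^2
//   g_i     /= sqrt(accum_i + 1e-16)
// The return value is the mean of 1/sqrt(accum_i + floor) (or 1 when no
// average multiplier is requested), which the caller uses to normalize the
// learning rate.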
template <class ElemType>
void CPUMatrix<ElemType>::FSAdagrad(CPUMatrix<ElemType>& gradients,
CPUMatrix<ElemType>& functionValues,
ElemType learnRatePerSample,
ElemType momentum,
ElemType adaWeight,
ElemType adaMul,
bool unitGainMomentum)
{
auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
size_t numColsNeeded = 2 * gradients.GetNumCols();
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothMom = Data() + n;
ElemType* val = functionValues.Data();
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
smoothAda[i] = adaSqr;
if (adaSqr != 0.0f)
{
ElemType ada = sqrt(adaSqr);
ElemType w = adaMul * ((ElemType) 1.0 / ada);
if (w > 10.0f)
w = 10.0f;
g *= w;
}
if (momentum > 0.0f)
{
g = momentum * smoothMom[i] + unitGainFactor * g;
smoothMom[i] = g;
}
g *= learnRatePerSample;
val[i] -= g;
}
}
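// Per-element FSAdaGrad step performed above, in math form:
//   s_i  = adaWeight * s_i + (1 - adaWeight) * g_i^2
//   g_i *= min(adaMul / sqrt(s_i), 10)            (skipped when s_i == 0)
//   m_i  = momentum * m_i + unitGainFactor * g_i  (when momentum > 0, else m_i = g_i)
//   w_i -= learnRatePerSample * m_i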
template <class ElemType>
void CPUMatrix<ElemType>::Adam(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learnRatePerSample,
ElemType momentum, ElemType adaWeight, ElemType adaMul, bool unitGainMomentum)
{
size_t numColsNeeded = 2 * gradients.GetNumCols();
auto unitGainFactor = ElemType(unitGainMomentum ? (1.0 - momentum) : 1.0);
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothMom = Data() + n;
ElemType* val = functionValues.Data();
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType adaSqr = adaWeight * smoothAda[i] + (1.0f - adaWeight) * g * g;
smoothAda[i] = adaSqr;
ElemType ada = sqrt(adaSqr);
ElemType w = adaMul * (ElemType)( 1.0 / (ada + 1e-8));
g = momentum * smoothMom[i] + unitGainFactor * g;
smoothMom[i] = g;
val[i] -= g * w * learnRatePerSample;
}
}
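// Per-element Adam-style step performed above; any bias correction is assumed
// to be folded into adaMul by the caller:
//   s_i  = adaWeight * s_i + (1 - adaWeight) * g_i^2
//   m_i  = momentum * m_i + unitGainFactor * g_i
//   w_i -= learnRatePerSample * adaMul * m_i / (sqrt(s_i) + 1e-8)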
template <class ElemType>
ElemType CPUMatrix<ElemType>::RmsProp(CPUMatrix<ElemType>& gradients,
ElemType RMS_GAMMA,
ElemType RMS_WGT_INC,
ElemType RMS_WGT_MAX,
ElemType RMS_WGT_DEC,
ElemType RMS_WGT_MIN,
const bool needAveMultiplier)
{
const ElemType floor = 1e-6f;
size_t n = gradients.GetNumElements();
ElemType* curr_grad = gradients.Data();
if (IsEmpty() || GetNumCols() < gradients.GetNumCols() * 3)
{
RequireSize(gradients.GetNumRows(), gradients.GetNumCols() * 3);
SetValue(0.0);
ElemType* avars = Data(); // accumulated variances for RMS scaling
ElemType* steps = Data() + 2 * n; // current step size
// initialize moving average of gradient-squared
for (long i = 0; i < n; i++)
avars[i] = curr_grad[i] * curr_grad[i];
// initialize starting step size
for (long i = 0; i < n; i++)
steps[i] = ElemType(0.02);
}
ElemType* avars = Data(); // accumulated variances for RMS scaling
ElemType* signs = Data() + n; // sign of previous gradient
ElemType* steps = Data() + 2 * n; // current step size
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != gradients.GetNumCols() * 3)
LogicError("The matrix gradients does not have expected dimensions.");
ElemType ONE_MINUS_GAMMA = ElemType(1.0) - RMS_GAMMA;
// int upd[] = {
// 2,2,0,
// 2,2,0,
// 1,1,1,
// 2,2,0,
// 1,2,1,
// 0,2,2,
// 1,1,1,
// 0,2,2,
// 0,2,2,
// };
// for (long i=0; i<n; i++)
// {
// avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
// // grad sign base 3: 0->neg, 1->zero, 2->pos
// const int grad_sign = 1 + (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
// // signs[i] contains three consecutive grad_sign
// signs[i] = 3*(int(signs[i]) % 9) + grad_sign;
// switch(upd[int(signs[i])])
// {
// case 0:
// steps[i] = max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
// break;
// case 2:
// steps[i] = min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
// break;
// }
// curr_grad[i] *= steps[i] / sqrt(avars[i] + floor);
// }
ElemType aveMultiplier = 0, a;
for (long i = 0; i < n; i++)
{
avars[i] = RMS_GAMMA * avars[i] + ONE_MINUS_GAMMA * (curr_grad[i] * curr_grad[i]);
const int grad_sign = (ElemType(0) < curr_grad[i]) - (curr_grad[i] < ElemType(0));
if (signs[i] * grad_sign > 0)
steps[i] = std::min(steps[i] * RMS_WGT_INC, RMS_WGT_MAX);
else
steps[i] = std::max(steps[i] * RMS_WGT_DEC, RMS_WGT_MIN);
a = steps[i] / sqrt(avars[i] + floor);
curr_grad[i] *= a;
signs[i] = (ElemType) grad_sign;
if (needAveMultiplier)
aveMultiplier += a;
}
if (needAveMultiplier)
return aveMultiplier / n;
else
return 1;
}
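// The loop above combines RMS normalization with an Rprop-style step-size
// adaptation: avars tracks a moving average of g^2; the per-weight step grows
// by RMS_WGT_INC while the gradient keeps its sign and shrinks by RMS_WGT_DEC
// on a sign flip, clamped to [RMS_WGT_MIN, RMS_WGT_MAX]; the gradient is then
// rescaled by steps[i] / sqrt(avars[i] + floor).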
template <class ElemType>
void CPUMatrix<ElemType>::AdaDelta(CPUMatrix<ElemType>& gradients, CPUMatrix<ElemType>& functionValues, ElemType learningRate, ElemType rho, ElemType epsilon)
{
size_t numColsNeeded = 2 * gradients.GetNumCols();
if (IsEmpty() || (GetNumCols() < numColsNeeded))
{
RequireSize(gradients.GetNumRows(), numColsNeeded);
SetValue(0.0);
}
if (GetNumRows() != gradients.GetNumRows() || GetNumCols() != numColsNeeded)
LogicError("The matrix gradients does not have expected dimensions.");
size_t n = gradients.GetNumElements();
ElemType* grad = gradients.Data();
ElemType* smoothAda = Data();
ElemType* smoothX2 = Data() + n;
ElemType* val = functionValues.Data();
#pragma omp parallel for
// TODO: Unroll 4-times for better performance leveraging vectorization
for (long i = 0; i < n; i++)
{
ElemType g = grad[i];
ElemType adaSqr = rho * smoothAda[i] + (1 - rho) * g * g;
smoothAda[i] = adaSqr;
ElemType x2 = smoothX2[i];
ElemType deltaX = -sqrt(x2 + epsilon) / sqrt(adaSqr + epsilon) * g;
smoothX2[i] = rho * smoothX2[i] + (1 - rho) * deltaX * deltaX;
val[i] += learningRate * deltaX;
}
}
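// Per-element AdaDelta step performed above (Zeiler, 2012):
//   E[g^2]  = rho * E[g^2]  + (1 - rho) * g^2
//   dx      = -sqrt(E[dx^2] + eps) / sqrt(E[g^2] + eps) * g
//   E[dx^2] = rho * E[dx^2] + (1 - rho) * dx^2
//   w      += learningRate * dx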
template <class ElemType>
void CPUMatrix<ElemType>::Reshape(const size_t numRows, const size_t numCols)
{
if (numRows * numCols != GetNumElements())
InvalidArgument("Reshape: Total number of elements does not match.");
m_numRows = numRows;
m_numCols = numCols;
}
// RequireSize() -- Tests if the matrix is the right size. If not, resizes the matrix. This avoids the VerifyResizable check if we're already the right size.
template <class ElemType>
void CPUMatrix<ElemType>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
if (GetNumRows() != numRows || GetNumCols() != numCols)
Resize(numRows, numCols, growOnly);
}
// Resize() -- change matrix size
// This function is cheap if the matrix size does not change.
// Current content is not preserved.
// If growOnly is true, resize will not reallocate memory if the current memory is large enough (i.e., will not shrink).
// If this object does not own its memory then new memory cannot be allocated (one can still shrink and/or reshape).
template <class ElemType>
void CPUMatrix<ElemType>::Resize(const size_t numRows, const size_t numCols, bool growOnly /*=true*/)
{
if (GetNumRows() == numRows && GetNumCols() == numCols)
return;
VerifyResizable(__func__);
size_t numElements = numRows * numCols;
if (numElements > GetSizeAllocated() || // grow allocation
(!growOnly && (numElements != GetSizeAllocated()))) // shrink allocation (not if 'growOnly')
{
// reallocate buffer
ElemType* pArray = nullptr;
if (numElements > 0)
{
pArray = NewArray<ElemType>(numElements);
}
// success: update the object
delete[] Buffer();
SetBuffer(pArray, numElements * sizeof(ElemType));
SetSizeAllocated(numElements);
}
// success
m_sliceViewOffset = 0;
m_numRows = numRows;
m_numCols = numCols;
}
// allocated by the callee but should be deleted by the caller
// TODO: change to use STL vector instead
template <class ElemType>
ElemType* CPUMatrix<ElemType>::CopyToArray() const
{
size_t numElements = GetNumElements();
if (numElements != 0)
{
ElemType* arrayCopyTo = NewArray<ElemType>(numElements);
memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
return arrayCopyTo;
}
else
{
return nullptr;
}
}
//memory will be allocated by the callee if not large enough, but must be deleted by the caller when done
//returns the number of elements copied
template <class ElemType>
size_t CPUMatrix<ElemType>::CopyToArray(ElemType*& arrayCopyTo, size_t& currentArraySize) const
{
size_t numElements = GetNumElements();
if (numElements > currentArraySize)
{
delete[] arrayCopyTo; // must match the new[] in NewArray
arrayCopyTo = NewArray<ElemType>(numElements);
currentArraySize = numElements;
}
if (numElements != 0)
{
memcpy(arrayCopyTo, Data(), sizeof(ElemType) * numElements);
}
return numElements;
}
template <typename ElemType>
void CPUMatrix<ElemType>::CopySection(size_t /*numRows*/, size_t /*numCols*/, ElemType* /*dst*/, size_t /*colStride*/) const
{
// REVIEW alexeyk: currently not used by CPU, but implement when possible.
RuntimeError("Not implemented.");
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateColumn(const size_t col) const
{
// For performance reasons, avoid extra validation in release.
assert(col == 0 || col < GetNumCols());
return col * m_numRows; // matrix in column-wise storage
}
template <class ElemType>
inline size_t CPUMatrix<ElemType>::LocateElement(const size_t row, const size_t col) const
{
// For performance reasons, avoid extra validation in release.
assert(row < m_numRows);
return LocateColumn(col) + row; // matrix in column-wise storage
}
#pragma endregion Basic Operators
#pragma region Member BLAS Functions
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(ElemType alpha)
{
return AssignSumOf(alpha, *this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
c.AssignSumOf(alpha, *this);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSumOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = alpha + a(i, j);
us(i + 1, j) = alpha + a(i + 1, j);
us(i + 2, j) = alpha + a(i + 2, j);
us(i + 3, j) = alpha + a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = alpha + a(i, j);
}
}
return *this;
}
//if [this] and a have same dimension then [this]=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
//if a is a scalar, add it to all elements.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator+=(const CPUMatrix<ElemType>& a)
{
// if (a.GetNumElements() == 1)
// *this += a(0,0);
// else
ScaleAndAdd(1, a, *this);
return *this;
}
//if [this] and a have same dimension then OUTPUT=[this]+a
//if a is a column vector, add to all columns of [this]
//if a is a row vector, add to all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator+(const CPUMatrix<ElemType>& a) const
{
if (GetNumElements() == 1)
{
CPUMatrix<ElemType> c(a);
c += (*this)(0, 0);
return c;
}
else if (a.GetNumElements() == 1)
{
CPUMatrix<ElemType> c(*this);
c += a(0, 0);
return c;
}
else
{
CPUMatrix<ElemType> c(*this); // this implementation introduces a copy overhead, but reuses the code
c += a;
return c;
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.GetNumElements() == 1)
{
SetValue(b);
(*this) += a;
}
else
{
SetValue(a);
(*this) += b;
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(ElemType alpha)
{
return AssignDifferenceOf(*this, alpha);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
c.AssignDifferenceOf(*this, alpha);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = alpha - a(i, j);
us(i + 1, j) = alpha - a(i + 1, j);
us(i + 2, j) = alpha - a(i + 2, j);
us(i + 3, j) = alpha - a(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = alpha - a(i, j);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const ElemType alpha)
{
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) - alpha;
us(i + 1, j) = a(i + 1, j) - alpha;
us(i + 2, j) = a(i + 2, j) - alpha;
us(i + 3, j) = a(i + 3, j) - alpha;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) - alpha;
}
}
return *this;
}
//if [this] and a have same dimension then [this]=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator-=(const CPUMatrix<ElemType>& a)
{
ScaleAndAdd(-1, a, *this);
return *this;
}
//if [this] and a have same dimension then output=[this]-a
//if a is a column vector, minus it from all columns of [this]
//if a is a row vector, minus it from all rows of [this]
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator-(const CPUMatrix<ElemType>& a) const
{
CPUMatrix<ElemType> c(*this); // this implementation introduces a copy overhead, but reuses the code
c -= a;
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignDifferenceOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (this != &a)
{
RequireSize(a.GetNumRows(), a.GetNumCols());
SetValue(a);
}
(*this) -= b;
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator*=(ElemType alpha)
{
Scale(alpha, *this);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
Scale(alpha, *this, c);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const ElemType alpha, const CPUMatrix<ElemType>& a)
{
Scale(alpha, a, *this);
return *this;
}
// [this]=a*b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignProductOf(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB)
{
if (a.GetNumElements() == 1)
{
// when a is a scalar, [this] must first take the value of b (transposed if requested)
if (transposeB)
AssignTransposeOf(b);
else
SetValue(b);
(*this) *= a(0, 0);
}
else if (b.GetNumElements() == 1)
{
// likewise, when b is a scalar, [this] must first take the value of a
if (transposeA)
AssignTransposeOf(a);
else
SetValue(a);
(*this) *= b(0, 0);
}
else
Multiply(a, transposeA, b, transposeB, *this);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator*(const CPUMatrix<ElemType>& a) const
{
auto& us = *this;
if (GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(us(0, 0), a);
return c;
}
else if (a.GetNumElements() == 1)
{
CPUMatrix<ElemType> c;
c.AssignProductOf(a(0, 0), us);
return c;
}
else
{
CPUMatrix<ElemType> c;
Multiply(*this, a, c);
return c;
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator/=(ElemType alpha)
{
(*this) *= 1 / alpha;
return (*this);
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator/(ElemType alpha) const
{
return ((*this) * (1 / alpha));
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::operator^=(ElemType alpha)
{
auto& us = *this;
ElementWisePower(alpha, us, us);
return us;
}
//element-wise power
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::operator^(ElemType alpha) const
{
CPUMatrix<ElemType> c(GetNumRows(), GetNumCols());
ElementWisePower(alpha, *this, c);
return c;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementPowerOf(const CPUMatrix<ElemType>& a, const ElemType power)
{
ElementWisePower(power, a, *this);
return *this;
}
//[this]=[this] .* a (we cannot override operator .* in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
return AssignElementProductOf(*this, a);
}
//[this]=[this] ./ a (we cannot override operator ./ in c++)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementDivideBy(const CPUMatrix<ElemType>& a)
{
return AssignElementDivisionOf(*this, a);
}
//[this]=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementProductOf: The input matrix dimensions do not match.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) * b(i, j);
us(i + 1, j) = a(i + 1, j) * b(i + 1, j);
us(i + 2, j) = a(i + 2, j) * b(i + 2, j);
us(i + 3, j) = a(i + 3, j) * b(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) * b(i, j);
}
}
return *this;
}
//[this] +=a .* b
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddElementProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddElementProductOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AddElementProductOf: The input matrix dimensions do not match.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == GetNumCols()))
InvalidArgument("AddElementProductOf: The input matrix dimensions do not match [this].");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) += a(i, j) * b(i, j);
us(i + 1, j) += a(i + 1, j) * b(i + 1, j);
us(i + 2, j) += a(i + 2, j) * b(i + 2, j);
us(i + 3, j) += a(i + 3, j) * b(i + 3, j);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) += a(i, j) * b(i, j);
}
}
return *this;
}
//[this]=a ./ b
// TODO: This clips the divisor by a small value. Is that really what one would want?
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementDivisionOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementDivisionOf: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementDivisionOf : The input matrix dimensions do not match.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
foreach_coord (i, j, us)
{
ElemType v = b(i, j);
if (v >= 0 && v < smallValue)
us(i, j) = a(i, j) / smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) = a(i, j) / (-smallValue);
else
us(i, j) = a(i, j) / v;
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("ColumnElementMultiplyWith: Matrix is empty.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
InvalidArgument("ColumnElementMultiplyWith: The input matrix should be a col vector and match [this]'s rows.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) *= a(i, 0);
us(i + 1, j) *= a(i + 1, 0);
us(i + 2, j) *= a(i + 2, 0);
us(i + 3, j) *= a(i + 3, 0);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) *= a(i, 0);
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementMultiplyWith(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementMultiplyWith: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementMultiplyWith: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
ElemType v = a(0, j);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) *= v;
us(i + 1, j) *= v;
us(i + 2, j) *= v;
us(i + 3, j) *= v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) *= v;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::RowElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("RowElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == 1 && a.GetNumCols() == GetNumCols()))
InvalidArgument("RowElementDivideBy: The input matrix should be a row vector and match [this]'s columns.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
ElemType v = a(0, j);
if (v >= 0 && v < EPS_IN_INVERSE)
v = EPS_IN_INVERSE;
else if (v < 0 && v > -EPS_IN_INVERSE)
v = (-EPS_IN_INVERSE);
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) /= v;
us(i + 1, j) /= v;
us(i + 2, j) /= v;
us(i + 3, j) /= v;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) /= v;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ColumnElementDivideBy(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty() || IsEmpty())
LogicError("ColumnElementDivideBy: Matrix is empty.");
if (!(a.GetNumRows() == GetNumRows() && a.GetNumCols() == 1))
InvalidArgument("ColumnElementDivideBy: The input matrix should be a col vector and match [this]'s rows.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
ElemType smallValue = EPS_IN_INVERSE;
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
for (long i = 0; i < m; i++)
{
ElemType v = a(i, 0);
if (v >= 0 && v < smallValue)
us(i, j) /= smallValue;
else if (v < 0 && v > -smallValue)
us(i, j) /= (-smallValue);
else
us(i, j) /= v;
}
}
return *this;
}
//[this]=1 ./ a
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::ElementInverse()
{
return AssignElementInverseOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementInverseOf(const CPUMatrix<ElemType>& a)
{
ElemType smallValue = EPS_IN_INVERSE;
if (a.IsEmpty())
LogicError("AssignElementInverseOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) < 0 && a(i, j) > -smallValue)
us(i, j) = 1 / (-smallValue);
else if (a(i, j) >= 0 && a(i, j) < smallValue)
us(i, j) = 1 / smallValue;
else
us(i, j) = 1 / a(i, j);
}
return *this;
}
//[this]=sigmoid([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoid()
{
return AssignSigmoidOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (a(i, j) >= 0)
us(i, j) = 1 / (1 + exp(-a(i, j)));
else
{
ElemType v = exp(a(i, j));
us(i, j) = v / (1 + v);
}
}
return *this;
}
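// The sign branch above is the numerically stable sigmoid: exp() is only ever
// applied to a non-positive argument, so it cannot overflow; both branches
// compute the same value 1 / (1 + exp(-x)).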
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLinearRectifierDerivative()
{
return AssignLinearRectifierDerivativeOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLinearRectifierDerivativeOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLinearRectifierDerivativeOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
us(i + 1, j) = a(i + 1, j) > 0.0f ? 1.0f : 0.0f;
us(i + 2, j) = a(i + 2, j) > 0.0f ? 1.0f : 0.0f;
us(i + 3, j) = a(i + 3, j) > 0.0f ? 1.0f : 0.0f;
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = a(i, j) > 0.0f ? 1.0f : 0.0f;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSigmoidDerivative()
{
return AssignSigmoidDerivativeOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSigmoidDerivativeOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSigmoidDerivativeOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
ElemType v1 = a(i + 1, j);
us(i + 1, j) = v1 * (1 - v1);
ElemType v2 = a(i + 2, j);
us(i + 2, j) = v2 * (1 - v2);
ElemType v3 = a(i + 3, j);
us(i + 3, j) = v3 * (1 - v3);
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
ElemType v = a(i, j);
us(i, j) = v * (1 - v);
}
}
return *this;
}
//[this]=tanh([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTanh()
{
return AssignTanhOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTanhOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignTanhOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = tanh(a(i, j));
us(i + 1, j) = tanh(a(i + 1, j));
us(i + 2, j) = tanh(a(i + 2, j));
us(i + 3, j) = tanh(a(i + 3, j));
}
// handle remaining stuffs
for (long i = m & ~3; i < m; i++)
{
us(i, j) = tanh(a(i, j));
}
}
return *this;
}
//[this]=logsoftmax([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLogSoftmax(const bool isColWise)
{
return AssignLogSoftmaxOf(*this, isColWise);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogSoftmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignLogSoftmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
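// log-softmax with the max-subtraction trick:
// logsoftmax(x)_i = (x_i - max) - log(sum_k exp(x_k - max)),
// mathematically equal to x_i - log(sum_k exp(x_k)) but safe from overflow in exp().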
if (isColWise)
{
#pragma omp parallel for
foreach_column (j, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(0, j);
foreach_row (i, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_row (i, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_row (i, us)
us(i, j) -= sum;
}
}
else
{
#pragma omp parallel for
foreach_row (i, a)
{
// we need to extract max before applying exp to avoid overflow
ElemType maxV = a(i, 0);
foreach_column (j, a)
maxV = std::max(maxV, a(i, j));
ElemType sum = 0;
foreach_column (j, a)
sum += exp(us(i, j) = a(i, j) - maxV);
sum = log(sum);
foreach_column (j, us)
us(i, j) -= sum;
}
}
return *this;
}
//[this]=hardmax([this])
//the max element is 1 else is 0
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceHardmax(const bool isColWise)
{
return AssignHardmaxOf(*this, isColWise);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignHardmaxOf(const CPUMatrix<ElemType>& a, const bool isColWise)
{
if (a.IsEmpty())
LogicError("AssignHardmaxOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
if (isColWise)
{
#pragma omp parallel for
foreach_column (j, a)
{
// we need to extract max
ElemType maxV = a(0, j);
long maxI = 0;
foreach_row (i, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxI = i;
}
}
foreach_row (i, us)
us(i, j) = (i == maxI) ? 1.0f : 0.0f;
}
}
else
{
#pragma omp parallel for
foreach_row (i, a)
{
// we need to extract max
ElemType maxV = a(i, 0);
long maxJ = 0;
foreach_column (j, a)
{
if (maxV < a(i, j))
{
maxV = a(i, j);
maxJ = j;
}
}
foreach_column (j, us)
us(i, j) = (j == maxJ) ? 1.0f : 0.0f;
}
}
return *this;
}
//[this]=sqrt([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSqrt()
{
return AssignSqrtOf(*this);
}
//to prevent negative values caused by floating operations, we force inputs to be >=0
//this may, however, hide problems in the caller.
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSqrtOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSqrtOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
us(i + 1, j) = sqrt(max((ElemType)0, a(i + 1, j)));
us(i + 2, j) = sqrt(max((ElemType)0, a(i + 2, j)));
us(i + 3, j) = sqrt(max((ElemType)0, a(i + 3, j)));
}
// remaining
for (long i = m & ~3; i < m; i++)
{
us(i, j) = sqrt(max((ElemType)0, a(i, j)));
}
}
return *this;
}
//[this]=exp([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceExp()
{
return AssignExpOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignExpOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignExpOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = exp(a(i, j));
us(i + 1, j) = exp(a(i + 1, j));
us(i + 2, j) = exp(a(i + 2, j));
us(i + 3, j) = exp(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = exp(a(i, j));
}
}
return *this;
}
//[this]=abs([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceAbs()
{
return AssignAbsOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAbsOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignAbsOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
us(i, j) = abs(a(i, j));
us(i + 1, j) = abs(a(i + 1, j));
us(i + 2, j) = abs(a(i + 2, j));
us(i + 3, j) = abs(a(i + 3, j));
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
us(i, j) = abs(a(i, j));
}
}
return *this;
}
//[this]=log([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog()
{
return AssignLogOf(*this);
}
//[this]=log10([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceLog10()
{
return AssignLog10Of(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLogOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v < EPS_IN_LOG)
{
us(i, j) = LOG_OF_EPS_IN_LOG;
}
else
us(i, j) = log(v);
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignLog10Of(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignLogOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
if (v <= 0)
LogicError("AssignLogOf: Log can only applied to numbers larger than 0.");
else if (v < EPS_IN_LOG)
{
us(i, j) = LOG10_OF_EPS_IN_LOG;
}
else
us(i, j) = log10(v);
}
return *this;
}
//[this]=cos([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceCosine()
{
return AssignCosineOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCosineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = cos(v);
}
return *this;
}
//[this]=-sin([this]) element wise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceNegativeSine()
{
return AssignNegativeSineOf(*this);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNegativeSineOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignCosineOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
const ElemType v = a(i, j);
us(i, j) = -sin(v);
}
return *this;
}
//Threshold truncating: this[i] = max( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateBottom(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateBottom: Matrix is empty.");
auto& us = *this;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
if (us(i + 1, j) < threshold)
us(i + 1, j) = threshold;
if (us(i + 2, j) < threshold)
us(i + 2, j) = threshold;
if (us(i + 3, j) < threshold)
us(i + 3, j) = threshold;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) < threshold)
us(i, j) = threshold;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncate(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncate: Matrix is empty.");
auto& us = *this;
ElemType locThresholdPos = abs(threshold);
ElemType locThresholdNeg = -locThresholdPos;
long m = (long) GetNumRows(), n = (long) GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
if (us(i, j) > locThresholdPos)
us(i, j) = locThresholdPos;
else if (us(i, j) < locThresholdNeg)
us(i, j) = locThresholdNeg;
if (us(i + 1, j) > locThresholdPos)
us(i + 1, j) = locThresholdPos;
else if (us(i + 1, j) < locThresholdNeg)
us(i + 1, j) = locThresholdNeg;
if (us(i + 2, j) > locThresholdPos)
us(i + 2, j) = locThresholdPos;
else if (us(i + 2, j) < locThresholdNeg)
us(i + 2, j) = locThresholdNeg;
if (us(i + 3, j) > locThresholdPos)
us(i + 3, j) = locThresholdPos;
else if (us(i + 3, j) < locThresholdNeg)
us(i + 3, j) = locThresholdNeg;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (us(i, j) > locThresholdPos)
us(i, j) = locThresholdPos;
else if (us(i, j) < locThresholdNeg)
us(i, j) = locThresholdNeg;
}
}
return *this;
}
//x= x-threshold if x>threshold, x+threshold if x<-threshold, 0 otherwise
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceSoftThreshold(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncate: Matrix is empty.");
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
#pragma omp parallel for
for (long i = 0; i < (m & ~3); i += 4) // four-way unrolling
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
if (bufPtr[i + 1] > threshold)
bufPtr[i + 1] -= threshold;
else if (bufPtr[i + 1] < -threshold)
bufPtr[i + 1] += threshold;
else
bufPtr[i + 1] = 0;
if (bufPtr[i + 2] > threshold)
bufPtr[i + 2] -= threshold;
else if (bufPtr[i + 2] < -threshold)
bufPtr[i + 2] += threshold;
else
bufPtr[i + 2] = 0;
if (bufPtr[i + 3] > threshold)
bufPtr[i + 3] -= threshold;
else if (bufPtr[i + 3] < -threshold)
bufPtr[i + 3] += threshold;
else
bufPtr[i + 3] = 0;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
if (bufPtr[i] > threshold)
bufPtr[i] -= threshold;
else if (bufPtr[i] < -threshold)
bufPtr[i] += threshold;
else
bufPtr[i] = 0;
}
return *this;
}
//Threshold truncating: this[i] = max( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateBottomOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateBottomOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) < threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = min( this[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::InplaceTruncateTop(const ElemType threshold)
{
if (IsEmpty())
LogicError("InplaceTruncateTop: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (us(i, j) > threshold)
us(i, j) = threshold;
}
return *this;
}
//Threshold truncating: this[i] = min( a[i], threshold )
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignTruncateTopOf(const CPUMatrix<ElemType>& a, const ElemType threshold)
{
if (a.IsEmpty())
LogicError("AssignTruncateTopOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (a(i, j) > threshold)
us(i, j) = threshold;
else
us(i, j) = a(i, j);
}
return *this;
}
//Threshold truncating: this[i] = 0 if abs(this[i]<threshold).
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::SetToZeroIfAbsLessThan(const ElemType threshold)
{
if (IsEmpty())
LogicError("SetToZeroIfAbsLessThan: Matrix is empty.");
auto& us = *this;
#pragma omp parallel for
foreach_coord (i, j, us)
{
if (abs(us(i, j)) < threshold)
us(i, j) = 0;
}
return *this;
}
//sum of all abs(elements)
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfAbsElements() const
{
if (IsEmpty())
LogicError("SumOfAbsElements: Matrix is empty.");
if (sizeof(ElemType) == sizeof(double))
{
return (ElemType) cblas_dasum((int) GetNumElements(), reinterpret_cast<double*>(Data()), 1);
}
else
{
#pragma warning(suppress : 4244)
return cblas_sasum((int) GetNumElements(), reinterpret_cast<float*>(Data()), 1);
}
}
//sum of all elements
template <class ElemType>
ElemType CPUMatrix<ElemType>::SumOfElements() const
{
if (IsEmpty())
LogicError("SumOfElements: Matrix is empty.");
ElemType sum = 0;
long m = (long) GetNumElements(); // note: OpenMP requires loop indices to be long, not size_t
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : sum)
for (long i = 0; i < (m & ~3); i += 4)
{
sum += bufPtr[i] + bufPtr[i + 1] + bufPtr[i + 2] + bufPtr[i + 3];
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
sum += bufPtr[i];
}
return sum;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSumOfElements(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSumOfElements: Matrix a is empty.");
auto& us = *this;
us.RequireSize(1, 1);
us(0, 0) = a.SumOfElements();
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignOneHot(const CPUMatrix<ElemType>& a, vector<size_t>& shape, size_t axis)
{
if (a.IsEmpty())
LogicError("AssignOneHot: Matrix a is empty.");
if (axis >= shape.size())
LogicError("AssignOneHot: axis is not correct");
size_t item_size = 1;
for (size_t i = 0; i < shape.size() && i < axis; i++)
item_size *= shape[i];
size_t num_class = shape[axis];
auto& us = *this;
auto nCols = a.GetNumCols();
auto nRows = num_class * a.GetNumRows();
us.RequireSize(nRows, nCols);
ElemType* bufPtr = Data();
ElemType* aBufPtr = a.Data();
memset(bufPtr, 0, sizeof(ElemType) * nRows *nCols);
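// item_size is the product of the shape dimensions preceding 'axis'. For input
// element i, block_id = i / item_size and item_id = i % item_size locate it
// relative to that axis; the label value then selects which of the num_class
// slots (each item_size apart) receives the 1.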
#pragma omp parallel for
for (long i = 0; i < a.GetNumElements(); i++)
{
if (aBufPtr[i] >= 0 && aBufPtr[i] < num_class)
{
size_t block_id = i / item_size;
size_t item_id = i % item_size;
bufPtr[block_id * num_class * item_size + item_id + item_size * (size_t)aBufPtr[i]] = 1;
}
}
return *this;
}
template <class ElemType>
bool CPUMatrix<ElemType>::IsEqualTo(const CPUMatrix<ElemType>& a, const ElemType threshold /*= 1e-8*/) const
{
return AreEqual(*this, a, threshold);
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorSum(const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty())
LogicError("VectorSum: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, a)
{
ElemType v = 0;
foreach_row (i, a)
{
v += a(i, j); // no atomic needed: v is local to this thread's loop iteration
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, a)
{
ElemType v = 0;
foreach_column (j, a)
{
v += a(i, j); // no atomic needed: v is local to this thread's loop iteration
}
c(i, 0) = v;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm1(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm1: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
#pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0;
foreach_row (i, us)
{
v += abs(us(i, j)); // no atomic needed: v is local to this thread's loop iteration
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
#pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0;
foreach_column (j, us)
{
v += abs(us(i, j)); // no atomic needed: v is local to this thread's loop iteration
}
c(i, 0) = v;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm1Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNorm1(*this, isColWise);
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNorm2(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNorm2: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
ElemType* bufPtr = us.Data();
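// column-wise: one contiguous nrm2 per column (stride 1);
// row-wise: one strided nrm2 per row (stride m, starting at that row's first element).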
if (isColWise) // col-wise
{
c.RequireSize(1, n);
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, c)
{
c(0, j) = (ElemType) cblas_dnrm2(m, reinterpret_cast<double*>(bufPtr + us.LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
c(0, j) = cblas_snrm2(m, reinterpret_cast<float*>(bufPtr + us.LocateColumn(j)), 1);
}
}
}
else
{
c.RequireSize(m, 1);
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = cblas_dnrm2(n, reinterpret_cast<double*>(bufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_snrm2(n, reinterpret_cast<float*>(bufPtr + i), m);
}
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNorm2Of(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNorm2(*this, isColWise);
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorNormInf(CPUMatrix<ElemType>& c, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorNormInf: Matrix is empty.");
auto& us = *this;
const int m = (int) us.GetNumRows();
const int n = (int) us.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
c.RequireSize(1, n);
// #pragma omp parallel for
foreach_column (j, us)
{
ElemType v = 0;
foreach_row (i, us)
{
v = std::max(v, abs(us(i, j)));
}
c(0, j) = v;
}
}
else
{
c.RequireSize(m, 1);
// #pragma omp parallel for
foreach_row (i, us)
{
ElemType v = 0;
foreach_column (j, us)
{
v = std::max(v, abs(us(i, j)));
}
c(i, 0) = v;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignVectorNormInfOf(CPUMatrix<ElemType>& a, const bool isColWise)
{
a.VectorNormInf(*this, isColWise);
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignInnerProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool isColWise)
{
InnerProduct(a, b, *this, isColWise);
return *this;
}
//column-wise cross product (Khatri-Rao product): column k of the result holds all pairwise products a(i, k) * b(j, k), with the row index of a varying fastest
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignKhatriRaoProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignKhatriRaoProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
RequireSize(rowsA * rowsB, cols);
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
for (long k = 0; k < cols; k++)
{
long jj = 0;
for (long j = 0; j < rowsB; j++)
{
for (long i = 0; i < rowsA; i++)
{
(*this)(jj++, k) = a(i, k) * b(j, k);
}
}
}
return *this;
}
//column-wise reshaped product. Used to compute the KhatriRaoProduct gradient
// this = reshape each column of a from (K1xK2,1) to (K1, K2)
// if each column of a is not transposed, each (K1, K2) times each column of b (K2, frames),
// and the output is a (K1, frames) matrix
// if each column of a is transposed, each (K1, K2)^T times each column of b (K1, frames), and the output is (K2, frames)
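// E.g. with a of size (6, T), b of size (3, T) and transposeAColumn = false:
// rowsC = 2, each column a(:, t) is read column-major as a 2x3 matrix M_t,
// and M_t * b(:, t) is accumulated into the 2x1 output column us(:, t).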
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddColumnReshapeProductOf(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const bool transposeAColumn)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AddColumnReshapeProductOf: Matrix is empty.");
long cols = (long) a.GetNumCols();
if (cols != b.GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: a.GetNumCols() != b.GetNumCols()");
long rowsA = (long) a.GetNumRows();
long rowsB = (long) b.GetNumRows();
if (rowsA % rowsB != 0)
InvalidArgument("AddColumnReshapeProductOf: number of rows in a should be multiples of that in b.");
long rowsC = rowsA / rowsB;
if (rowsC != GetNumRows() || cols != GetNumCols())
InvalidArgument("AddColumnReshapeProductOf: This matrix does not have the right size.");
auto& us = *this;
if (transposeAColumn)
{
// find nrows and ncols of the reshaped a
long nrows = rowsB;
long ncols = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
size_t k = 0;
for (size_t j = 0; j < ncols; j++) // row and col is transposed
{
ElemType v = 0;
for (size_t i = 0; i < nrows; i++)
{
v += a(k, t) * b(i, t);
k++;
}
us(j, t) += v;
}
}
}
else
{
size_t ncols = rowsB;
size_t nrows = rowsC;
#ifdef __INTEL_COMPILER // TODO: check this
#pragma simd statement
#endif
#pragma omp parallel for
foreach_column (t, a)
{
size_t k = 0;
for (size_t j = 0; j < ncols; j++)
{
for (size_t i = 0; i < nrows; i++)
{
us(i, t) += a(k, t) * b(j, t);
k++;
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddWithScaleOf(ElemType alpha, const CPUMatrix<ElemType>& a)
{
ScaleAndAdd(alpha, a, *this);
return *this;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::FrobeniusNorm() const
{
if (IsEmpty())
LogicError("FrobeniusNorm: Matrix is empty.");
ElemType v = 0;
long m = (long) GetNumElements();
ElemType* bufPtr = Data();
//four-way unrolling
#pragma omp parallel for reduction(+ : v)
for (long i = 0; i < (m & ~3); i += 4)
{
v += bufPtr[i] * bufPtr[i] + bufPtr[i + 1] * bufPtr[i + 1] + bufPtr[i + 2] * bufPtr[i + 2] + bufPtr[i + 3] * bufPtr[i + 3];
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
v += bufPtr[i] * bufPtr[i];
}
return sqrt(v);
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignFrobeniusNormOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignFrobeniusNormOf: Matrix a is empty.");
auto& us = *this;
us.RequireSize(1, 1);
us(0, 0) = a.FrobeniusNorm();
return us;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNormInf() const
{
if (IsEmpty())
LogicError("MatrixNormInf: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
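// Note: the critical section below serializes the whole scan; a max-reduction
// would be preferable, but reduction(max:) requires OpenMP 3.1 and is not
// available on all supported compilers (e.g. MSVC's OpenMP 2.0).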
#pragma omp parallel for
foreach_coord (i, j, us)
{
#pragma omp critical
{
v = std::max(v, abs(us(i, j)));
}
}
return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm0() const
{
if (IsEmpty())
LogicError("MatrixNorm0: Matrix is empty.");
auto& us = *this;
ElemType v = 0;
#pragma omp parallel for reduction(+ : v)
foreach_coord (i, j, us)
{
if (us(i, j) != 0)
++v;
}
return v;
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::MatrixNorm1() const
{
if (IsEmpty())
LogicError("MatrixNorm1: Matrix is empty.");
auto& us = *this;
ElemType sum = 0;
#pragma omp parallel for reduction(+ : sum)
foreach_coord (i, j, us)
{
sum += abs(us(i, j));
}
return sum;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AssignSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) = (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v;
}
}
return us;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddSignOf(const CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("AddSignOf: Matrix a is empty.");
auto& us = *this;
if (this != &a)
RequireSize(a.GetNumRows(), a.GetNumCols());
#pragma omp parallel for
foreach_column (j, us)
{
foreach_row (i, us)
{
ElemType v = a(i, j);
if (!std::isnan(v))
us(i, j) += (v == (ElemType) 0 ? (ElemType) 0 : (v > 0 ? (ElemType) 1 : (ElemType)(-1)));
else
us(i, j) = v;
}
}
return us;
}
//I decided to use CPUMatrix<ElemType>& maxIndexes instead of integer vector because the result may be used to do additional calculation
template <class ElemType>
void CPUMatrix<ElemType>::VectorMax(CPUMatrix<ElemType>& maxIndexes, CPUMatrix<ElemType>& maxValues, const bool isColWise, int topK) const
{
if (IsEmpty())
LogicError("VectorMax: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
if (topK > m)
InvalidArgument("VectorMax: TopK must be less or equal than the number of rows");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
maxValues.RequireSize(topK, n);
maxIndexes.RequireSize(topK, n);
if (topK == 1)
{
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v < us(i, j))
{
index = i;
v = us(i, j);
}
}
maxValues(0, j) = v;
maxIndexes(0, j) = (ElemType) index;
}
}
else
{
std::vector<int> indices(m);
int i = 0;
std::generate(indices.begin(), indices.end(), [&i]
{
return i++;
});
const ElemType* curVal = Data();
ElemType* curIdx = maxIndexes.Data();
ElemType* curMax = maxValues.Data();
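// For each column, nth_element partitions 'indices' so the topK largest values
// (by curVal) come first, in O(m) average time; note the top K entries are
// emitted in partition order, not sorted.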
for (int icol = 0; icol < n; icol++, curVal += m, curIdx += topK, curMax += topK)
{
// Partial sort, descending order.
std::nth_element(indices.begin(), indices.begin() + topK, indices.end(),
[curVal](const int& a, const int& b)
{
return curVal[a] > curVal[b];
});
// REVIEW alexeyk: the following produces warning (see SCL_SECURE_NO_WARNINGS) so use loop instead.
// std::transform(indices.begin(), indices.begin() + topK, curIdx, [](const int& a) { return static_cast<ElemType>(a); });
for (int i2 = 0; i2 < topK; i2++)
{
curIdx[i2] = static_cast<ElemType>(indices[i2]);
curMax[i2] = curVal[indices[i2]];
}
}
}
}
else
{
if (topK > 1)
RuntimeError("Row-wise TopK max is not supported.");
maxValues.RequireSize(m, 1);
maxIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v < us(i, j))
{
index = j;
v = us(i, j);
}
}
maxValues(i, 0) = v;
maxIndexes(i, 0) = (ElemType) index;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::VectorMin(CPUMatrix<ElemType>& minIndexes, CPUMatrix<ElemType>& minValues, const bool isColWise) const
{
if (IsEmpty())
LogicError("VectorMin: Matrix is empty.");
auto& us = *this;
const int m = (int) GetNumRows();
const int n = (int) GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
if (isColWise) // col-wise
{
minValues.RequireSize(1, n);
minIndexes.RequireSize(1, n);
#pragma omp parallel for
for (int j = 0; j < n; j++)
{
ElemType v = us(0, j);
size_t index = 0;
foreach_row (i, us)
{
if (v > us(i, j))
{
index = i;
v = us(i, j);
}
}
minValues(0, j) = v;
minIndexes(0, j) = (ElemType) index;
}
}
else
{
minValues.RequireSize(m, 1);
minIndexes.RequireSize(m, 1);
#pragma omp parallel for
for (int i = 0; i < m; i++)
{
ElemType v = us(i, 0);
size_t index = 0;
foreach_column (j, us)
{
if (v > us(i, j))
{
index = j;
v = us(i, j);
}
}
minValues(i, 0) = v;
minIndexes(i, 0) = (ElemType) index;
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNumOfDiff(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, bool searchInCol)
{
if (a.GetNumCols() != b.GetNumCols())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of columns.");
if (!searchInCol && a.GetNumRows() != b.GetNumRows())
throw std::invalid_argument("AssignNumOfDiff: a and b must have the same number of rows.");
ElemType n = 0;
if (!searchInCol)
{
foreach_coord (i, j, a)
{
n += (a(i, j) != b(i, j));
}
}
else
{
size_t crow = b.GetNumRows();
const ElemType* curCol = b.Data();
for (size_t icol = 0; icol < a.GetNumCols(); icol++, curCol += crow)
{
auto res = std::find(curCol, curCol + crow, a(0, icol));
if (res == curCol + crow)
n++;
}
}
RequireSize(1, 1); // result should be one element
(*this)(0, 0) = n;
return *this;
}
#pragma endregion Member BLAS Functions
#pragma region Other helper Functions
struct PrintRange
{
// print from begin to skipBegin, then from skipEnd to end
// skipBegin = end if no split
size_t begin;
size_t skipBegin;
size_t skipEnd;
size_t end;
bool IsEmpty() const { return end <= begin; }
// examples:
// * 3..10
// * -3..-3: include end-3..end and 0..3
PrintRange(ptrdiff_t first, ptrdiff_t last, size_t total)
{
if (first >= 0 && last >= 0)
{
begin = (size_t)first;
end = (size_t)last + 1;
if (end > total) // allow INT_MAX, meaning to end
end = total;
skipBegin = end;
skipEnd = end;
}
else if (first < 0 && last < 0)
{
begin = 0;
skipBegin = (size_t)(-last);
skipEnd = (size_t)(total + first);
if (skipEnd <= skipBegin)
skipBegin = skipEnd = total;
end = total;
}
else // if other combinations are ever of interest then implement them here
LogicError("Print: Bounds must be either both positive or both negative.");
}
};
// use negative ranges to print corners, e.g. Print("name", -3, -3, -3, -3) will print the first 3 and last 3 rows/cols
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName, ptrdiff_t rowFirst, ptrdiff_t rowLast, ptrdiff_t colFirst, ptrdiff_t colLast) const
{
fprintf(stderr, "\n###### ");
if (matrixName != nullptr)
fprintf(stderr, "%s ", matrixName);
fprintf(stderr, "(%lu, %lu)", (unsigned long)GetNumRows(), (unsigned long)GetNumCols());
if (rowFirst != 0 || colFirst != 0 || (size_t)(rowLast + 1) != GetNumRows() || (size_t)(colLast + 1) != GetNumCols())
fprintf(stderr, " [%ld:%ld, %ld:%ld]", (long)rowFirst, (long)rowLast, (long)colFirst, (long)colLast);
fprintf(stderr, " ######\n\n");
if (IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
PrintRange rowRange(rowFirst, rowLast, GetNumRows());
PrintRange colRange(colFirst, colLast, GetNumCols());
if (rowRange.IsEmpty() || colRange.IsEmpty())
{
fprintf(stderr, "(empty)\n");
return;
}
const auto& us = *this;
if (rowRange.begin > 0)
fprintf(stderr, "...\n");
for (size_t i = rowRange.begin; i < rowRange.end; i++)
{
if (i == rowRange.skipBegin) // insert ... between the two blocks if any
{
fprintf(stderr, "...\n");
i = rowRange.skipEnd;
}
if (colRange.begin > 0) // ... at line start
fprintf(stderr, "...\t");
for (size_t j = colRange.begin; j < colRange.end; j++)
{
if (j == colRange.skipBegin)
{
fprintf(stderr, "...\t");
j = colRange.skipEnd;
}
fprintf(stderr, "%.10f\t", us(i, j));
}
if (colRange.end < GetNumCols()) // ... at line end
fprintf(stderr, "...");
fprintf(stderr, "\n");
}
if (rowRange.end < GetNumRows())
fprintf(stderr, "...\n");
}
template <class ElemType>
void CPUMatrix<ElemType>::Print(const char* matrixName /*=nullptr*/) const
{
Print(matrixName, 0, GetNumRows() - 1, 0, GetNumCols() - 1);
}
// file I/O
//matrixName is used to verify that the correct matrix is read.
template <class ElemType>
void CPUMatrix<ElemType>::ReadFromFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//matrixName is used to verify that the correct matrix is written.
template <class ElemType>
void CPUMatrix<ElemType>::WriteToFile(FILE*, const char* /*matrixName*/)
{
RuntimeError("not implemented.");
}
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignPackedConvolutionInput(const CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding)
{
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horitzontalSubsample) must be less or equal than kernelHeight (or kernelWidth).");
const size_t packedInputRows = kernelWidth * kernelHeight * inputChannels;
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
RequireSize(packedInputRows, packedInputColsPerSample * smallBatchSize);
if (zeroPadding)
SetValue((ElemType) 0);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
const long y = id / inputHeightTimesChannel; // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels; // inputRow
const long c = nXC % (long) inputChannels; // channel
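// For input pixel (x, y), (x0, y0) is the first output position whose kernel
// window still covers the pixel, and (x1, y1) is the pixel's coordinate inside
// that window; the loops below then visit every window containing the pixel by
// stepping the window forward and the in-kernel position backward by one stride.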
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1.0f + halfKernelHeight) / (ElemType)verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1.0f + halfKernelWidth) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType)kernelHeight + 1) / (ElemType)verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType)kernelWidth + 1) / (ElemType)horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
(*this)(packRow, packCol) = currentInputValue;
}
packColBase += (long) outputHeight;
}
}
}
return *this;
}
//assume each column is an input sample. Each sample is stored in [channel, row, col] (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::UnpackConvolutionInput(CPUMatrix<ElemType>& inputSubBatch,
const size_t inputWidth, const size_t inputHeight, const size_t inputChannels,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputChannels*/,
const size_t kernelWidth, const size_t kernelHeight, const size_t horizontalSubsample, const size_t verticalSubsample,
const bool zeroPadding) const
{
if (verticalSubsample > kernelHeight || horizontalSubsample > kernelWidth)
LogicError("Arguments verticalSubsample (or horizonSubsample) must be less than or equal to kernelHeight (or kernelWidth).");
const size_t packedInputColsPerSample = outputWidth * outputHeight; // output size per channel
const size_t inputDim = inputWidth * inputHeight * inputChannels;
const size_t smallBatchSize = inputSubBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * inputChannels);
const long halfKernelWidth = (long) kernelWidth / 2;
const long halfKernelHeight = (long) kernelHeight / 2;
#pragma omp parallel for // each input element is copied to many places
for (long sample = 0; sample < smallBatchSize; sample++)
{
for (long id = 0; id < inputDim; id++)
{
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * inputChannels)
// IN_ELEM_COLPOS = sample
const long y = id / inputHeightTimesChannel; // inputCol
const long nXC = id % inputHeightTimesChannel; // channel + inputRow*inputChannels
const long x = nXC / (long) inputChannels; // inputRow
const long c = nXC % (long) inputChannels; // channel
long x0 = 0, y0 = 0, x1 = 0, y1 = 0;
if (zeroPadding)
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1.0f + halfKernelHeight) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x + halfKernelHeight - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1.0f + halfKernelWidth) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y + halfKernelWidth - y0 * horizontalSubsample); // first posyInKernel
}
else
{
x0 = (long) max((ElemType)0, ceil((x - (ElemType) kernelHeight + 1) / (ElemType) verticalSubsample)); // row : first wrow in which x is in
x1 = (long) (x - x0 * verticalSubsample); // first posxInKernel
y0 = (long) max((ElemType)0, ceil((y - (ElemType) kernelWidth + 1) / (ElemType) horizontalSubsample)); // col : first wcol in which y is in
y1 = (long) (y - y0 * horizontalSubsample); // first posyInKernel
}
assert(x1 >= 0 && x1 < kernelHeight && y1 >= 0 && y1 < kernelWidth);
// PACK_ELEM_ROWPOS(channel, posxInKernel, posyInKernel) = (channel * kernelWidth * kernelHeight + posxInKernel + posyInKernel * kernelHeight)
// PACK_ELEM_COLPOS(sample, wrow, wcol) = (sample*packedInputColsPerSample + outputHeight*wcol + wrow
ElemType currentInputValue = inputSubBatch(id, sample);
long packColBase = (long) (sample * packedInputColsPerSample + y0 * outputHeight);
for (long wcol = y0, posyInKernel = y1; wcol < (long) outputWidth && posyInKernel >= 0; wcol++, posyInKernel -= (long) horizontalSubsample)
{
long packRowBase = (long) (c * kernelWidth * kernelHeight + posyInKernel * kernelHeight);
for (long wrow = x0, posxInKernel = x1; wrow < (long) outputHeight && posxInKernel >= 0; wrow++, posxInKernel -= (long) verticalSubsample)
{
const long packRow = packRowBase + posxInKernel;
const long packCol = packColBase + wrow;
currentInputValue += (*this)(packRow, packCol);
}
packColBase += (long) outputHeight;
}
inputSubBatch(id, sample) = currentInputValue;
}
}
return inputSubBatch;
}
//assume each column is an input sample. Each sample is stored in (r00, g00, b00, r01, g01, b01, r10, g10, b10, r11, g11, b11)
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignMaxPoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const size_t batchSize = inputBatch.GetNumCols();
RequireSize(outputSizePerSample, batchSize);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < (long) batchSize; sample++)
{
for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
{
const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol
const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
const long x = (long) (nXC / channels); // wrow
const long c = (long) (nXC % channels); // channel
ElemType maxVal = -FLT_MAX;
ElemType minVal = FLT_MAX;
const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
{
long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
{
const ElemType val = inputBatch(rowInInput, sample); // pf[rowInWindow*channels];
maxVal = std::max(maxVal, val);
minVal = std::min(minVal, val);
rowInInput += (long) channels;
}
}
(*this)(outputIndexWithinSample, sample) = maxVal;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddMaxPoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch, const CPUMatrix<ElemType>& inputBatch, const CPUMatrix<ElemType>& outputBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
size_t batchSize = inputBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
{
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
const long x = (long) (nXC / channels); // row in input
const long c = (long) (nXC % channels); // channel
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
ElemType inputValue = inputBatch(inputIndexWithinSample, sample);
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = (long) (outY * outputHeightTimesChannel + outX * channels + c);
if (inputValue == outputBatch(outputIndex, sample))
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample);
}
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignAveragePoolingResult(const CPUMatrix<ElemType>& inputBatch, const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t /*inputSizePerSample*/,
const size_t /*outputWidth*/, const size_t outputHeight, const size_t outputSizePerSample,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const size_t batchSize = inputBatch.GetNumCols();
const size_t windowSize = windowWidth * windowHeight;
RequireSize(outputSizePerSample, batchSize);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long outputIndexWithinSample = 0; outputIndexWithinSample < outputSizePerSample; outputIndexWithinSample++)
{
const long y = outputIndexWithinSample / outputHeightTimesChannel; // wcol
const long nXC = outputIndexWithinSample % outputHeightTimesChannel; // channel + wrow*channels
const long x = (long) (nXC / channels); // wrow
const long c = (long) (nXC % channels); // channel
ElemType sum = 0;
const long rowInWindowBase = (long) ((x * verticalSubsample + y * horizontalSubsample * inputHeight) * channels + c);
for (long colInWindow = 0; colInWindow < windowWidth; colInWindow++)
{
long rowInInput = rowInWindowBase + colInWindow * inputHeightTimesChannel;
for (long rowInWindow = 0; rowInWindow < windowHeight; rowInWindow++)
{
sum += inputBatch(rowInInput, sample);
rowInInput += (long) channels;
}
}
(*this)(outputIndexWithinSample, sample) = sum / windowSize;
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AddAveragePoolingGradient(const CPUMatrix<ElemType>& outputGradientBatch,
const size_t channels,
const size_t /*inputWidth*/, const size_t inputHeight, const size_t inputSizePerSample,
const size_t outputWidth, const size_t outputHeight, const size_t /*outputSizePerSample*/,
const size_t windowWidth, const size_t windowHeight, const size_t horizontalSubsample, const size_t verticalSubsample)
{
size_t batchSize = outputGradientBatch.GetNumCols();
const long inputHeightTimesChannel = (long) (inputHeight * channels);
const long outputHeightTimesChannel = (long) (outputHeight * channels);
const long windowSize = (long) (windowWidth * windowHeight);
// IN_ELEM_ROWPOS(channel, row, col) = (channel + (row + col * inputHeight) * channels)
// IN_ELEM_COLPOS = sample
// OUT_ELEM_ROWPOS(channel, wrow, wcol) = (channel + (wrow + wcol * outputHeight) * channels)
// OUT_ELEM_COLPOS = sample
#pragma omp parallel for
for (long sample = 0; sample < batchSize; sample++)
{
for (long inputIndexWithinSample = 0; inputIndexWithinSample < inputSizePerSample; inputIndexWithinSample++)
{
const long y = inputIndexWithinSample / inputHeightTimesChannel; // col in input
const long nXC = inputIndexWithinSample % inputHeightTimesChannel; // channel + row*chanels
const long x = nXC / (long) channels; // row in input
const long c = nXC % (long) channels; // channel
long startOutX = (long) max((ElemType)0, ceil((x - (ElemType) windowHeight + 1) / (ElemType) verticalSubsample)); // inclusive start
long endOutX = (long) ((x / verticalSubsample < outputHeight - 1) ? x / (long) verticalSubsample : outputHeight - 1); // inclusive end
long startOutY = (long) max((ElemType)0, ceil((y - (ElemType) windowWidth + 1) / (ElemType) horizontalSubsample)); // inclusive start
long endOutY = (long) ((y / horizontalSubsample < outputWidth - 1) ? y / horizontalSubsample : outputWidth - 1); // inclusive end
for (long outY = startOutY; outY <= endOutY; outY++)
{
for (long outX = startOutX; outX <= endOutX; outX++)
{
long outputIndex = outY * outputHeightTimesChannel + outX * (long) channels + c;
(*this)(inputIndexWithinSample, sample) += outputGradientBatch(outputIndex, sample) / windowSize;
}
}
}
}
return *this;
}
#pragma endregion Other Helper Functions
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionForward(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
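// Precomputed geometry, shared with the backward passes:
// mpRowCol(row, 0) - base input offset (linear index) for this output row
// mpRowIwht(row, 0) - base index into the kernel weights
// mpRowRun(row, 0) - start of the row's descriptor in 'runs', laid out as
// [skip, size, size input-offset deltas..., size mask flags...]; a zero mask
// flag marks a tap that falls outside the input and is skipped.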
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
ElemType sum = 0;
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
sum += kernel.Data()[ivBase + skip + i] * (*this)(colBase + dcol, sample);
}
output(row, sample) = sum;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardData(const CPUMatrix<ElemType>& kernel, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
ElemType curGrad = (*this)(row, sample);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
grad(colBase + dcol, sample) += curGrad * kernel.Data()[ivBase + skip + i];
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ConvolutionBackwardKernel(const CPUMatrix<ElemType>& in, const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIwht,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& kernelGrad) const
{
// Do NOT parallelize these loops: different (row, sample) pairs accumulate into the same kernelGrad entries, so running them concurrently would race on the += below.
for (size_t sample = 0; sample < GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
int ivBase = mpRowIwht(row, 0);
assert(0 <= colBase && colBase < in.GetNumRows());
ElemType curGrad = (*this)(row, sample);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < in.GetNumRows());
kernelGrad.Data()[ivBase + skip + i] += curGrad * in(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInput(size_t unrollCols, size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
size_t batchSize = GetNumCols();
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
output.Data()[(row * batchSize + sample) * unrollCols + skip + i] = (*this)(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionOutput(size_t unrollCols, size_t mapInCount, size_t mapOutCount, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
if (mpRowCol.GetNumRows() % mapOutCount != 0)
InvalidArgument("The number of rows in mpRowCol must be multiple of mapOutCount.");
size_t mapOutSize = mpRowCol.GetNumRows() / mapOutCount;
size_t batchSize = GetNumCols();
size_t kernelSize = runs(1, 0);
if (kernelSize % mapInCount != 0)
InvalidArgument("kernelSize must be multiple of mapInCount.");
size_t kernelMapSize = kernelSize / mapInCount;
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < std::min(size, (int)kernelMapSize); i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
size_t isrc = row;
size_t idst = ((colBase + dcol) * batchSize + sample) * unrollCols + ((skip + i) % kernelMapSize) * mapOutCount;
for (size_t outMap = 0; outMap < mapOutCount; outMap++, isrc += mapOutSize)
{
assert(isrc < GetNumElements());
assert(idst + outMap < output.GetNumElements());
output.Data()[idst + outMap] = (*this)(isrc, sample);
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::UnrollConvolutionInputForKernelBackprop(size_t mapOutSize, const CPUMatrix<int>& mpRowCol,
const CPUMatrix<int>& mpRowRun, const CPUMatrix<int>& runs, CPUMatrix<ElemType>& output) const
{
size_t batchSize = GetNumCols();
size_t unrollCols = mapOutSize * batchSize;
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)batchSize; sample++)
{
for (size_t row = 0; row < mapOutSize; row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
int i0 = mpRowRun(row, 0);
int skip = runs(i0++, 0);
int size = runs(i0++, 0);
int imask = i0 + size;
for (int i = 0; i < size; i++)
{
if (runs(imask + i, 0) == 0)
continue;
int dcol = runs(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
size_t idst = (skip + i) * unrollCols + row * batchSize + sample;
assert(idst < output.GetNumElements());
output.Data()[idst] = (*this)(colBase + dcol, sample);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
assert(std::numeric_limits<ElemType>::has_infinity);
ElemType res = -std::numeric_limits<ElemType>::infinity();
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
res = std::max(res, (*this)(colBase + dcol, sample));
}
output(row, sample) = res;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxPoolingBackward(const CPUMatrix<ElemType>& out, const CPUMatrix<ElemType>& in,
const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices,
CPUMatrix<ElemType>& grad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
ElemType g = (*this)(row, sample);
ElemType m = out(row, sample);
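// route the gradient to the first input position in the window that attains the max value m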
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
if (in(colBase + dcol, sample) >= m)
{
#pragma omp atomic
grad(colBase + dcol, sample) += g;
break;
}
}
}
}
}
// For each image, for each ROI, this function treats that ROI as an image
// and does max pooling so that it has output size pooledHeight x pooledWidth.
// It loops over each location in the output tensor, computes which ROI
// and image should populate that location, computes the subset of the image
// corresponding to the ROI and which pixels in that subset should go into the
// output location, then takes the max value over that window.
// src: Images [W x H x C x N]
// roiData: ROIs [4 x numROIs x N],
// dst: Pooled ROIs [PW x PH x C x numROIs x N]
// argmax: max positions [PW x PH x C x numROIs x N]
// where PW = Pooled Width, PH = Pooled Height, C = Channels, N = Batch Size
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingForward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& output,
CPUMatrix<ElemType>& argmax) const
{
size_t roiOutputSize = pooledHeight * pooledWidth * channels;
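// Note: the inner '#pragma omp parallel for' directives below only spawn
// additional threads when nested parallelism is enabled (omp_set_nested);
// otherwise they execute sequentially on the enclosing thread.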
#pragma omp parallel for
for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
{
auto img = ColumnSlice(imgIdx, 1);
auto rois = roiData.ColumnSlice(imgIdx, 1);
#pragma omp parallel for
for (int roiIdx = 0; roiIdx < numRois; roiIdx++)
{
// each ROI is 4 elements: (x, y, w, h).
int base = roiIdx * 4;
// scaled ROI numbers (relative to original image size)
// roi points are doubles that represent location relative to image
ElemType scX = rois(base, 0);
ElemType scY = rois(base + 1, 0);
ElemType scW = rois(base + 2, 0);
ElemType scH = rois(base + 3, 0);
// compute actual spatial location of the ROI in our featuremap.
size_t x = (size_t)round(scX * width);
size_t y = (size_t)round(scY * height);
ElemType roiW = (ElemType)max(round(scW * width), (ElemType)1);
ElemType roiH = (ElemType)max(round(scH * height), (ElemType)1);
const ElemType winW = roiW / (ElemType)pooledWidth;
const ElemType winH = roiH / (ElemType)pooledHeight;
// inspired by Ross Girshick fast-rcnn caffe cpu: https://github.com/rbgirshick/fast-rcnn
// loop over spatial locations in output.
#pragma omp parallel for
for (int outw = 0; outw < pooledWidth; outw++)
{
for (int outh = 0; outh < pooledHeight; outh++)
{
// compute the top left corner of the input
// spatial window corresponding to this output unit
size_t hstart = (size_t)floor(outh * winH);
size_t wstart = (size_t)floor(outw * winW);
// compute bottom right corner (not included)
size_t hend = (size_t)ceil((outh + 1) * winH);
size_t wend = (size_t)ceil((outw + 1) * winW);
// offset window based on ROI top left corner.
// these indices are into the input slice.
hstart = min(max(hstart + y, (size_t)0), height);
wstart = min(max(wstart + x, (size_t)0), width);
hend = min(max(hend + y, (size_t)0), height);
wend = min(max(wend + x, (size_t)0), width);
bool isempty = (hend <= hstart) || (wend <= wstart);
for (size_t c = 0; c < channels; c++)
{
// [W x H x C x R x N]; R = ROIs per image
size_t outputIdx = roiIdx * roiOutputSize + outw + outh * pooledWidth + c * pooledHeight * pooledWidth;
size_t maxidx = 0;
ElemType maxval = isempty ? (ElemType)0 : -FLT_MAX;
size_t baseIdx = c * height * width;
for (size_t h = hstart; h < hend; h++)
{
for (size_t w = wstart; w < wend; w++)
{
// stored argmax indices are relative to the current channel.
size_t dataIdx = w + h * width;
if (img(baseIdx + dataIdx, 0) > maxval)
{
maxval = img(baseIdx + dataIdx, 0);
maxidx = dataIdx;
}
}
}
output(outputIdx, imgIdx) = maxval;
argmax(outputIdx, imgIdx) = maxidx;
}
}
}
}
}
}
// This function loops over locations in the input to the ROIPoolingNode (image locations).
// It loops over the ROIs corresponding to that image, seeing which ones could contain the current location
// in their output. For each ROI, it checks the argmax data to see if that ROI indeed chose
// this pixel location as the maximum. If so, it increments the gradient term for the input location.
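// Window inversion (illustrative sketch): in the forward pass, output bin (pw, ph) of an ROI
// pools input rows [floor(ph * winH), ceil((ph + 1) * winH)) relative to the ROI top left
// corner. Inverting this, an input location (w, h) inside the ROI can only have been pooled
// by output bins in the ranges
// phstart = floor((h - roiStartH) / winH), phend = ceil((h - roiStartH + 1) / winH)
// (analogously for pwstart/pwend), which is exactly the window scanned below before the
// argmax check decides whether this location actually won the max.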
template <class ElemType>
void CPUMatrix<ElemType>::ROIPoolingBackward(const size_t numRois, const size_t numImg, const size_t channels, const size_t width, const size_t height,
const size_t pooledWidth, const size_t pooledHeight, const CPUMatrix<ElemType>& roiData, CPUMatrix<ElemType>& grad,
CPUMatrix<ElemType>& argmax) const
{
// loop over images in the batch.
#pragma omp parallel for
for (int imgIdx = 0; imgIdx < numImg; imgIdx++)
{
// ROIs for this image. length 4*numRois;
auto rois = roiData.ColumnSlice(imgIdx, 1).Data();
// gradient values for all ROIs from this image. length numRois*pooledHeight*pooledWidth*channels;
auto pooledGrad = ColumnSlice(imgIdx, 1).Data();
auto argmaxCol = argmax.ColumnSlice(imgIdx, 1).Data();
// loop over spatial locations in the image.
#pragma omp parallel for
for (int w = 0; w < width; w++)
{
#pragma omp parallel for
for (int h = 0; h < height; h++)
{
// loop over the ROIs seeing which ones contain this location.
for (int roiN = 0; roiN < numRois; roiN++)
{
// each ROI is 4 elements: (x, y, w, h).
int roiOffset = roiN * 4;
// ROI data is relative to original image size
size_t roiStartW = (size_t)round(rois[roiOffset + 0] * width);
size_t roiStartH = (size_t)round(rois[roiOffset + 1] * height);
size_t roiWidth = max((size_t)round(rois[roiOffset + 2] * width), (size_t)1);
size_t roiHeight = max((size_t)round(rois[roiOffset + 3] * height), (size_t)1);
// skip this ROI if it doesn't contain the current input location.
const bool inROI = (w >= roiStartW && w < roiStartW + roiWidth &&
h >= roiStartH && h < roiStartH + roiHeight);
if (!inROI)
continue;
ElemType winH = (ElemType)roiHeight / (ElemType)pooledHeight;
ElemType winW = (ElemType)roiWidth / (ElemType)pooledWidth;
// what pooled nodes in the output for this ROI could have pooled this input location?
size_t phstart = (size_t)((h - roiStartH) / winH);
size_t pwstart = (size_t)((w - roiStartW) / winW);
size_t phend = (size_t)(ceil((h - roiStartH + 1) / winH));
size_t pwend = (size_t)(ceil((w - roiStartW + 1) / winW));
phstart = min(max(phstart, (size_t)0), pooledHeight);
phend = min(max(phend, (size_t)0), pooledHeight);
pwstart = min(max(pwstart, (size_t)0), pooledWidth);
pwend = min(max(pwend, (size_t)0), pooledWidth);
for (size_t c = 0; c < channels; c++)
{
ElemType gradient = 0;
// [W x H x C x N]
size_t index = w + h*width + c*height*width;
// go right up to channel c of the current ROI.
size_t offset = (roiN * channels + c) * pooledWidth * pooledHeight;
const ElemType* offsetPoolGrad = pooledGrad + offset;
const ElemType* offsetArgmax = argmaxCol + offset;
for (size_t ph = phstart; ph < phend; ph++)
{
for (size_t pw = pwstart; pw < pwend; pw++)
{
if ((size_t)offsetArgmax[ph * pooledWidth + pw] == (w + h * width))
gradient += offsetPoolGrad[ph * pooledWidth + pw];
}
}
grad(index, imgIdx) = gradient;
}
}
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::MaxUnpooling(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices,
const CPUMatrix<int>& indices, const CPUMatrix<ElemType>& poolInput,
CPUMatrix<ElemType>& input) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < input.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
ElemType curMax = poolInput(colBase + indices(i0, 0), sample);
ElemType prevMax = curMax;
int imax = 0;
for (int i = 1; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < poolInput.GetNumRows());
curMax = std::max(curMax, poolInput(colBase + dcol, sample));
if (curMax > prevMax)
{
prevMax = curMax;
imax = i;
}
}
int dcol = indices(i0 + imax, 0);
assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
input(colBase + dcol, sample) = (*this)(row, sample);
//int i = (int)poolIn(row, sample);
//assert(0 <= i && i < size);
//int dcol = indices(i0 + i, 0);
//assert(0 <= colBase + dcol && colBase + dcol < input.GetNumRows());
//input(colBase + dcol, sample) = (*this)(row, sample);
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingForward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& output, const bool poolIncludePad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)output.GetNumCols(); sample++)
{
for (size_t row = 0; row < output.GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < GetNumRows());
ElemType sum = 0;
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
assert(size > 0);
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < GetNumRows());
sum += (*this)(colBase + dcol, sample);
}
// Note that by default we divide by size, the number of actual pooled elements (excluding padding).
// If poolIncludePad == true, divide by the full window size instead, which is stored at indices(0, 0).
if (poolIncludePad)
size = indices(0, 0);
output(row, sample) = sum / size;
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::AveragePoolingBackward(const CPUMatrix<int>& mpRowCol, const CPUMatrix<int>& mpRowIndices, const CPUMatrix<int>& indices, CPUMatrix<ElemType>& grad, const bool poolIncludePad) const
{
#pragma omp parallel for
for (int64_t sample = 0; sample < (int64_t)GetNumCols(); sample++)
{
for (size_t row = 0; row < GetNumRows(); row++)
{
int colBase = mpRowCol(row, 0);
assert(0 <= colBase && colBase < grad.GetNumRows());
int i0 = mpRowIndices(row, 0);
int size = indices(i0++, 0);
int poolSize = poolIncludePad ? indices(0, 0) : size; // divisor: full window size if padding is included
assert(poolSize > 0);
ElemType g = (*this)(row, sample) / poolSize;
for (int i = 0; i < size; i++)
{
int dcol = indices(i0 + i, 0);
assert(0 <= colBase + dcol && colBase + dcol < grad.GetNumRows());
#pragma omp atomic
grad(colBase + dcol, sample) += g;
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationForward(const CPUMatrix<ElemType>& scale, const CPUMatrix<ElemType>& bias, bool inferenceOnly, double expAvgFactor, double blendFactor,
CPUMatrix<ElemType>& runMean, CPUMatrix<ElemType>& runVariance, CPUMatrix<ElemType>& out, double epsilon,
CPUMatrix<ElemType>& saveMean, CPUMatrix<ElemType>& saveInvStdDev) const
{
if (GetNumRows() % scale.GetNumRows() != 0)
LogicError("The number of rows of this matrx must be multiple of the number of rows of the scale matrix.");
if (!inferenceOnly || expAvgFactor != 0 || blendFactor != 1)
RuntimeError("Batch normalization training on CPU is not yet implemented.");
saveMean.Resize(0, 0); // only doing inference: these two are not produced
saveInvStdDev.Resize(0, 0);
bool spatial = GetNumRows() != scale.GetNumRows();
if (spatial)
{
size_t spatialSize = GetNumRows() / scale.GetNumRows();
#pragma omp parallel for
for (long icol = 0; icol < out.GetNumCols(); icol++)
{
for (long irow = 0; irow < out.GetNumRows(); irow++)
{
size_t imap = irow / spatialSize;
ElemType stdDev = sqrt(runVariance(imap, 0) + epsilon);
out(irow, icol) = scale(imap, 0) * ((*this)(irow, icol) - runMean(imap, 0)) / stdDev + bias(imap, 0);
}
}
}
else
{
#pragma omp parallel for
for (long icol = 0; icol < out.GetNumCols(); icol++)
{
for (long irow = 0; irow < out.GetNumRows(); irow++)
{
ElemType stdDev = sqrt(runVariance(irow, 0) + epsilon);
out(irow, icol) = scale(irow, 0) * ((*this)(irow, icol) - runMean(irow, 0)) / stdDev + bias(irow, 0);
}
}
}
}
template <class ElemType>
void CPUMatrix<ElemType>::BatchNormalizationBackward(const CPUMatrix<ElemType>& in, CPUMatrix<ElemType>& grad, const CPUMatrix<ElemType>& scale, double blendFactor,
const CPUMatrix<ElemType>& saveMean, const CPUMatrix<ElemType>& saveInvStdDev,
CPUMatrix<ElemType>& scaleGrad, CPUMatrix<ElemType>& biasGrad) const
{
UNUSED(in); UNUSED(grad); UNUSED(scale); UNUSED(blendFactor); UNUSED(saveMean); UNUSED(saveInvStdDev); UNUSED(scaleGrad); UNUSED(biasGrad);
RuntimeError("Batch normalization training on CPU is not yet implemented.");
}
#pragma region Static BLAS Functions
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = alpha * op(a) * op(b) + beta*c</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="beta">Scalar</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
ElemType beta, CPUMatrix<ElemType>& c, shared_ptr<QuantizedMultiplier<ElemType>> pQuantizedMultiplier)
{
if (a.IsEmpty() || b.IsEmpty())
return;
int m, n, k, l;
int lda, ldb, ldc;
CBLAS_TRANSPOSE mklTransA;
CBLAS_TRANSPOSE mklTransB;
if (transposeA)
{
m = (int) a.GetNumCols();
k = (int) a.GetNumRows();
lda = k;
mklTransA = CBLAS_TRANSPOSE::CblasTrans;
}
else
{
m = (int) a.GetNumRows();
k = (int) a.GetNumCols();
lda = m;
mklTransA = CBLAS_TRANSPOSE::CblasNoTrans;
}
if (transposeB)
{
l = (int) b.GetNumCols();
n = (int) b.GetNumRows();
ldb = n;
mklTransB = CBLAS_TRANSPOSE::CblasTrans;
}
else
{
l = (int) b.GetNumRows();
n = (int) b.GetNumCols();
ldb = l;
mklTransB = CBLAS_TRANSPOSE::CblasNoTrans;
}
assert(m > 0 && k > 0 && l > 0 && n > 0); // converting from size_t to int may cause overflow
if (k != l)
InvalidArgument("CPUMatrix<ElemType>::MultiplyAndWeightedAdd : The inner dimensions of a and b must match.");
if (beta == 0)
c.RequireSize(m, n);
else
c.VerifySize(m, n); // Can't resize if beta != 0
ldc = (int) c.GetNumRows();
if (pQuantizedMultiplier == nullptr)
{
if (sizeof(ElemType) == sizeof(double))
{
cblas_dgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<double*>(a.Data()), lda, reinterpret_cast<double*>(b.Data()), ldb, beta, reinterpret_cast<double*>(c.Data()), ldc);
}
else
{
#pragma warning(suppress : 4244)
cblas_sgemm((CBLAS_ORDER) (int)MatrixOrder::ColMajor, mklTransA, mklTransB, m, n, k, alpha, reinterpret_cast<float*>(a.Data()), lda, reinterpret_cast<float*>(b.Data()), ldb, beta, reinterpret_cast<float*>(c.Data()), ldc);
}
}
else
{
// TODO: support transpose product
if (mklTransA == CBLAS_TRANSPOSE::CblasTrans || mklTransB == CBLAS_TRANSPOSE::CblasTrans)
LogicError("Quantized multiplier currently doesn't support transpose.");
pQuantizedMultiplier->Multiply(m, n, k, a.Data(), b.Data(), c.Data());
}
}
template <class ElemType>
void CPUMatrix<ElemType>::Multiply1x1AndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b,
ElemType beta, CPUMatrix<ElemType>& c)
{
if (a.GetNumElements() != 1)
InvalidArgument("the argument a must be a scalar"); // a is a scalar
ElemType f = alpha * a.Get00Element();
if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
foreach_coord (i, j, c)
c(i, j) = b(i, j) * f;
else
#pragma omp parallel for
foreach_coord (i, j, c)
c(i, j) = b(i, j) * f + c(i, j) * beta;
}
template <class ElemType>
void CPUMatrix<ElemType>::ColumnwiseScaleAndWeightedAdd(ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& v, ElemType beta, CPUMatrix<ElemType>& c)
{
if (v.GetNumRows() != 1 && v.GetNumCols() != 1)
InvalidArgument("the argument v must be a vector"); // v is a vector
if (beta == 0)
c.RequireSize(a.GetNumRows(), a.GetNumCols());
else
c.VerifySize(a.GetNumRows(), a.GetNumCols()); // Can't resize if beta != 0
const ElemType* vd = v.Data();
if (beta == 0) // don't even read the memory if beta is 0
#pragma omp parallel for
foreach_coord(i, j, c)
c(i, j) = alpha * a(i, j) * vd[j];
else
#pragma omp parallel for
foreach_coord(i, j, c)
c(i, j) = alpha * a(i, j) * vd[j] + c(i, j) * beta;
}
/* compute singular value decomposition as
A = U*SIGMA*VT
W is used as temp working memory
*/
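// Usage sketch (illustrative only): factor a 4 x 3 matrix as A = U * diag(SIGMA) * VT.
// CPUMatrix<double> A(4, 3), SIGMA, U, VT, W;
// A.SetUniformRandomValue(-1, 1, /*seed*/ 42);
// CPUMatrix<double>::SVD(A, SIGMA, U, VT, W);
// Note that gesvd overwrites the buffer of A, so pass a copy if the input must be preserved.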
template <class ElemType>
void CPUMatrix<ElemType>::SVD(const CPUMatrix<ElemType>& A, CPUMatrix<ElemType>& SIGMA, CPUMatrix<ElemType>& U, CPUMatrix<ElemType>& VT, CPUMatrix<ElemType>& W)
{
if (A.IsEmpty())
LogicError("SVD: input matrix is empty.");
int info;
int m, n, lda, ldu, ldvt;
m = (int) A.GetNumRows();
n = (int) A.GetNumCols();
// W is used below as temporary working memory for gesvd
lda = m;
ldu = m;
ldvt = n;
U.RequireSize(m, m);
SIGMA.RequireSize(std::min(m, n), 1);
VT.RequireSize(n, n);
if (sizeof(ElemType) == sizeof(double))
{
#ifdef USE_MKL
double wkopt;
int lwork = -1;
dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
lwork = (int) wkopt;
W.RequireSize(lwork, 1);
dgesvd("All", "All", &m, &n, reinterpret_cast<double*>(A.Data()), &lda, reinterpret_cast<double*>(SIGMA.Data()), reinterpret_cast<double*>(U.Data()), &ldu, reinterpret_cast<double*>(VT.Data()), &ldvt, reinterpret_cast<double*>(W.Data()), &lwork, &info);
#else
std::vector<double> superb(std::max(std::min(m, n) - 1, 1));
info = LAPACKE_dgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<double*>(A.Data()), (int) lda, reinterpret_cast<double*>(SIGMA.Data()),
reinterpret_cast<double*>(U.Data()), (int) ldu, reinterpret_cast<double*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
}
else
{
#ifdef USE_MKL
float wkopt;
int lwork = -1;
sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, &wkopt, &lwork, &info);
lwork = (int) wkopt;
W.RequireSize(lwork, 1);
sgesvd("All", "All", &m, &n, reinterpret_cast<float*>(A.Data()), &lda, reinterpret_cast<float*>(SIGMA.Data()), reinterpret_cast<float*>(U.Data()), &ldu, reinterpret_cast<float*>(VT.Data()), &ldvt, reinterpret_cast<float*>(W.Data()), &lwork, &info);
#else
std::vector<float> superb(std::max(std::min(m, n) - 1, 1));
info = LAPACKE_sgesvd((int) MatrixOrder::ColMajor, 'A', 'A', (int) m, (int) n, reinterpret_cast<float*>(A.Data()), (int) lda, reinterpret_cast<float*>(SIGMA.Data()),
reinterpret_cast<float*>(U.Data()), (int) ldu, reinterpret_cast<float*>(VT.Data()), (int) ldvt, &superb[0]);
#endif
}
if (info > 0)
{
RuntimeError("The algorithm computing SVD failed to converge.\n");
}
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b) + c</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::MultiplyAndAdd(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 1.0, c);
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignSoftmaxSum(const CPUMatrix<ElemType>& softmax, CPUMatrix<ElemType>& c)
{
ElemType log_likelihood = 0.0;
size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
{
int sample = (int) (*this)(0, instance_id);
log_likelihood += softmax(instance_id, sample);
}
c(0, 0) = -log_likelihood;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNCEUnnormalizedEval(const CPUMatrix<ElemType>& a,
const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
ElemType log_likelihood = 0.0;
size_t batch_size = GetNumCols();
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
{
int sample = -(int) (*this)(0, instance_id);
ElemType score = bias(sample, 0);
for (int dim = 0; dim < b.GetNumRows(); dim++)
score += b(dim, sample) * a(dim, instance_id);
log_likelihood += score;
}
c(0, 0) = -log_likelihood;
}
//samples+prob gradient hidden embedding embedding/hidden
//a.m_CPUMatrix->AssignNCEDerivative(*tmp.m_CPUMatrix, *a.m_CPUMatrix, *b.m_CPUMatrix, inputIndex, *c.m_CPUMatrix);
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignNCEDerivative(const CPUMatrix<ElemType>& tmp, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t inputIndex, CPUMatrix<ElemType>& c)
{
size_t sample_size = GetNumRows() / 2;
size_t batch_size = GetNumCols();
if (inputIndex == 1)
{
#pragma omp parallel for
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
for (int dim = 0; dim < b.GetNumRows(); dim++)
c(dim, instance_id) -= b(dim, sample) * tmp(sample_id, instance_id);
}
}
else if (inputIndex == 2)
{
int i_blocks = omp_get_num_threads() * 16;
// Assume only one block in k direction.
// We don't need to explicitly block in the j direction.
#pragma omp parallel for
for (int ib = 0; ib < i_blocks; ib++)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
if (sample % i_blocks == ib)
for (int dim = 0; dim < b.GetNumRows(); dim++)
c(dim, sample) -= a(dim, instance_id) * tmp(sample_id, instance_id);
}
}
else if (inputIndex == 3)
{
// Assume only one block in k direction.
// We don't need to explicitly block in the j direction.
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
c(0, sample) -= tmp(sample_id, instance_id);
}
}
else
InvalidArgument("The argument inputIndex must be 1 or 2 or 3.");
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::AssignNoiseContrastiveEstimation(const CPUMatrix<ElemType>& a,
const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& bias, CPUMatrix<ElemType>& tmp, CPUMatrix<ElemType>& c)
//this: samples+probs
// a: hidden
// b: embedding
// tmp: softmax
// c: loglikelihood
{
double log_likelihood = 0.0;
size_t sample_size = GetNumRows() / 2;
size_t batch_size = GetNumCols();
size_t num_noise_samples = sample_size - 1;
double log_num_noise_samples = std::log(num_noise_samples);
#pragma omp parallel for reduction(+ : log_likelihood)
for (int instance_id = 0; instance_id < batch_size; instance_id++)
for (int sample_id = 0; sample_id < sample_size; sample_id++)
{
int sample = (int) (*this)(2 * sample_id, instance_id);
double score = bias(0, sample);
for (int dim = 0; dim < b.GetNumRows(); dim++)
score += a(dim, instance_id) * b(dim, sample);
double sample_prob = -(*this)(2 * sample_id + 1, instance_id);
if (sample_id == 0)
sample_prob = -sample_prob;
double score_noise = log_num_noise_samples + sample_prob;
double z = LogAdd(score, score_noise);
double logprob = score - z;
double logprob_noise = score_noise - z;
tmp(sample_id, instance_id) = (ElemType) -std::exp(logprob);
if (sample_id == 0)
tmp(sample_id, instance_id) += 1;
log_likelihood += sample_id == 0 ? logprob : logprob_noise;
}
c(0, 0) = (ElemType) -log_likelihood;
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b may be transposed): c = op(a) * op(b)</summary>
/// <param name="a">Input matrix</param>
/// <param name="transposeA">Whether matrix a is transposed</param>
/// <param name="b">Input matrix</param>
/// <param name="transposeB">Whether matrix b is transposed</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const bool transposeA, const CPUMatrix<ElemType>& b, const bool transposeB,
CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, transposeA, b, transposeB, 0.0, c);
}
/// <summary>Matrix-matrix multiply with col-major matrices (a and b are not transposed): c = a * b</summary>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::Multiply(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
return CPUMatrix<ElemType>::MultiplyAndWeightedAdd(1.0, a, false, b, false, 0.0, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a + c</summary>
/// if a is a column vector, add to all columns of c
/// if a is a row vector, add to all rows of c
/// if a is a scalar, add to all rows of c
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::ScaleAndAdd(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty() || c.IsEmpty())
LogicError("ScaleAndAdd: one of the input matrices is empty.");
if (a.GetNumRows() != 1 && a.GetNumCols() != 1) // a is not a col or row vector
{
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int len = m * n;
const int incx = 1;
const int incy = 1;
assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
if ((int) c.GetNumRows() != m || (int) c.GetNumCols() != n)
InvalidArgument("Dimension of matrix c does not match dimension of matrix a.");
if (sizeof(ElemType) == sizeof(double))
{
cblas_daxpy(len, alpha, reinterpret_cast<double*>(a.Data()), incx, reinterpret_cast<double*>(c.Data()), incy);
}
else
{
#pragma warning(suppress : 4244)
cblas_saxpy(len, alpha, reinterpret_cast<float*>(a.Data()), incx, reinterpret_cast<float*>(c.Data()), incy);
}
}
else if (a.GetNumElements() == 1) // scalar, add to all elements
{
ElemType v = alpha * a(0, 0);
long m = (long) c.GetNumRows(), n = (long) c.GetNumCols();
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
c(i, j) += v;
c(i + 1, j) += v;
c(i + 2, j) += v;
c(i + 3, j) += v;
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
c(i, j) += v;
}
}
}
else if (a.GetNumCols() == 1) // col vector, add it to all columns
{
int m = (int) c.GetNumRows();
if (m != (int) a.GetNumRows())
InvalidArgument("To add column vector, rows should match.");
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, c)
{
cblas_daxpy(m, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + c.LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
cblas_saxpy(m, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + c.LocateColumn(j)), 1);
}
}
}
else // row vector, add it to all rows
{
int m = (int) c.GetNumRows();
int n = (int) c.GetNumCols();
if (n != (int) a.GetNumCols())
InvalidArgument("To add row vector, cols should match.");
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
cblas_daxpy(n, alpha, reinterpret_cast<double*>(aBufPtr), 1, reinterpret_cast<double*>(cBufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
cblas_saxpy(n, alpha, reinterpret_cast<float*>(aBufPtr), 1, reinterpret_cast<float*>(cBufPtr + i), m);
}
}
}
}
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumRows() == c.GetNumRows() &&
a.GetNumCols() == b.GetNumCols() && a.GetNumCols() == c.GetNumCols()))
{
InvalidArgument("AddScaledDifference: a, b, and c must have same dimension.");
}
if (a.IsEmpty())
LogicError("AddScaledDifference: Input matrix a is empty.");
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
long m = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]);
cBufPtr[i + 1] += alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]);
cBufPtr[i + 2] += alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]);
cBufPtr[i + 3] += alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
cBufPtr[i] += alpha * (aBufPtr[i] - bBufPtr[i]);
}
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const ElemType alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
{
InvalidArgument("AssignScaledDifference: a, b must have same dimension.");
}
if (a.IsEmpty())
LogicError("AssignScaledDifference: Input matrix a is empty.");
if (&c != &a && &c != &b)
c.RequireSize(a.GetNumRows(), a.GetNumCols());
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
ElemType* cBufPtr = c.Data();
long m = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (m & ~3); i += 4)
{
cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]);
cBufPtr[i + 1] = alpha * (aBufPtr[i + 1] - bBufPtr[i + 1]);
cBufPtr[i + 2] = alpha * (aBufPtr[i + 2] - bBufPtr[i + 2]);
cBufPtr[i + 3] = alpha * (aBufPtr[i + 3] - bBufPtr[i + 3]);
}
// handle remaining elements
for (long i = m & ~3; i < m; i++)
{
cBufPtr[i] = alpha * (aBufPtr[i] - bBufPtr[i]);
}
}
// c[ci,cj] = beta * c[ci,cj] + a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AddElementToElement(ElemType beta, const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AddElementToElement: index out of range.");
ElemType us = beta ? beta * c(ci, cj) : 0; // do not multiply if beta is 0, could be a NaN
us += a(ai, aj);
c(ci, cj) = us;
}
////c[ci,cj] += a[ai,aj]
//template<class ElemType>
//void CPUMatrix<ElemType>::AddLogElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
//{
// if (ai >= a.GetNumRows() || aj >=a.GetNumCols() ||
// ci >= c.GetNumRows() || cj >=c.GetNumCols())
// InvalidArgument("AddElementToElement: index out of range.");
//
// ElemType v = a(ai,aj);
// c(ci, cj) += ((v < EPS_IN_LOG) ? LOG_OF_EPS_IN_LOG : log(v));
//}
#if 0 // now done as AddElementToElement (beta=0)
// c[ci,cj] = a[ai,aj]
template <class ElemType>
void CPUMatrix<ElemType>::AssignElementToElement(const CPUMatrix<ElemType>& a, const size_t ai, const size_t aj, CPUMatrix<ElemType>& c, const size_t ci, const size_t cj)
{
if (ai >= a.GetNumRows() || aj >= a.GetNumCols() ||
ci >= c.GetNumRows() || cj >= c.GetNumCols())
InvalidArgument("AssignElementToElement: index out of range.");
c(ci, cj) = a(ai, aj);
}
#endif
/// <summary>c += alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AddScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (alpha.GetNumElements() != 1)
InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
AddScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary> c = alpha * (a-b)</summary>
/// a, b, and c must have the same dimensions
/// <param name="alpha">1X1 matrix</param>
/// <param name="a">Input matrix</param>
/// <param name="b">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
void CPUMatrix<ElemType>::AssignScaledDifference(const CPUMatrix<ElemType>& alpha, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
if (alpha.GetNumElements() != 1)
InvalidArgument("AddScaledDifference: alpha must be a 1X1 matrix.");
AssignScaledDifference(alpha(0, 0), a, b, c);
}
/// <summary>Matrix-scalar multiply with col-major matrices: c = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
/// <param name="c">Resulting matrix, user is responsible for allocating this</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
c.RequireSize(m, n);
ElemType* aBufPtr = a.Data();
ElemType* cBufPtr = c.Data();
if (alpha == 0)
{
memset(cBufPtr, 0, sizeof(ElemType) * c.GetNumElements());
return;
}
long size = (long) c.GetNumElements();
#pragma omp parallel for
// four-way unrolling
for (long i = 0; i < (size & ~3); i += 4)
{
cBufPtr[i] = alpha * aBufPtr[i];
cBufPtr[i + 1] = alpha * aBufPtr[i + 1];
cBufPtr[i + 2] = alpha * aBufPtr[i + 2];
cBufPtr[i + 3] = alpha * aBufPtr[i + 3];
}
// remaining elements
for (long i = size & ~3; i < size; i++)
{
cBufPtr[i] = alpha * aBufPtr[i];
}
}
/// <summary>Matrix-scalar multiply with col-major matrices: a = alpha * a</summary>
/// <param name="alpha">Scalar</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(ElemType alpha, CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int len = m * n;
const int incx = 1;
assert(m > 0 && n > 0 && len > 0); // converting from size_t to int may cause overflow
if (alpha == 0 && incx == 1)
{
memset(a.Data(), 0, sizeof(ElemType) * len);
}
else if (sizeof(ElemType) == sizeof(double))
{
cblas_dscal(len, alpha, reinterpret_cast<double*>(a.Data()), incx);
}
else
{
#pragma warning(suppress : 4244)
cblas_sscal(len, alpha, reinterpret_cast<float*>(a.Data()), incx);
}
}
/// <summary>Matrix multiply with col-major matrices: a = alpha[1,1] * a</summary>
/// <param name="alpha">1x1 matrix</param>
/// <param name="a">Input matrix</param>
template <class ElemType>
/*static*/ void CPUMatrix<ElemType>::Scale(CPUMatrix<ElemType> alpha, CPUMatrix<ElemType>& a)
{
if (a.IsEmpty())
LogicError("Scale: Input matrix a is empty.");
if (alpha.GetNumElements() != 1)
LogicError("Matrix alpha must be 1x1");
CPUMatrix<ElemType>::Scale(alpha(0, 0), a);
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProduct(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProduct: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProduct: Matrices a and b should have same dimension.");
if ((isColWise && m == 1) || !isColWise && n == 1) // in this case it's equivalent to element-wise product
{
c.AssignElementProductOf(a, b);
}
else if (isColWise) // col-wise
{
c.RequireSize(1, n);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_column (j, c)
{
c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
}
}
else
{
#pragma omp parallel for
foreach_column (j, c)
{
#pragma warning(suppress : 4244)
c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
}
}
}
else
{
c.RequireSize(m, 1);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
}
}
}
}
// treat matrices as vectors. do vec(a)^T vec(b)
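// i.e. InnerProductOfMatrices(a, b) = sum_{i,j} a(i, j) * b(i, j), computed as one dot product
// over the flattened column-major buffers.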
template <class ElemType>
ElemType CPUMatrix<ElemType>::InnerProductOfMatrices(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProductOfMatrices: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProductOfMatrices: Matrices a and b should have same dimension.");
if (sizeof(ElemType) == sizeof(double))
{
return (ElemType) cblas_ddot((int) a.GetNumElements(), reinterpret_cast<double*>(a.Data()), 1, reinterpret_cast<double*>(b.Data()), 1);
}
else
{
#pragma warning(suppress : 4244)
return (ElemType) cblas_sdot((int) a.GetNumElements(), reinterpret_cast<float*>(a.Data()), 1, reinterpret_cast<float*>(b.Data()), 1);
}
}
template <class ElemType>
void CPUMatrix<ElemType>::ElementWisePower(ElemType alpha, const CPUMatrix<ElemType>& a, CPUMatrix<ElemType>& c)
{
if (a.IsEmpty())
LogicError("Scale: The input matrix a is empty.");
c.RequireSize(a.GetNumRows(), a.GetNumCols());
if (alpha == 2)
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = a(i, j) * a(i, j);
}
}
else if (alpha == 3)
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = a(i, j) * a(i, j) * a(i, j);
}
}
else
{
#pragma omp parallel for
foreach_coord (i, j, c)
{
c(i, j) = pow(a(i, j), alpha);
}
}
}
template <class ElemType>
bool CPUMatrix<ElemType>::AreEqual(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const ElemType threshold /*= 1e-8*/)
{
if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
return false;
bool result = true;
#pragma omp parallel for
foreach_coord (i, j, a)
{
if (abs(a(i, j) - b(i, j)) > threshold)
{
result = false;
break;
}
}
return result;
}
// see Matrix<ElemType>::TensorShuffleScaleAndAdd() for comments
template <class ElemType>
void CPUMatrix<ElemType>::TensorShuffleScaleAndAdd(ElemType keepWeight, const CPUMatrix<ElemType>& a, size_t D, size_t S, size_t M, size_t K, size_t T, ElemType scaleFactor, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c)
{
size_t N = D * S * M * K * T;
const auto pa = a.Data();
const auto pb = b.Data();
auto pc = c.Data();
// Note: This code is written to match a GPU implementation. It is not super-efficient on the CPU.
for (size_t na = 0; na < N; na++) // loop over all elements
{
// recover the 5 indices from the loop counter
size_t d = na % D;
size_t s = (na / D) % S;
size_t m = (na / D / S) % M;
size_t k = (na / D / S / M) % K;
size_t t = (na / D / S / M / K) % T;
// compute index for the a and b/c tensors
assert(na == (((t * K + k) * M + m) * S + s) * D + d); // input tensor of dimension (D x S x M x K x T)
size_t nb = (((t * S + s) * M + m) * K + k) * D + d; // output tensor of dimension (D x K x M x S x T): k/K and s/S swapped
assert(nb < N);
// perform the computation
ElemType cval = keepWeight ? keepWeight * pb[nb] : 0; // if weight is 0 then don't bother to read memory (efficiency) or to multiply (NaN-safe)
cval += scaleFactor * pa[na];
pc[nb] = cval;
}
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Ones(const size_t rows, const size_t cols)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetValue(1);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Zeros(const size_t rows, const size_t cols)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetValue(0);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::Eye(const size_t rows)
{
CPUMatrix<ElemType> c(rows, rows); // will initialize to 0
c.SetDiagonalValue(1);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomUniform(const size_t rows, const size_t cols, const ElemType low, const ElemType high, unsigned long seed)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetUniformRandomValue(low, high, seed);
return c;
}
template <class ElemType>
CPUMatrix<ElemType> CPUMatrix<ElemType>::RandomGaussian(const size_t rows, const size_t cols, const ElemType mean, const ElemType sigma, unsigned long seed)
{
CPUMatrix<ElemType> c(rows, cols); // will initialize to 0
c.SetGaussianRandomValue(mean, sigma, seed);
return c;
}
template <class ElemType>
bool CPUMatrix<ElemType>::HasElement(const CPUMatrix<ElemType>& mat, const ElemType v)
{
bool bHas = false;
bool isvFinite = std::isfinite(v);
#pragma omp parallel for
for (long j = 0; j < mat.GetNumElements(); j++)
{
#pragma omp flush(bHas)
if (!bHas)
{
ElemType cur = mat.Data()[j];
if (isvFinite && std::isfinite(cur))
{
if (cur == v)
bHas = true;
}
else if (std::isnan(v) && std::isnan(cur))
bHas = true;
else if (std::isinf(v) && std::isinf(cur) && std::signbit(v) == std::signbit(cur))
bHas = true;
}
}
return bHas;
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]
// the inputs are two row vectors
// the output is a matrix of size (negnumber + 1, cols)
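// Worked example (illustrative): a = [a1 a2 a3], b = [b1 b2 b3], shift = 1, negnumber = 2:
// row 0: a1*b1 a2*b2 a3*b3 (aligned products)
// row 1: a1*b2 a2*b3 a3*b1 (columns of b rotated left by shift)
// row 2: a1*b3 a2*b1 a3*b2 (rotated by shift + 1)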
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift, size_t negnumber)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOfWithShiftNeg: Matrix is empty.");
if (!(a.GetNumRows() == b.GetNumRows() && a.GetNumCols() == b.GetNumCols()))
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix dimensions do not match.");
if (a.GetNumRows() != 1)
InvalidArgument("AssignElementProductOfWithShiftNeg: The input matrix must be a row vector.");
auto& us = *this;
if (this != &a)
{
RequireSize(negnumber + 1, a.GetNumCols());
// RequireSize(a.GetNumRows(), a.GetNumCols());
}
long m = (long) GetNumRows(), n = (long) GetNumCols(); // a and b are of size (1,n)
// #pragma omp parallel for
for (long j = 0; j < n; j++)
{
us(0, j) = a(0, j) * b(0, j);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < m; i++)
{
us(i, j) = a(0, j) * b(0, (j + shift + i - 1) % n);
}
}
return *this;
}
template <class ElemType>
void CPUMatrix<ElemType>::InnerProductWithShiftNeg(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, const bool isColWise, size_t shift, size_t negnumber)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("InnerProductWithShiftNeg: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != k || n != l)
InvalidArgument("InnerProductWithShiftNeg: Matrices a and b should have the same dimensions.");
if ((isColWise && m == 1) || (!isColWise && n == 1)) // in this case it would be an element-wise product
{
InvalidArgument("InnerProductWithShiftNeg: Both inputs should be full matrices, not vectors.");
// c.AssignElementProductOf(a, b);
}
else if (isColWise) // col-wise
{
c.RequireSize(negnumber + 1, n); // output has one row of aligned products plus negnumber shifted rows
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
for (long j = 0; j < n; j++)
{
c(0, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn(j)), 1);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < negnumber + 1; i++)
{
c(i, j) = (ElemType) cblas_ddot(m, reinterpret_cast<double*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<double*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
}
}
}
else
{
for (long j = 0; j < n; j++)
{
c(0, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn(j)), 1);
}
for (long j = 0; j < n; j++)
{
for (long i = 1; i < negnumber + 1; i++)
{
c(i, j) = (ElemType) cblas_sdot(m, reinterpret_cast<float*>(aBufPtr + a.LocateColumn(j)), 1, reinterpret_cast<float*>(bBufPtr + b.LocateColumn((j + shift + i - 1) % n)), 1);
}
}
}
}
else
{
InvalidArgument("InnerProduct: Rowwise is not supported yet");
c.RequireSize(m, 1);
ElemType* aBufPtr = a.Data();
ElemType* bBufPtr = b.Data();
if (sizeof(ElemType) == sizeof(double))
{
#pragma omp parallel for
foreach_row (i, c)
{
c(i, 0) = (ElemType) cblas_ddot(n, reinterpret_cast<double*>(aBufPtr + i), m, reinterpret_cast<double*>(bBufPtr + i), m);
}
}
else
{
#pragma omp parallel for
foreach_row (i, c)
{
#pragma warning(suppress : 4244)
c(i, 0) = cblas_sdot(n, reinterpret_cast<float*>(aBufPtr + i), m, reinterpret_cast<float*>(bBufPtr + i), m);
}
}
}
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::GetARowByIndex(const CPUMatrix<ElemType>& a, size_t index)
{
if (a.IsEmpty())
LogicError("GetARowByIndex: the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
if (index < 0 || index >= m)
LogicError("GetARowByIndex: the row index is out of range.");
assert(m > 0 && n > 0); // converting from size_t to int may cause overflow
auto& us = *this;
RequireSize(1, n);
for (long j = 0; j < n; j++)
{
us(0, j) = a(index, j);
}
return *this;
}
// input: a, a row vector
// input: b, a matrix. b.col == a.col
// input firstmatrixfixed: If true, keep a's order. Otherwise, keep b's order
// output: c, a matrix. c.size == b.size
/*
Example, a = [a1 a2 a3]
b = [b11 b12 b13;
b21 b22 b23 ]
if true:
shift = 1
then c = [a1*b12 a2*b13 a3*b11
a1*b22 a2*b23 a3*b21]
if shift = 2
then c = [ a1*b13 a2*b11 a3*b12
a1*b23 a2*b21 a3*b22]
i.e. we do column-wise shift
if false:
shift = 1
then c = [a2*b11 a3*b12 a1*b13
a2*b21 a3*b22 a1*b23]
shift = 2
then c = [ a3*b11 a1*b12 a2*b13
a3*b21 a1*b22 a2*b23]
*/
template <class ElemType>
void CPUMatrix<ElemType>::ConductRowElementMultiplyWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, CPUMatrix<ElemType>& c, size_t shift, bool bFirstmatrixfixed)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("ConductRowElementMultiplyWithShift: one of the input matrices is empty.");
const int m = (int) a.GetNumRows();
const int n = (int) a.GetNumCols();
const int k = (int) b.GetNumRows();
const int l = (int) b.GetNumCols();
assert(m > 0 && n > 0 && k > 0 && l > 0); // converting from size_t to int may cause overflow
if (m != 1 || n != l)
InvalidArgument("ConductRowElementMultiplyWithShift: a must be a row vector with the same number of columns as b.");
c.RequireSize(k, l); // c must be the same size as b
if (bFirstmatrixfixed)
{
for (long j = 0; j < l; j++)
{
for (long i = 0; i < k; i++)
{
c(i, j) = a(0, j) * b(i, (j + shift) % l);
}
}
}
else
{
for (long j = 0; j < l; j++)
{
for (long i = 0; i < k; i++)
{
c(i, j) = a(0, (j + shift) % l) * b(i, j);
}
}
}
}
// CPUMatrix<ElemType>& AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift);
//[this]=a .* b
// here, a and b must be two row vectors of the same size, i.e. [1,m]. We will do element product with shift.
// inputs are 2 row vectors
// output is a row vector
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignElementProductOfWithShift(const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, size_t shift)
{
if (a.IsEmpty() || b.IsEmpty())
LogicError("AssignElementProductOfWithShift: Matrix is empty.");
if (a.GetNumRows() != b.GetNumRows() || a.GetNumCols() != b.GetNumCols())
InvalidArgument("AssignElementProductOfWithShift: The input matrix dimensions do not match.");
if (a.GetNumRows() != 1)
InvalidArgument("AssignElementProductOfWithShift: The input matrix must be a row vector.");
auto& us = *this;
if (this != &a)
{
RequireSize(1, a.GetNumCols());
// RequireSize(a.GetNumRows(), a.GetNumCols());
}
// long m = (long)GetNumRows(), n = (long)GetNumCols(); // a and b are of size (1,n)
long n = (long) GetNumCols(); // a and b are of size (1,n)
#pragma omp parallel for
for (long j = 0; j < n; j++)
{
us(0, j) = a(0, j) * b(0, (j + shift) % n);
}
return *this;
}
#pragma endregion Static BLAS Functions
// 'double' version of LogAdd
inline double LogAddD(double x, double y)
{
return LogAdd(x, y);
}
template <class ElemType>
ElemType CPUMatrix<ElemType>::LogSumOfElements() const
{
ElemType fAlpha = (ElemType) LZERO;
ElemType* bufPtr = Data();
for (int k = 0; k < GetNumElements(); k++)
fAlpha = (ElemType) LogAddD(fAlpha, bufPtr[k]);
return fAlpha;
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFBackwardCompute(const CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& pair_scores)
{
int iNumPos = (int) lbls.GetNumCols();
int iNumLab = (int) lbls.GetNumRows();
int lastLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, iNumPos - 1) != 0)
{
lastLbl = ik;
break;
}
beta.RequireSize(iNumLab, iNumPos);
for (int t = iNumPos - 1; t >= 0; t--)
{
#pragma omp parallel for
for (int k = 0; k < iNumLab; k++)
{
_rcrfBackwardCompute(t, k, alpha, beta, pair_scores);
}
}
}
// Calculate alpha in forward-backward calculation. equation (6), (7) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
// GPU x dimension corresponds to utterances, y dimension corresponds to phone sequence in each utterance
// prob (input): the posterior output from the network
// alpha (output): alpha for forward-backward calculation.
// phoneSeq (input): phone ID sequence for each utterance in this minibatch, each col is one utterance
// phoneBound (input): phone boundary (frame index) of each phone for each utterance in this minibatch, each col is one utterance
// uttToChanInd (input): map from utterance ID to minibatch channel ID. We need this because each channel may contain more than one utterance.
// uttFrameNum (input): the frame number of each utterance. The size of this vector = the number of all utterances in this minibatch
// uttBeginFrame(input): the position of the first frame of each utterance in the minibatch channel. We need this because each channel may contain more than one utterance.
// uttPhoneNum (input): the phone number of each utterance. The size of this vector = the number of all utterances in this minibatch
// numChannels (input): channel number in this minibatch
// uttNum (input): number of utterances
// t (input): time stamp to process
// maxPhoneNum (input): the maximum number of phones across the utterances in this minibatch
// totalPhoneNum (input): the total number of phones of all utterances
// blankTokenId (input): id of the CTC blank token
// delayConstraint -- label output delay constraint introduced during training that allows for a shorter delay during inference.
// Alpha and Beta scores outside of the delay boundary are set to zero.
// Setting this parameter smaller results in a shorter delay between label outputs during decoding.
// delayConstraint=-1 means no constraint
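// In the log domain the recursion below implements
// alpha_t(s) = LogAdd(alpha_{t-1}(s), alpha_{t-1}(s-1) [, alpha_{t-1}(s-2)]) + log p(l_s, t)
// where the alpha_{t-1}(s-2) term participates only if label l_s is not blank and differs
// from l_{s-2}; at t = 0 the recursion is initialized from the network output at s = 1, 2.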
template<class ElemType>
void _assignAlphaScore(
const ElemType *prob,
ElemType *alphaScore,
ElemType *phoneSeq,
ElemType *phoneBound,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttFrameNum,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
size_t numChannels,
const size_t uttNum,
const size_t t,
const size_t maxPhoneNum, // Maximum length of utterance in this MB
const size_t totalPhoneNum, // Total number of phones
const size_t blankTokenId,
const int delayConstraint)
{
for (size_t uttId = 0; uttId < uttNum; uttId++) {
// Number of phones and frames in this utterance
size_t frameNum = uttFrameNum[uttId];
if (t >= frameNum) continue;
size_t phoneNum = uttPhoneNum[uttId];
#pragma omp parallel for
for (int phoneSeqId = 1; phoneSeqId < (int) phoneNum - 1; phoneSeqId++) {
// Index of the label in the sequence
// Current and previous phone indices in phoneSeq matrix
size_t labelid = uttId*maxPhoneNum + phoneSeqId;
// Actual current phone label
size_t phoneId = (size_t)(phoneSeq[labelid]);
// Index of the current frame in minibatch
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
// Index of probability of observing phoneId at frame timeId
size_t probId = timeId*totalPhoneNum + phoneId;
size_t alphaId = maxPhoneNum* timeId + phoneSeqId; // alpha_t(s)
if (t == 0)
{
// Initialize recursion
if (phoneSeqId == 1 || phoneSeqId == 2)
{
alphaScore[alphaId] = prob[probId];
}
}
else
{
if (phoneSeqId >= 1)
{
size_t timeId_1 = timeId - numChannels; // Index corresponding to (t-1)
size_t alphaId_0 = maxPhoneNum* timeId_1 + phoneSeqId; // alpha_{t-1}(s)
size_t alphaId_1 = alphaId_0 - 1; // alpha_{t-1}(s-1)
size_t alphaId_2 = alphaId_0 - 2; // alpha_{t-1}(s-2)
ElemType x = LZERO;
ElemType ascore;
if (phoneSeqId > 2)
{
size_t labelid_2 = labelid - 2;
// if current label is not blank and not equal prev non-blank label
if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
{
x = LogAdd(x, alphaScore[alphaId_2]);
}
}
if (phoneSeqId > 1)
{
x = LogAdd(x, alphaScore[alphaId_1]);
}
x = LogAdd(x, alphaScore[alphaId_0]);
if (phoneId != SIZE_MAX)
ascore = prob[probId]; // Probability of observing given label at given time
else
ascore = 0;
alphaScore[alphaId] = (ElemType)x + ascore;
if (delayConstraint != -1)
{
size_t labelid_r = labelid + 2;
size_t phoneBoundId_r = (size_t)(phoneBound[labelid_r]);
if (phoneId == blankTokenId)
{
// only constraint right side
if (t > phoneBoundId_r + delayConstraint - 1)
alphaScore[alphaId] = LZERO;
}
else if (phoneId != blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint)
alphaScore[alphaId] = LZERO;
}
}
}
}
}
}
}
// Calculate beta in forward-backward calculation, equation (10), (11) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
// See _assignAlphaScore for the explanation of parameters
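// In the log domain the recursion below implements
// beta_t(s) = LogAdd(beta_{t+1}(s), beta_{t+1}(s+1) [, beta_{t+1}(s+2)]) + log p(l_s, t)
// with the beta_{t+1}(s+2) term participating only if label l_s is not blank and differs
// from l_{s+2}; at t = frameNum - 1 the last two sequence positions are initialized.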
template<class ElemType>
void _assignBetaScore(
const ElemType *prob,
ElemType *betaScore,
ElemType *phoneSeq,
ElemType *phoneBound,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttFrameNum,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
const size_t numChannels,
const size_t uttNum,
const long t,
const size_t maxPhoneNum,
const size_t totalPhoneNum,
const size_t blankTokenId,
const int delayConstraint)
{
for (size_t uttId = 0; uttId < uttNum; uttId++) {
// Number of phones and frames in this utterance
size_t frameNum = uttFrameNum[uttId];
if (t >= frameNum) continue;
size_t phoneNum = uttPhoneNum[uttId];
#pragma omp parallel for
for (int phoneSeqId = 1; phoneSeqId < (int) phoneNum - 1; phoneSeqId++) {
size_t labelid = uttId*maxPhoneNum + phoneSeqId;
size_t labelid_2 = labelid + 2;
size_t phoneId = (size_t)(phoneSeq[labelid]);
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
size_t probId = timeId*totalPhoneNum + phoneId;
size_t betaid = maxPhoneNum* timeId + phoneSeqId;
size_t timeId_1 = timeId + numChannels;
size_t betaid_0 = maxPhoneNum* timeId_1 + phoneSeqId;
size_t betaid_1 = betaid_0 + 1;
size_t betaid_2 = betaid_0 + 2;
if (t == frameNum - 1)
{
if (phoneSeqId == phoneNum - 3 || phoneSeqId == phoneNum - 2)
{
betaScore[betaid] = prob[probId];
}
}
else
{
if (phoneSeqId >= 1)
{
ElemType x = LZERO;
ElemType ascore;
if (phoneSeqId < phoneNum - 3)
{
if ((size_t)(phoneSeq[labelid]) != blankTokenId && phoneId != (size_t)(phoneSeq[labelid_2]))
{
x = LogAdd(x, betaScore[betaid_2]);
}
}
if (phoneSeqId < phoneNum - 2)
{
x = LogAdd(x, betaScore[betaid_1]);
}
x = LogAdd(x, betaScore[betaid_0]);
if (phoneId != SIZE_MAX)
ascore = prob[probId];
else
ascore = 0;
betaScore[betaid] = (ElemType)x + ascore;
if (delayConstraint != -1)
{
size_t phoneBoundId_r = (size_t)(phoneBound[labelid_2]);
if (phoneId == blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint - 1)
betaScore[betaid] = LZERO;
}
else if (phoneId != blankTokenId)
{
if (t > phoneBoundId_r + delayConstraint)
betaScore[betaid] = LZERO;
}
}
}
}
}
}
}
// Calculate CTC score. equation (8) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
template<class ElemType>
void _assignTotalScore(ElemType *betaScore,
std::vector<ElemType>& totalScore,
const size_t uttNum,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttBeginFrame,
const size_t numChannels,
const size_t maxPhoneNum)
{
#pragma omp parallel for
for (int uttId = 0; uttId < uttNum; uttId++) {
if (uttId < uttNum)
{
LONG64 alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
betaScore[alphaId_0] = LogAdd(betaScore[alphaId_0 + 1], betaScore[alphaId_0 + 2]);
totalScore[uttId] = betaScore[alphaId_0];
}
}
}
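// Illustrative sketch (not part of the original file): the alpha/beta/total-score
// routines above do all arithmetic in log space to avoid underflow of tiny path
// probabilities. LogAdd is assumed to implement a numerically stable log-sum-exp
// equivalent to the following; the name LogAddSketch is hypothetical and unused.
template <class ElemType>
static inline ElemType LogAddSketch(ElemType x, ElemType y)
{
    ElemType hi = x > y ? x : y; // larger argument
    ElemType lo = x > y ? y : x; // smaller argument
    // log(e^x + e^y) = hi + log(1 + e^(lo - hi)); lo - hi <= 0, so exp() cannot overflow.
    // (log1p(exp(lo - hi)) would be slightly more accurate than log(1 + ...).)
    // e.g. LogAddSketch(-1000.0, -1000.0) == -1000 + log(2), whereas exp(-1000) underflows to 0.
    return hi + (ElemType)log(1.0 + exp((double)(lo - hi)));
}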
// Calculate derivative, equation (15) in http://machinelearning.wustl.edu/mlpapers/paper_files/icml2006_GravesFGS06.pdf
// See _assignAlphaScore for the explanation of parameters
template<class ElemType>
void _assignCTCScore(
ElemType *CTCscore,
ElemType *prob,
ElemType *alphaScore,
ElemType *betaScore,
ElemType *phoneSeq,
const size_t uttNum,
const std::vector<size_t>& uttToChanInd,
const std::vector<size_t>& uttBeginFrame,
const std::vector<size_t>& uttPhoneNum,
const std::vector<size_t>& uttFrameNum,
const size_t numChannels,
const size_t maxPhoneNum,
const size_t totalPhoneNum)
{
for (size_t uttId = 0; uttId < uttNum; uttId++) {
#pragma omp parallel for
for (int t = 0; t < (int)uttFrameNum[uttId]; t++) {
size_t phoneNum = uttPhoneNum[uttId];
size_t alphaId_0 = (uttBeginFrame[uttId] * numChannels + uttToChanInd[uttId]) * maxPhoneNum;
size_t timeId = (t + uttBeginFrame[uttId])*numChannels + uttToChanInd[uttId];
ElemType P_lx = betaScore[alphaId_0];
for (int s = 1; s < (int)phoneNum - 1; s++)
{
long phoneId = phoneSeq[uttId*maxPhoneNum + s];
size_t alphaId = maxPhoneNum* timeId + s;
size_t probId = timeId*totalPhoneNum + phoneId;
if (phoneId != SIZE_MAX)
{
ElemType logoccu = alphaScore[alphaId] + betaScore[alphaId] - prob[probId] - (ElemType)P_lx;
CTCscore[probId] = LogAdd(CTCscore[probId], logoccu);
}
}
for (int s = 0; s < totalPhoneNum; s++)
{
size_t probId = timeId*totalPhoneNum + s;
ElemType logoccu = CTCscore[probId];
if (logoccu < LZERO)
CTCscore[probId] = 0.0f;
else
CTCscore[probId] = exp(logoccu);
}
}
}
}
template<class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignCTCScore(
const CPUMatrix<ElemType>& prob, CPUMatrix<ElemType>& alpha, CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& phoneSeq, const CPUMatrix<ElemType>& phoneBoundary, ElemType &totalScore, const std::vector<size_t>& uttToChanInd, const std::vector<size_t> & uttBeginFrame, const std::vector<size_t> & uttFrameNum,
const std::vector<size_t> & uttPhoneNum, const size_t numParallelSequences, const size_t maxFrameNum, const size_t blankTokenId, const int delayConstraint, const bool isColWise)
{
// Column wise representation of sequences in input matrices (each column is one sequence/utterance)
if (isColWise)
{
// Total number of phones
size_t totalPhoneNum = prob.GetNumRows();
size_t uttNum = uttFrameNum.size();
// Max number of phones in utterances in this minibatch
size_t maxPhoneNum = phoneSeq.GetNumRows();
for (size_t t = 0; t < maxFrameNum; t++)
{
_assignAlphaScore(prob.Data(), alpha.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
}
for (LONG64 t = maxFrameNum - 1; t >= 0; t--)
{
_assignBetaScore(prob.Data(), beta.Data(), phoneSeq.Data(), phoneBoundary.Data(), uttToChanInd,
uttFrameNum, uttBeginFrame, uttPhoneNum, numParallelSequences, uttNum, t, maxPhoneNum, totalPhoneNum, blankTokenId, delayConstraint);
}
std::vector<ElemType> scores(uttNum);
_assignTotalScore(beta.Data(), scores, uttNum, uttToChanInd, uttBeginFrame, numParallelSequences, maxPhoneNum);
_assignCTCScore(Data(), prob.Data(), alpha.Data(), beta.Data(), phoneSeq.Data(), uttNum, uttToChanInd,
uttBeginFrame, uttPhoneNum, uttFrameNum, numParallelSequences, maxPhoneNum, totalPhoneNum);
for (size_t utt = 0; utt < uttNum; utt++)
{
totalScore += scores[utt];
}
return *this;
}
else {
LogicError("Only ColWise minibatch layout is supported.");
}
return *this;
}
/// the kernel function for RCRF backward computation
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfBackwardCompute(size_t t, size_t k, const CPUMatrix<ElemType>& alpha,
CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores)
{
size_t iNumLab = alpha.GetNumRows();
size_t iNumPos = alpha.GetNumCols();
ElemType fSum;
ElemType fTmp = (ElemType) LZERO;
if (t == iNumPos - 1)
{
fSum = (ElemType) LZERO;
for (int j = 0; j < iNumLab; j++)
{
fSum = (ElemType) LogAddD(fSum, alpha(j, t));
}
fTmp = alpha(k, t) - fSum;
beta(k, t) = fTmp;
}
else
{
for (int j = 0; j < iNumLab; j++)
{
fSum = (ElemType) LZERO;
for (int m = 0; m < iNumLab; m++)
{
fSum = (ElemType) LogAddD(fSum, alpha(m, t) + pair_scores(j, m));
}
fTmp = (ElemType) LogAddD(fTmp, beta(j, t + 1) + alpha(k, t) + pair_scores(j, k) - fSum);
}
beta(k, t) = fTmp;
}
}
template <class ElemType>
void CPUMatrix<ElemType>::RCRFTransGrdCompute(const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& alpha,
const CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores,
CPUMatrix<ElemType>& grd)
{
int iNumPos = (int) alpha.GetNumCols();
int iNumLab = (int) alpha.GetNumRows();
int firstLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, 0) != 0)
{
firstLbl = ik;
break;
}
for (size_t tPos = 0; tPos < iNumPos; tPos++)
{
CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
CPUMatrix<ElemType> a;
if (tPos > 0)
a = alpha.ColumnSlice(tPos - 1, 1);
#pragma omp parallel for
for (int i = 0; i < iNumLab; i++)
{
_rcrfTransGrdCompute(i, lbls, alpha, beta, pair_scores, grd, tPos);
}
// transition score
int i = -1;
if (tPos == 0)
i = firstLbl;
else
{
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, tPos - 1) != 0)
{
i = ik;
break;
}
}
int j = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
{
if (lbls(ik, tPos) != 0)
{
j = ik;
break;
}
}
grd(j, i) -= 1.0;
}
};
template <class ElemType>
void CPUMatrix<ElemType>::_rcrfTransGrdCompute(size_t i,
const CPUMatrix<ElemType>& lbls,
const CPUMatrix<ElemType>& alpha,
const CPUMatrix<ElemType>& beta,
const CPUMatrix<ElemType>& pair_scores,
CPUMatrix<ElemType>& grd,
const size_t tPos // position
)
{
int iNumLab = (int) alpha.GetNumRows();
int firstLbl = -1;
for (int ik = 0; ik < lbls.GetNumRows(); ik++)
if (lbls(ik, 0) != 0)
{
firstLbl = ik;
break;
}
CPUMatrix<ElemType> b = beta.ColumnSlice(tPos, 1);
CPUMatrix<ElemType> a;
if (tPos > 0)
a = alpha.ColumnSlice(tPos - 1, 1);
{
ElemType fTmp = (ElemType) LZERO;
for (int j = 0; j < iNumLab; j++)
{
if (tPos == 0)
{
if (i == firstLbl)
{
fTmp = 0;
}
else
{
fTmp = (ElemType) LZERO;
}
}
else
{
fTmp = a(i, 0);
}
fTmp += pair_scores(j, i);
ElemType fSum = (ElemType) LZERO;
for (int k = 0; k < iNumLab; k++)
{
ElemType fTmp2;
if (tPos == 0)
{
if (k == firstLbl)
{
fTmp2 = 0;
}
else
{
fTmp2 = (ElemType) LZERO;
}
}
else
{
fTmp2 = a(k, 0);
}
fSum = (ElemType) LogAddD(fSum, fTmp2 + pair_scores(j, k));
}
fTmp -= fSum;
fTmp += b(j, 0);
grd(j, i) += exp(fTmp);
}
}
};
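// A sketch of the math implemented by the loop above (inferred from the code,
// not stated in the source): each summand accumulated into grd(j, i) is
//
//     exp( a_i + s(j,i) - log sum_k exp(a_k + s(j,k)) + beta_j(tPos) )
//
// where s(j,k) = pair_scores(j,k) and a_k = alpha_k(tPos-1) (replaced at
// tPos == 0 by 0 for the first label and LZERO for all others), i.e. the
// posterior expectation of taking the i -> j transition at position tPos.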
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::DropFrame(const CPUMatrix<ElemType>& label, const CPUMatrix<ElemType>& gamma, const ElemType& threshold)
{
auto& us = *this;
if (us.GetNumCols() != gamma.GetNumCols() || us.GetNumRows() != gamma.GetNumRows())
LogicError("DropFrame: target matrix is not the same size as the gamma matrix.");
#pragma omp parallel for
foreach_column (j, label)
{
bool dropframe = false;
foreach_row (i, label)
{
if (fabs(label(i, j) - 1.0f) < 0.1)
{
if (gamma(i, j) < threshold)
dropframe = true;
break;
}
}
if (dropframe) // use the flag computed above (it was previously computed but never read); zero the column only when the reference frame is below threshold
{
foreach_row (i, label)
{
us(i, j) = 0.0f;
}
}
}
return *this;
}
template <class ElemType>
CPUMatrix<ElemType>& CPUMatrix<ElemType>::AssignSequenceError(const ElemType hsmoothingWeight, const CPUMatrix<ElemType>& label,
const CPUMatrix<ElemType>& dnnoutput, const CPUMatrix<ElemType>& gamma, ElemType alpha)
{
auto& us = *this;
foreach_coord (i, j, us)
us(i, j) += alpha * (label(i, j) - (1 - hsmoothingWeight) * dnnoutput(i, j) - hsmoothingWeight * gamma(i, j));
return *this;
}
// note: this function does not depend on the <ElemType> parameter
template <class ElemType>
int CPUMatrix<ElemType>::SetNumThreads(int numThreads)
{
if (numThreads == 0) // use default
return numThreads;
int mthreads = (int) std::thread::hardware_concurrency();
if (numThreads <= 0)
numThreads = std::max(1, mthreads + numThreads);
if (numThreads > mthreads)
numThreads = mthreads;
#ifdef _OPENMP
omp_set_num_threads(numThreads);
numThreads = omp_get_max_threads();
#ifdef USE_MKL
mkl_set_num_threads(numThreads);
#elif defined(USE_OPENBLAS)
openblas_set_num_threads(numThreads);
#endif
#endif
return numThreads;
}
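// Worked examples of the mapping above, assuming a hypothetical machine where
// std::thread::hardware_concurrency() == 8:
//   SetNumThreads(0)  -> returns 0 (keep the current default)
//   SetNumThreads(4)  -> 4 threads
//   SetNumThreads(-2) -> 6 threads (non-positive means "all cores minus that many")
//   SetNumThreads(99) -> 8 threads (clamped to the hardware concurrency)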
template <class ElemType>
int CPUMatrix<ElemType>::GetMaxNumThreads()
{
int numThreads = (int)std::thread::hardware_concurrency();
#ifdef _OPENMP
numThreads = omp_get_max_threads();
#endif
return numThreads;
}
// To ensure Intel MKL calls return the same results on all Intel or Intel compatible CPUs,
// the function set CBWR compatible mode.
template <class ElemType>
void CPUMatrix<ElemType>::SetCompatibleMode()
{
#ifdef USE_MKL
if (mkl_cbwr_set(MKL_CBWR_COMPATIBLE) != MKL_CBWR_SUCCESS)
RuntimeError("Could not set MKL compatible mode.");
#endif
}
// =======================================================================
// TensorView support
// =======================================================================
// To save time, this makes extensive use of templates and macros.
// -----------------------------------------------------------------------
// function to compute the value for a given output location (perform reduction if needed)
// -----------------------------------------------------------------------
// perform loop over reduction index m
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int m>
struct TensorOpReduction
{
// reduction case (non-reduction case is specialized)
static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
strides[i] = reducingStrides[i][(size_t) m];
double aggregate = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
{
// advance the pointers
for (size_t i = 0; i < N - 1; i++)
pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
// need to descend into one loop deeper
aggregate = reductionOp(aggregate, TensorOpReduction<ElemType, OPFN, ReductionOp, N, m - 1>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides));
}
// Actually it would be nicer to return double, but we keep ElemType so that tests don't return different numbers than the previous implementation.
return static_cast<ElemType>(aggregate);
}
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
struct TensorOpReduction<ElemType, OPFN, ReductionOp, N, -1>
{
static inline ElemType Loop(array<ElemType*, N> pointers, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&)
{
return opfn(pointers); // finally we are doing some work!!!
}
};
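// A minimal self-contained sketch (hypothetical names) of the recursion pattern
// used by TensorOpReduction above: the struct template recurses on an int
// parameter and a partial/full specialization at -1 terminates the recursion, so
// for each small fixed depth the compiler can inline and unroll the whole nest.
template <int m>
struct UnrollDemo
{
    static inline int Sum(const int* dims)
    {
        return dims[m] + UnrollDemo<m - 1>::Sum(dims); // recurse one level down
    }
};
template <>
struct UnrollDemo<-1>
{
    static inline int Sum(const int*) { return 0; } // terminator, like the m = -1 case above
};
// e.g. UnrollDemo<2>::Sum(dims) expands at compile time to dims[2] + dims[1] + dims[0].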
// perform loop over reduction index m, while keeping track of the number of elements and their corresponding indices.
// This function is declared inside a wrapper struct to allow partial specialization (m = -1).
template <class ElemType, size_t N, int m>
struct TensorArgOpReduction
{
static inline std::pair<ElemType, size_t> ReduceAll(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
ElementWiseOperator reductionOp)
{
size_t counter = 0;
size_t index = 0;
ElemType val = (ElemType)0;
switch (reducingOpDims.size())
{
case 3:
val = TensorArgOpReduction<ElemType, N, 2>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 2:
val = TensorArgOpReduction<ElemType, N, 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 1:
val = TensorArgOpReduction<ElemType, N, 0>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
case 0:
val = TensorArgOpReduction<ElemType, N, -1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
break;
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)reducingOpDims.size());
}
return make_pair(val, index);
}
// reduction case (non-reduction case is specialized)
static inline ElemType Loop(array<ElemType*, N> pointers, const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides,
ElementWiseOperator reductionOp, size_t& counter, size_t& index)
{
array<ptrdiff_t, N - 1> strides; // N-1 because last one is the result pointer, which is unused in reduction
for (size_t i = 0; i < N - 1; i++) // N = a small constant, this will be unrolled
strides[i] = reducingStrides[i][(size_t)m];
ElemType aggregate = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
for (size_t dim = reducingOpDims[(size_t)m] - 1; dim-- > 0;)
{
// advance the pointers
for (size_t i = 0; i < N - 1; i++)
pointers[i] += strides[i]; // note: last pointer (result) is unused and untouched here
ElemType val = TensorArgOpReduction<ElemType, N, m - 1>::Loop(pointers, reducingOpDims, reducingStrides, reductionOp, counter, index);
bool update = false;
switch (reductionOp)
{
case ElementWiseOperator::opArgmin:
update = (aggregate > val);
break;
case ElementWiseOperator::opArgmax:
update = (aggregate < val);
break;
}
if (update)
{
aggregate = val;
index = counter - 1;
}
}
return aggregate;
}
};
// perform loop over reduction index m
// This is the specialized version for m = -1, which terminates the recursion.
template <class ElemType, size_t N>
struct TensorArgOpReduction<ElemType, N, -1>
{
static inline ElemType Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&, ElementWiseOperator reductionOp, size_t& counter, size_t& index)
{
counter++;
return *pointers[0]; // finally we are doing some work!!!
}
};
// -----------------------------------------------------------------------
// perform loop over regular index k for N-nary operations (N counting the output)
// -----------------------------------------------------------------------
// perform loop over regular index k and reducing index m for N operands (counting the output)
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m, int k>
struct TensorOpIteration
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t) k];
for (size_t dim = regularOpDims[(size_t) k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, k - 1>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
// Special version for innermost loop with strides all being 1 and no further reduction. Compiler can use SSE.
// This is a very common case, e.g. adding vectors or computing the Sigmoid.
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 3> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
ElemType* pc = pointers[2];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 3, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 3>{pa + k, pb + k, pc + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
// TODO: According to Amit, the VS compiler is not able to vectorize into lambdas. Solution: change the lambda to take an N, or to implement the loop inside (with 1 element by default).
// TODO: The signedness of k (required for omp) causes an extra sign-extend.
// TODO: OMP adds LOTS of overhead. Do we need a guard, a min size when to use it?
}
};
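// Note on the three branches above: all of them invoke the same scalar kernel,
// but passing literal 0/1 for beta/alpha in the common cases lets the compiler
// constant-fold away the `val *= alpha` and `beta * *pout` work in the scalar
// specialization further down, instead of testing them per element.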
// and unary
template <class ElemType, typename OPFN, typename ReductionOp>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, 0 /*innermost loop*/>
{
static inline void Loop(ElemType beta, array<ElemType*, 2> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
ElemType* pa = pointers[0];
ElemType* pb = pointers[1];
size_t K = regularOpDims[0];
// special-case beta and alpha to allow the compiler to short-circuit it
if (beta != 0)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(beta, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else if (alpha != 1)
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
#pragma omp parallel for
for (int k = 0; k < (int) K; k++)
TensorOpIteration<ElemType, OPFN, ReductionOp, 2, true /*vectorizable*/, -1 /*no reduction*/, -1 /*scalar*/>::Loop(0, array<ElemType*, 2>{pa + k, pb + k}, 1, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
};
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, bool vectorizable, int m>
struct TensorOpIteration<ElemType, OPFN, ReductionOp, N, vectorizable, m, -1>
{
static inline void Loop(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// we are at element level for the result: perform the op (there may still be reduction)
ElemType val = TensorOpReduction<ElemType, OPFN, ReductionOp, N, m>::Loop(pointers, opfn, reductionOp, reducingOpDims, reducingStrides);
// scale
val *= alpha;
// combine with previous value in target matrix, then write it out
auto* pout = pointers.back();
if (beta != 0)
val += beta * *pout;
// save
*pout = val;
return;
}
};
// perform loop over regular index k and reducing index m for N operands (counting the output). The difference
// between TensorOpIteration and TensorArgOpIteration is that the latter stores the index of the result instead of
// the result itself. They are kept separate for performance reasons.
template <class ElemType, size_t N, int k>
struct TensorArgOpIteration
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// non-scalar case: still nested result loops left
array<ptrdiff_t, N> strides;
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
strides[i] = regularStrides[i][(size_t)k];
for (size_t dim = regularOpDims[(size_t)k]; dim-- > 0;)
{
// need to descend into one loop deeper
TensorArgOpIteration<ElemType, N, k - 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
// advance the pointers
for (size_t i = 0; i < N; i++)
pointers[i] += strides[i];
}
}
};
template <class ElemType, size_t N>
struct TensorArgOpIteration<ElemType, N, -1>
{
static inline void Loop(array<ElemType*, N> pointers,
const SmallVector<size_t>&, const array<SmallVector<ptrdiff_t>, N>&,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides, ElementWiseOperator reductionOp)
{
// we are at element level for the result: perform the op (there may still be reduction)
auto val = TensorArgOpReduction<ElemType, N, 2>::ReduceAll(pointers, reducingOpDims, reducingStrides, reductionOp);
auto* pout = pointers.back();
*pout = (ElemType)val.second;
return;
}
};
// -----------------------------------------------------------------------
// map runtime parameters N to template parameters
// -----------------------------------------------------------------------
// tensor operation with k+1 dimensions (-1 means scalar)
template <class ElemType, typename OPFN, typename ReductionOp, size_t N, int k>
static void TensorOpWithRegularLoop(ElemType beta, const array<ElemType*, N>& pointers, ElemType alpha, const OPFN& opfn, ReductionOp reductionOp,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
size_t dims = reducingOpDims.size();
switch (dims)
{
case 2:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, 0, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
{
// if all leading dimensions are 1, we can let the compiler do some unrolling
bool leadingAllOne = true;
for (size_t i = 0; i < N; i++)
leadingAllOne &= k >= 0 && regularStrides[i][0] == 1;
if (leadingAllOne) // special version that uses a hard-coded increment of 1 for all leading dimensions
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, true /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
else
return TensorOpIteration<ElemType, OPFN, ReductionOp, N, false /*vectorizable*/, -1, k>::Loop(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
}
default:
LogicError("TensorOp: %d non-flattened reduction dimensions are not supported.", (int) dims);
}
}
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different k.
template <class ElemType, typename OPFN, typename ReductionOp, size_t N>
static void TensorOpWithFnAndReduction(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, const ReductionOp& reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
for (size_t i = 0; i < N; i++) // N = a small constant, this will be unrolled
pointers[i] += offsets[i];
size_t dims = regularOpDims.size();
switch (dims)
{
case 4:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 3>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 3:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 2>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 2:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 1:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, 0>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
case 0:
return TensorOpWithRegularLoop<ElemType, OPFN, ReductionOp, N, -1>(beta, pointers, alpha, opfn, reductionOp, regularOpDims, regularStrides, reducingOpDims, reducingStrides);
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)dims);
}
}
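// The switch above is a standard dispatch ladder: a runtime value (the number of
// dimensions) is mapped onto a compile-time template parameter so each case gets
// its own fully specialized, unrollable loop nest. A minimal sketch of the idiom
// (RankDemoImpl/RankDemoDispatch are hypothetical names):
template <int k>
inline int RankDemoImpl() { return k; } // stands in for a k-specialized loop nest
inline int RankDemoDispatch(size_t dims)
{
    switch (dims)
    {
    case 0: return RankDemoImpl<0>();
    case 1: return RankDemoImpl<1>();
    case 2: return RankDemoImpl<2>();
    default: return -1; // the real code raises LogicError for unsupported ranks, as above
    }
}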
// tensor operation, generalized in number of arguments, operation already provided as a lambda
// This function now expands into different reductionOps
template <class ElemType, typename OPFN, size_t N>
static void TensorOpWithFn(ElemType beta, array<ElemType*, N> pointers, ElemType alpha, const OPFN& opfn, ElementWiseOperator reductionOp,
const array<size_t, N>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, N>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, N>& reducingStrides)
{
// BUGBUG: Always using 'double' as the type of the aggregator, even for ElemType==float. Reason: otherwise some e2e tests would fail, as historically we
// used double for the sum aggregator. But:
// * for min and max reductions this is meaningless.
// * It is not consistent with what we do on the GPU, where we aggregate in ElemType.
// * It costs performance.
// TODO: adapt e2e tests to run with an aggregator of type ElemType.
#define CaseTensorOpWithFnAndReduction(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFnAndReduction(beta, pointers, alpha, opfn, [](double a, double b) \
{ \
return Op##oper(a, b); \
}, \
offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
switch (reductionOp)
{
CaseTensorOpWithFnAndReduction(Sum);
CaseTensorOpWithFnAndReduction(LogSum);
CaseTensorOpWithFnAndReduction(Min);
CaseTensorOpWithFnAndReduction(Max);
CaseTensorOpWithFnAndReduction(ElementwiseProduct);
default:
LogicError("Specified ElementWiseOperator op %d not suported as reduction operation.", (int)reductionOp);
}
}
// -----------------------------------------------------------------------
// entry points from Matrix.cpp; also map op to a lambda
// -----------------------------------------------------------------------
// perform unary operation 'op' on a giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum &&
reductionOp != ElementWiseOperator::opLogSum &&
reductionOp != ElementWiseOperator::opMin &&
reductionOp != ElementWiseOperator::opMax &&
reductionOp != ElementWiseOperator::opElementwiseProduct)
InvalidArgument("TensorOp: Unary reduction operations other than opMax, opMin, opSum, and opLogSum are not implemented.");
// TODO: Change the lambda to take a pointer and a number of elements, so that we can pass it 1 or 4 elements, in order for it to SSE-vectorize.
#define CaseUnaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 2>& pp) \
{ \
return Op##oper((*(pp[0]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 2> pointers = {a.Data(), Data()};
switch (op)
{
ForAllUnaryOps(CaseUnaryTensorOp);
default:
LogicError("TensorOp: Unknown unary op code %d.", (int) op);
}
}
// perform binary operation 'op' on a and b giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 3>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 3>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 3>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp (binary): The only permitted binary reduction operation is opSum.");
#define CaseBinaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 3>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 3> pointers = {a.Data(), b.Data(), Data()};
switch (op)
{
ForAllBinaryOps(CaseBinaryTensorOp);
default:
LogicError("TensorOp: Unknown op binary code %d.", (int) op);
}
}
// perform ternary operation 'op' on a, b, and c giving 'this', reinterpreting the matrices as tensors as specified by the dims and strides
// This maps 'op' to a lambda.
template <class ElemType>
void CPUMatrix<ElemType>::TensorOp(ElemType beta, const CPUMatrix<ElemType>& a, const CPUMatrix<ElemType>& b, const CPUMatrix<ElemType>& c, ElemType alpha, ElementWiseOperator op, ElementWiseOperator reductionOp,
const array<size_t, 4>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 4>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 4>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opSum)
InvalidArgument("TensorOp: The only permitted ternary reduction operation is opSum.");
#define CaseTernaryTensorOp(oper) \
case ElementWiseOperator::op##oper: \
return TensorOpWithFn(beta, pointers, alpha, [](const array<ElemType*, 4>& pp) \
{ \
return Op##oper((*(pp[0])), (*(pp[1])), (*(pp[2]))); \
}, \
reductionOp, offsets, regularOpDims, regularStrides, reducingOpDims, reducingStrides)
array<ElemType*, 4> pointers = {a.Data(), b.Data(), c.Data(), Data()};
switch (op)
{
ForAllTernaryOps(CaseTernaryTensorOp);
default:
LogicError("TensorOp: Unknown ternary op code %d.", (int) op);
}
}
template <class ElemType>
int CPUMatrix<ElemType>::Argmin() const
{
int minArg = -1;
ElemType minValue = std::numeric_limits<ElemType>::max();
#pragma omp parallel
{
int localMinArg = -1;
ElemType localMinValue = std::numeric_limits<ElemType>::max();
#pragma omp for
for (int index = 0; index < (int)GetNumElements(); ++index)
{
if (localMinValue > Data()[index])
{
localMinArg = index;
localMinValue = Data()[index];
}
// If we have more than one min value, select the one with the lower index.
else if ((localMinValue == Data()[index]) && (localMinArg > index))
{
localMinArg = index;
}
}
#pragma omp critical
{
if (minValue > localMinValue)
{
minArg = localMinArg;
minValue = localMinValue;
}
// If we have more than one min value, select the one with the lower index.
else if ((minValue == localMinValue) && (minArg > localMinArg))
{
minArg = localMinArg;
}
}
}
return minArg;
}
template <class ElemType>
int CPUMatrix<ElemType>::Argmax() const
{
int maxArg = -1;
ElemType maxValue = std::numeric_limits<ElemType>::lowest(); // lowest(), not min(): for floating-point types min() is the smallest positive value, which breaks all-negative inputs
#pragma omp parallel
{
int localMaxArg = -1;
ElemType localMaxValue = std::numeric_limits<ElemType>::lowest();
#pragma omp for
for (int index = 0; index < (int)GetNumElements(); ++index)
{
if (localMaxValue < Data()[index])
{
localMaxArg = index;
localMaxValue = Data()[index];
}
// If we have more than one max value, select the one with the lower index.
else if ((localMaxValue == Data()[index]) && (localMaxArg > index))
{
localMaxArg = index;
}
}
#pragma omp critical
{
if (maxValue < localMaxValue)
{
maxArg = localMaxArg;
maxValue = localMaxValue;
}
// If we have more than one max value, select the one with the lower index.
else if ((maxValue == localMaxValue) && (maxArg > localMaxArg))
{
maxArg = localMaxArg;
}
}
}
return maxArg;
}
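// Argmin/Argmax above use a common OpenMP idiom for reductions that the
// `reduction` clause cannot express directly (index-returning min/max with
// deterministic tie-breaking): each thread scans its share with private bests,
// then the per-thread results are merged under `omp critical`. A standalone
// sketch with hypothetical names (assumes <limits> is available, as it is here):
inline int ArgminDemo(const float* data, int n)
{
    int bestArg = -1;
    float bestVal = std::numeric_limits<float>::max();
#pragma omp parallel
    {
        int localArg = -1;
        float localVal = std::numeric_limits<float>::max();
#pragma omp for
        for (int i = 0; i < n; ++i)
        {
            if (data[i] < localVal) // per-thread best; iterations are ascending, so first hit wins ties
            {
                localArg = i;
                localVal = data[i];
            }
        }
#pragma omp critical
        {
            // merge: prefer the smaller value, then the smaller index on ties
            if (localVal < bestVal || (localVal == bestVal && localArg != -1 && (bestArg == -1 || localArg < bestArg)))
            {
                bestArg = localArg;
                bestVal = localVal;
            }
        }
    }
    return bestArg;
}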
template <class ElemType>
int CPUMatrix<ElemType>::ArgOp(ElementWiseOperator reductionOp) const
{
switch (reductionOp)
{
case ElementWiseOperator::opArgmin:
return Argmin();
break;
case ElementWiseOperator::opArgmax:
return Argmax();
break;
}
InvalidArgument("ArgOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
return -1;
}
template <class ElemType>
void CPUMatrix<ElemType>::TensorArgOp(const CPUMatrix<ElemType>& a, ElementWiseOperator reductionOp,
const array<size_t, 2>& offsets,
const SmallVector<size_t>& regularOpDims, const array<SmallVector<ptrdiff_t>, 2>& regularStrides,
const SmallVector<size_t>& reducingOpDims, const array<SmallVector<ptrdiff_t>, 2>& reducingStrides)
{
if (reductionOp != ElementWiseOperator::opArgmin &&
reductionOp != ElementWiseOperator::opArgmax)
InvalidArgument("TensorOp: Arg reduction operations other than opArgmax, and opArgmin are not implemented.");
if (GetNumElements() == 1)
{
Data()[0] = (ElemType) a.ArgOp(reductionOp);
}
else
{
const size_t N = 2;
array<ElemType*, N> pointers = { a.Data(), Data() };
for (size_t i = 0; i < N; i++)
pointers[i] += offsets[i];
switch (regularOpDims.size())
{
case 2:
TensorArgOpIteration<ElemType, N, 1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 1:
TensorArgOpIteration<ElemType, N, 0>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
case 0:
TensorArgOpIteration<ElemType, N, -1>::Loop(pointers, regularOpDims, regularStrides, reducingOpDims, reducingStrides, reductionOp);
break;
default:
LogicError("TensorOp: %d non-flattened input dimensions are not supported.", (int)regularOpDims.size());
}
}
}
// We use Matrix<char> as the backing store for QuantizedMatrix
// Let's explicitly instantiate the methods we need for that purpose
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<char>::CPUMatrix(const size_t numRows, const size_t numCols, char* pArray, const size_t matrixFlags);
template CPUMatrix<char>::CPUMatrix();
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char> const&);
template CPUMatrix<char>::CPUMatrix(CPUMatrix<char>&&);
template size_t CPUMatrix<char>::LocateElement(size_t, size_t) const;
template CPUMatrix<char> CPUMatrix<char>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<char>& CPUMatrix<char>::operator=(CPUMatrix<char>&&);
template void CPUMatrix<char>::SetValue(const char);
template void CPUMatrix<char>::SetValue(const size_t numRows, const size_t numCols, char* pArray, size_t matrixFlags);
template void CPUMatrix<char>::SetValue(CPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(CPUSparseMatrix<char> const&);
//template void CPUMatrix<char>::SetValue(GPUSparseMatrix<char> const&);
template void CPUMatrix<char>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<char>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template char* CPUMatrix<char>::CopyToArray(void) const;
template void CPUMatrix<char>::CopySection(size_t numRows, size_t numCols, char* dst, size_t colStride) const;
template void CPUMatrix<char>::Reshape(const size_t, const size_t);
// Support <short>
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols);
template CPUMatrix<short>::CPUMatrix(const size_t numRows, const size_t numCols, short* pArray, const size_t matrixFlags);
template CPUMatrix<short>::CPUMatrix();
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short> const&);
template CPUMatrix<short>::CPUMatrix(CPUMatrix<short>&&);
template size_t CPUMatrix<short>::LocateElement(size_t, size_t) const;
template CPUMatrix<short> CPUMatrix<short>::ColumnSlice(size_t startColumn, size_t numCols) const;
template CPUMatrix<short>& CPUMatrix<short>::operator=(CPUMatrix<short>&&);
template void CPUMatrix<short>::SetValue(const short);
template void CPUMatrix<short>::SetValue(const size_t numRows, const size_t numCols, short* pArray, size_t matrixFlags);
template void CPUMatrix<short>::SetValue(CPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(CPUSparseMatrix<short> const&);
//template void CPUMatrix<short>::SetValue(GPUSparseMatrix<short> const&);
template void CPUMatrix<short>::RequireSize(const size_t numRows, const size_t numCols, bool growOnly);
template void CPUMatrix<short>::Resize(const size_t numRows, const size_t numCols, bool growOnly);
template short* CPUMatrix<short>::CopyToArray(void) const;
template void CPUMatrix<short>::CopySection(size_t numRows, size_t numCols, short* dst, size_t colStride) const;
template void CPUMatrix<short>::Reshape(const size_t, const size_t);
template CPUMatrix<int>::CPUMatrix(const size_t, const size_t, int*, const size_t);
}}}
|
ObstacleFromShape.h | //
// Cubism3D
// Copyright (c) 2019 CSE-Lab, ETH Zurich, Switzerland.
// Distributed under the terms of the MIT license.
//
// Created by Ivica Kicic (kicici@ethz.ch) in July 2019.
//
#ifndef CubismUP_3D_ObstacleFromShape_h
#define CubismUP_3D_ObstacleFromShape_h
#include "Obstacle.h"
#include "extra/ObstacleLibrary.h"
#include <utility>
#include <type_traits>
CubismUP_3D_NAMESPACE_BEGIN
/*
* An obstacle whose shape is defined by a template argument Shape.
*
* Shape should implement the following method:
*
* // Returns whether the shape intersects an axis-aligned bounding box.
* bool isTouching(std::array<Real, 3> low, std::array<Real, 3> high);
*
* // Signed distance to the shape surface (>0 inside, <0 outside).
* Real signedDistance(<array of 3 Reals> position);
*
* // Center of mass velocity.
* <array of 3 Reals> comVelocity();
*
* // Local velocity relative to the center-of-mass velocity.
* <array of 3 Reals> localRelativeVelocity(<array of 3 Reals> position);
*
* // Factor [0..1] multiplying lambda,
* // used to gradually add the obstacle to the flow.
* Real lambdaFactor(Real time);
*
* // Set the current time, for time-dependent shapes.
* void setTime(Real);
*/
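// Illustration only (not part of the original interface): a hypothetical rigid
// sphere translating at a constant velocity `vel` could satisfy the contract
// above roughly as follows (SphereShape and its members are made-up names):
//
//   struct SphereShape {
//     std::array<Real, 3> center, vel;
//     Real radius;
//     bool isTouching(std::array<Real, 3> lo, std::array<Real, 3> hi) const {
//       for (int i = 0; i < 3; ++i)
//         if (center[i] + radius < lo[i] || center[i] - radius > hi[i])
//           return false;
//       return true; // conservative sphere-bounding-box vs. AABB overlap test
//     }
//     Real signedDistance(std::array<Real, 3> p) const {
//       const Real dx = p[0] - center[0], dy = p[1] - center[1], dz = p[2] - center[2];
//       return radius - std::sqrt(dx * dx + dy * dy + dz * dz); // > 0 inside
//     }
//     std::array<Real, 3> comVelocity() const { return vel; }
//     std::array<Real, 3> localRelativeVelocity(std::array<Real, 3>) const {
//       return {0, 0, 0}; // rigid body: no motion relative to the center of mass
//     }
//     Real lambdaFactor(Real) const { return 1; } // fully coupled from the start
//     void setTime(Real) { } // this shape is time-independent
//   };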
template <typename Shape>
class ObstacleFromShape : public Obstacle
{
using position_type = decltype(FluidBlock::min_pos);
// Expected functions in `Shape`.
static_assert(std::is_convertible<
decltype(std::declval<Shape>().isTouching(
position_type{}, position_type{})), bool>::value);
static_assert(std::is_convertible<
decltype(std::declval<Shape>().signedDistance({Real(), Real(), Real()})),
Real>::value);
// Here we check only that the velocity result behaves like an array.
static_assert(std::is_convertible<
decltype(std::declval<Shape>().comVelocity()[0]), Real>::value);
static_assert(std::is_convertible<
decltype(std::declval<Shape>().localRelativeVelocity(
{Real(), Real(), Real()})[0]), Real>::value);
static_assert(std::is_convertible<
decltype(std::declval<Shape>().lambdaFactor(Real())), Real>::value);
static_assert(std::is_same<
decltype(std::declval<Shape>().setTime(Real())), void>::value);
public:
ObstacleFromShape(SimulationData &s,
const ObstacleArguments &args,
Shape shape) :
Obstacle(s, args),
shape_(std::move(shape))
{ }
void computeVelocities() override
{
Obstacle::computeVelocities();
auto &&v = shape_.comVelocity();
transVel[0] = transVel_imposed[0] = v[0];
transVel[1] = transVel_imposed[1] = v[1];
transVel[2] = transVel_imposed[2] = v[2];
}
void create() override
{
shape_.setTime(sim.time);
printf("Cubism step = %d time = %lg Uinf = [%lg %lg %lg]\n",
sim.step, sim.time, sim.uinf[0], sim.uinf[1], sim.uinf[2]);
// Read the new value of the lambda factor.
lambda_factor = shape_.lambdaFactor(sim.time);
const FillBlocks kernel{shape_};
create_base(kernel);
const std::vector<cubism::BlockInfo> &vInfo = sim.vInfo();
#pragma omp parallel for schedule(dynamic, 1)
for (int i = 0; i < (int)vInfo.size(); ++i) {
const cubism::BlockInfo &info = vInfo[i];
if (obstacleBlocks[info.blockID] != nullptr)
kernel.setVelocity(info, obstacleBlocks[info.blockID]);
}
}
void finalize() override
{
// This method allows any computation that requires the characteristic
// function to have been computed, e.g. computing the effective center of
// mass or removing momenta from udef.
}
private:
struct FillBlocks : FillBlocksBase<FillBlocks>
{
FillBlocks(Shape &shape) : shape_(shape) { }
bool isTouching(const FluidBlock &b) const
{
return shape_.isTouching(b.min_pos, b.max_pos);
}
Real signedDistance(const Real x, const Real y, const Real z) const
{
return shape_.signedDistance({x, y, z});
}
void setVelocity(const cubism::BlockInfo &info,
ObstacleBlock * const o) const {
for (int iz = 0; iz < FluidBlock::sizeZ; ++iz)
for (int iy = 0; iy < FluidBlock::sizeY; ++iy)
for (int ix = 0; ix < FluidBlock::sizeX; ++ix) {
const std::array<Real, 3> p = info.pos<Real>(ix, iy, iz);
auto &&udef = shape_.localRelativeVelocity(p);
o->udef[iz][iy][ix][0] = udef[0];
o->udef[iz][iy][ix][1] = udef[1];
o->udef[iz][iy][ix][2] = udef[2];
}
}
private:
Shape &shape_;
};
Shape shape_;
};
CubismUP_3D_NAMESPACE_END
#endif
|
omp_alloc_def_fb.c | // RUN: %libomp-compile-and-run
#include <stdio.h>
#include <omp.h>
int main() {
omp_alloctrait_t at[2];
omp_allocator_handle_t a;
void *p[2];
at[0].key = omp_atk_pool_size;
at[0].value = 2 * 1024 * 1024;
at[1].key = omp_atk_fallback;
at[1].value = omp_atv_default_mem_fb;
a = omp_init_allocator(omp_large_cap_mem_space, 2, at);
printf("allocator large created: %p\n", a);
#pragma omp parallel num_threads(2)
{
int i = omp_get_thread_num();
p[i] = omp_alloc(1024 * 1024, a);
#pragma omp barrier
printf("th %d, ptr %p\n", i, p[i]);
omp_free(p[i], a);
}
// Both pointers should be non-NULL
if (p[0] != NULL && p[1] != NULL) {
printf("passed\n");
return 0;
} else {
printf("failed: pointers %p %p\n", p[0], p[1]);
return 1;
}
}
|
SE1P_rsrc_cell.c | #include "mex.h"
#include "mex_compat.h"
#include "math.h"
#include "cell_list.h"
#ifdef INTEL_MKL
#include "mkl.h"
#endif
#define X prhs[0] // Source locations
#define F prhs[1] // Source strengths
#define RC prhs[2] // cutoff
#define XI prhs[3] // Ewald Param
#define P prhs[4] // Periodic wrap
#define BOX prhs[5] // domain size
#define U plhs[0] // Output
#ifndef VERBOSE
#define VERBOSE 0
#endif
#ifdef _OPENMP
#define CRITICAL _Pragma("omp critical")
#else
#define CRITICAL
#endif
#define PI 3.141592653589793
inline double norm2(double * a)
{
return a[0]*a[0] + a[1]*a[1] + a[2]*a[2];
}
// Compute buffer stuff
#define BUF_SIZE 256
typedef struct {
int n;
int idx_t[BUF_SIZE];
double rvec[3*BUF_SIZE];
double rsq[BUF_SIZE];
} ComputeBuffer;
static void buffer_push(ComputeBuffer* buffer, int idx_t, double* rvec, double rsq)
{
int n = buffer->n;
buffer->idx_t[n] = idx_t;
for(int i=0; i<3; i++)
buffer->rvec[3*n + i] = rvec[i];
buffer->rsq[n] = rsq;
buffer->n = n + 1;
}
static void empty_buffer(ComputeBuffer* buffer,
const double* restrict x,
const double* restrict f,
double* restrict u,
int idx_s,
double xi)
{
int N = buffer->n;
int idx_t;
double fs, ft;
double us = 0;
fs = f[idx_s];
// Do what we can to help the compiler vectorize exp and erfc, if possible
const double* restrict r2 = buffer->rsq;
double c1[BUF_SIZE];
#ifdef INTEL_MKL
double r[BUF_SIZE];
double xir[BUF_SIZE];
#pragma ivdep
for (int n=0; n<N; n++)
{
r[n] = sqrt(r2[n]);
xir[n] = xi*r[n];
}
// Evaluate erfc over the whole buffer at once with MKL's vector math library
double erfc_vec[BUF_SIZE];
vdErfc(N, xir, erfc_vec);
#pragma ivdep
for (int n=0; n<N; n++)
{
c1[n] = erfc_vec[n] / r[n];
}
#else
for (int n=0; n<N; n++)
{
double r = sqrt(r2[n]);
c1[n] = erfc(xi*r) / r;
}
#endif
// Compute interactions
#ifdef INTEL_MKL
#pragma ivdep
#endif
for (int n=0; n<N; n++)
{
idx_t = buffer->idx_t[n];
ft = f[idx_t];
u[idx_t] += fs*c1[n];
us += ft*c1[n];
}
u[idx_s] += us;
buffer->n = 0;
}
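// Illustrative note (not in the original source): buffer_push/empty_buffer
// implement a batching pattern for vectorizing transcendentals. Instead of
// calling erfc() per pair inside the irregular neighbor loop, pairs are staged
// into fixed-size contiguous arrays and evaluated in bulk (vdErfc under MKL, or
// an auto-vectorizable loop otherwise). The generic shape of the idiom, with a
// hypothetical batch kernel batch_eval():
//
//   double staged[BUF_SIZE];
//   int n = 0;
//   /* inside the irregular traversal: */
//   staged[n++] = value;
//   if (n == BUF_SIZE) { batch_eval(staged, n); n = 0; }  /* flush when full */
//   /* after the traversal: */
//   batch_eval(staged, n);                                /* flush remainder */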
// Entry point
void
mexFunction( int nlhs, mxArray *plhs[],
int nrhs, const mxArray *prhs[] )
{
// input target
const int N = mxGetN(X);
const double xi = (double) mxGetScalar(XI);
const double rc = (double) mxGetScalar(RC);
const double rcsq = rc*rc;
const int p = (int) mxGetScalar(P); // periodic image count; used as an integer loop bound below
const double* x = mxGetPr(X);
const double* f = mxGetPr(F);
const double* box = mxGetPr(BOX);
// output
U = mxCreateDoubleMatrix(N, 1, mxREAL);
double* restrict u_out = mxGetPr(U);
// Setup cell list variables
int ncell[3];
int* restrict cell_list;
int* restrict cell_idx;
double rn;
int px[27] = {-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1,-1, 0, 1};
int py[27] = {-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1,-1,-1,-1, 0, 0, 0, 1, 1, 1};
int pz[27] = {-1,-1,-1,-1,-1,-1,-1,-1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1};
// Build cell list
build_cell_list(x, N, box, rc, &rn, ncell, &cell_list, &cell_idx);
#ifdef _OPENMP
#pragma omp parallel
#endif
{ // Begin parallel section
// Create a thread-local compute buffer
ComputeBuffer buffer;
// Setup local output
double* restrict u;
CRITICAL {
u = __MALLOC(N*sizeof(double));
}
for(int i=0;i<N;i++)
u[i] = 0.0;
// Main loop
#ifdef _OPENMP
#pragma omp for schedule(dynamic) nowait
#endif
for (int idx_s=0; idx_s<N; idx_s++)
{
double xs[3];
int home_cell[3], icell[3];
for(int i=0; i<3; i++)
{
// Source point
xs[i] = x[idx_s*3 + i];
// Determine home cell
home_cell[i] = xs[i]/rn;
}
// Iterate through near cells (including home cell)
buffer.n = 0;
for(int ip=0; ip<27; ip++)
{
// Get neigh cell
icell[0] = home_cell[0] + px[ip];
icell[1] = home_cell[1] + py[ip];
icell[2] = home_cell[2] + pz[ip];
// Stop at boundaries
int inside = 1;
for(int j=0; j<3; j++)
{
if (icell[j] < 0 || icell[j] == ncell[j])
inside = 0;
}
if (!inside)
continue;
int icell_idx =
icell[0] +
icell[1]*ncell[0] +
icell[2]*ncell[1]*ncell[0];
// Go through cell list
int cell_a = cell_idx[icell_idx];
int cell_b = cell_idx[icell_idx+1];
for(int point_idx=cell_a; point_idx<cell_b; point_idx++)
{
int idx_t = cell_list[point_idx];
if (idx_s >= idx_t)
continue;
double rvec[3];
// periodic wrap
for (int j=-p; j<=p; j++)
{
double pshift[] = {j*box[0],0,0};
for(int i=0; i<3; i++)
rvec[i] = xs[i] - x[idx_t*3 + i] - pshift[i];
double r2 = norm2(rvec);
if (r2 > rcsq)
continue;
buffer_push(&buffer, idx_t, rvec, r2);
if (buffer.n == BUF_SIZE)
empty_buffer(&buffer, x, f, u, idx_s, xi);
}
}
}
empty_buffer(&buffer, x, f, u, idx_s, xi);
}
// End of particle loop, collect results
CRITICAL {
for(int i=0; i<N; i++)
u_out[i] += u[i];
}
// free/malloc not thread safe under MEX
CRITICAL {
__FREE(u);
}
}
}
|
rawSHA256_fmt_plug.c | /*
* This file is part of John the Ripper password cracker,
* Copyright (c) 2010 by Solar Designer
* based on rawMD4_fmt.c code, with trivial changes by groszek.
*
* Understands hex hashes as well as Cisco "type 4" base64.
*
* Rewritten Spring 2013, JimF. SSE code added and released with the following terms:
* No copyright is claimed, and the software is hereby placed in the public domain.
* In case this attempt to disclaim copyright and place the software in the public
* domain is deemed null and void, then the software is Copyright (c) 2011 JimF
* and it is hereby released to the general public under the following
* terms:
*
* This software may be modified, redistributed, and used for any
* purpose, in source and binary forms, with or without modification.
*/
#if FMT_EXTERNS_H
extern struct fmt_main fmt_rawSHA256;
#elif FMT_REGISTERS_H
john_register_one(&fmt_rawSHA256);
#else
#include "arch.h"
#include "sha2.h"
#include "stdint.h"
#include "params.h"
#include "common.h"
#include "johnswap.h"
#include "formats.h"
#include "rawSHA256_common.h"
#ifdef _OPENMP
#ifdef MMX_COEF_SHA256
#define OMP_SCALE 1024
#else
#define OMP_SCALE 2048
#endif
#include <omp.h>
#endif
#include "sse-intrinsics.h"
#include "memdbg.h"
#define FORMAT_LABEL "Raw-SHA256"
#define FORMAT_NAME ""
#ifdef MMX_COEF_SHA256
#define ALGORITHM_NAME SHA256_ALGORITHM_NAME
#else
#define ALGORITHM_NAME "32/" ARCH_BITS_STR " " SHA2_LIB
#endif
/* Note: Cisco hashes are truncated at length 25. We currently ignore this. */
#ifdef MMX_COEF_SHA256
#define PLAINTEXT_LENGTH 55
#else
#define PLAINTEXT_LENGTH 125
#endif
#define BINARY_SIZE 32
#define BINARY_ALIGN MEM_ALIGN_WORD
#define SALT_SIZE 0
#define SALT_ALIGN 1
#define MIN_KEYS_PER_CRYPT 1
#ifdef MMX_COEF_SHA256
#define MAX_KEYS_PER_CRYPT MMX_COEF_SHA256
#else
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests tests[] = {
{"71c3f65d17745f05235570f1799d75e69795d469d9fcb83e326f82f1afa80dea", "epixoip"},
{HEX_TAG "71c3f65d17745f05235570f1799d75e69795d469d9fcb83e326f82f1afa80dea", "epixoip"},
{"25b64f637b373d33a8aa2b7579784e99a20e6b7dfea99a71af124394b8958f27", "doesthiswork"},
{"5e884898da28047151d0e56f8dc6292773603d0d6aabbdd62a11ef721d1542d8", "password"},
{"27c6794c8aa2f70f5f6dc93d3bfb25ca6de9b0752c8318614cbd4ad203bea24c", "ALLCAPS"},
{"04cdd6c523673bf448efe055711a9b184817d7843b0a76c2046f5398b5854152", "TestTESTt3st"},
{HEX_TAG "ef797c8118f02dfb649607dd5d3f8c7623048c9c063d532cc95c5ed7a898a64f", "12345678"},
{HEX_TAG "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855", ""},
{HEX_TAG "E3B0C44298FC1C149AFBF4C8996FB92427AE41E4649B934CA495991B7852B855", ""},
{"LcV6aBcc/53FoCJjXQMd7rBUDEpeevrK8V5jQVoJEhU", "password"},
{CISCO_TAG "LcV6aBcc/53FoCJjXQMd7rBUDEpeevrK8V5jQVoJEhU", "password"},
{"a49c2c9d0c006c8cb55a9a7a38822b83e0cd442614cb416af952fa50156761dc", "openwall"},
{"9e7d3e56996c5a06a6a378567e62f5aa7138ebb0f55c0bdaf73666bf77f73380", "mot\xf6rhead"},
{"1b4f0e9851971998e732078544c96b36c3d01cedf7caa332359d6f1d83567014", "test1"},
{"fd61a03af4f77d870fc21e05e7e80678095c92d808cfb3b5c279ee04c74aca13", "test3"},
{"d150eb0383c8ef7478248d7e6cf18db333e8753d05e15a8a83714b7cf63922b3", "thatsworking"},
#ifdef DEBUG
{"c775e7b757ede630cd0aa1113bd102661ab38829ca52a6422ab782862f268646", "1234567890"},
{CISCO_TAG "OsOmQzwozC4ROs/CzpczJoShdCeW9lp7k/tGrPS5Kog", "1"},
{CISCO_TAG "d7kgbEk.P6mpKdduC66fUy1BF0MImo3eyJ9uI/JbMRk", "openwall"},
{CISCO_TAG "p5BSCWNS3ivUDpZlWthR.k4Q/xWqlFyEqXdaPikHenI", "2"},
{CISCO_TAG "HwUf7ev9Fx84X2vvspULAeDbmwlg9jgm/Wk63kc3vfU", "11"},
{CISCO_TAG "bsPEUMVATKKO9yeUlJfE3OCzHlgf0s6goJpg3P1k0UU", "test"},
{CISCO_TAG "Xq81UiuCj7bz9B..EX2BZumsU/d8pF5gs2NlRMW6sTk", "applesucks"},
{CISCO_TAG "O/D/cn1nawcByQoJfBxrNnUx6jjfWV.FNFx5TzmzihU", "AppleSucks"},
#if PLAINTEXT_LENGTH >19
{"6ed645ef0e1abea1bf1e4e935ff04f9e18d39812387f63cda3415b46240f0405", "12345678901234567890"},
{"f54e5c8f810648e7638d25eb7ed6d24b7e5999d588e88826f2aa837d2ee52ecd", "123456789012345678901234567890"},
{"a4ebdd541454b84cc670c9f1f5508baf67ffd3fe59b883267808781f992a0b1d", "1234567890123456789012345678901234567890"},
{"f58fffba129aa67ec63bf12571a42977c0b785d3b2a93cc0538557c91da2115d", "12345678901234567890123456789012345678901234567890"},
{"3874d5c9cc5ab726e6bbebadee22c680ce530004d4f0bb32f765d42a0a6c6dc1", "123456789012345678901234567890123456789012345678901"},
{"03c3a70e99ed5eeccd80f73771fcf1ece643d939d9ecc76f25544b0233f708e9", "1234567890123456789012345678901234567890123456789012345"},
{"0f46e4b0802fee6fed599682a16287d0397699cfd742025482c086a70979e56a", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}, // 31
{"c62e4615bd39e222572f3a1bf7c2132ea1e65b17ec805047bd6b2842c593493f", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}, // 32
{"d5e285683cd4efc02d021a5c62014694958901005d6f71e89e0989fac77e4072", "xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"}, // 55
{CISCO_TAG "hUsuWZSE8dZERUBYNwRK8Aa8VxEGIHsuZFUCjNj2.Ac", "verylongbutweakpassword"},
{CISCO_TAG "fLUL1VG98zYDf9Q.M40nZ5blVT3M6UBex74Blw.UDCc", "thismaximumpasswordlength"},
#endif
#endif
{NULL}
};
#ifdef MMX_COEF_SHA256
#define GETPOS(i, index) ( (index&(MMX_COEF_SHA256-1))*4 + ((i)&(0xffffffff-3))*MMX_COEF_SHA256 + (3-((i)&3)) + (index>>(MMX_COEF_SHA256>>1))*SHA256_BUF_SIZ*MMX_COEF_SHA256*4 )
static uint32_t (*saved_key)[SHA256_BUF_SIZ*MMX_COEF_SHA256];
static uint32_t (*crypt_out)[8*MMX_COEF_SHA256];
#else
static int (*saved_key_length);
static char (*saved_key)[PLAINTEXT_LENGTH + 1];
static ARCH_WORD_32 (*crypt_out)
[(BINARY_SIZE + sizeof(ARCH_WORD_32) - 1) / sizeof(ARCH_WORD_32)];
#endif
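/*
 * Worked example of the interleaved SIMD key layout addressed by GETPOS above,
 * assuming MMX_COEF_SHA256 == 4 lanes and SHA256_BUF_SIZ == 16 words (both come
 * from headers not shown here):
 *   GETPOS(0, 1) = (1&3)*4 + 0*4 + (3-0) + 0 = 7    byte 0 of key 1: lane 1,
 *                                                   word 0, big-endian slot 3
 *   GETPOS(4, 1) = (1&3)*4 + 4*4 + (3-0) + 0 = 23   byte 4 of key 1 opens word 1
 * The final term selects the 4-key SIMD block (index >> 2 here), so consecutive
 * keys are interleaved word-by-word and one SSESHA256body call hashes
 * MMX_COEF_SHA256 keys at once.
 */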
static void init(struct fmt_main *self)
{
#ifdef _OPENMP
int omp_t;
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt = omp_t * MIN_KEYS_PER_CRYPT;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt = omp_t * MAX_KEYS_PER_CRYPT;
#endif
#ifndef MMX_COEF_SHA256
saved_key_length = mem_calloc_tiny(sizeof(*saved_key_length) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt, MEM_ALIGN_WORD);
#else
saved_key = mem_calloc_tiny(sizeof(*saved_key) * self->params.max_keys_per_crypt/MMX_COEF_SHA256, MEM_ALIGN_SIMD);
crypt_out = mem_calloc_tiny(sizeof(*crypt_out) * self->params.max_keys_per_crypt/MMX_COEF_SHA256, MEM_ALIGN_SIMD);
#endif
}
static void *binary(char *ciphertext)
{
static unsigned char *out;
int i;
if (!out)
out = mem_alloc_tiny(BINARY_SIZE, MEM_ALIGN_WORD);
ciphertext += HEX_TAG_LEN;
for (i = 0; i < BINARY_SIZE; i++) {
out[i] = atoi16[ARCH_INDEX(ciphertext[i*2])] * 16 +
atoi16[ARCH_INDEX(ciphertext[i*2 + 1])];
}
#ifdef MMX_COEF_SHA256
alter_endianity (out, BINARY_SIZE);
#endif
return out;
}
#ifdef MMX_COEF_SHA256
static int get_hash_0 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xf; }
static int get_hash_1 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xff; }
static int get_hash_2 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xfff; }
static int get_hash_3 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xffff; }
static int get_hash_4 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xfffff; }
static int get_hash_5 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0xffffff; }
static int get_hash_6 (int index) { return crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)] & 0x7ffffff; }
#else
static int get_hash_0(int index) { return crypt_out[index][0] & 0xf; }
static int get_hash_1(int index) { return crypt_out[index][0] & 0xff; }
static int get_hash_2(int index) { return crypt_out[index][0] & 0xfff; }
static int get_hash_3(int index) { return crypt_out[index][0] & 0xffff; }
static int get_hash_4(int index) { return crypt_out[index][0] & 0xfffff; }
static int get_hash_5(int index) { return crypt_out[index][0] & 0xffffff; }
static int get_hash_6(int index) { return crypt_out[index][0] & 0x7ffffff; }
#endif
#ifdef MMX_COEF_SHA256
static void set_key(char *key, int index) {
const ARCH_WORD_32 *wkey = (ARCH_WORD_32*)key;
ARCH_WORD_32 *keybuffer = &((ARCH_WORD_32 *)saved_key)[(index&(MMX_COEF_SHA256-1)) + (index>>(MMX_COEF_SHA256>>1))*SHA256_BUF_SIZ*MMX_COEF_SHA256];
ARCH_WORD_32 *keybuf_word = keybuffer;
unsigned int len;
ARCH_WORD_32 temp;
len = 0;
while((unsigned char)(temp = *wkey++)) {
if (!(temp & 0xff00))
{
*keybuf_word = JOHNSWAP((temp & 0xff) | (0x80 << 8));
len++;
goto key_cleaning;
}
if (!(temp & 0xff0000))
{
*keybuf_word = JOHNSWAP((temp & 0xffff) | (0x80 << 16));
len+=2;
goto key_cleaning;
}
if (!(temp & 0xff000000))
{
*keybuf_word = JOHNSWAP(temp | (0x80 << 24));
len+=3;
goto key_cleaning;
}
*keybuf_word = JOHNSWAP(temp);
len += 4;
keybuf_word += MMX_COEF_SHA256;
}
*keybuf_word = 0x80000000;
key_cleaning:
keybuf_word += MMX_COEF_SHA256;
while(*keybuf_word) {
*keybuf_word = 0;
keybuf_word += MMX_COEF_SHA256;
}
keybuffer[15*MMX_COEF_SHA256] = len << 3;
}
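/*
 * Layout sketch (editorial, assuming JOHNSWAP is the usual John the
 * Ripper 32-bit byte swap): lane L of vector block B keeps its 32-bit
 * word W at ((ARCH_WORD_32*)saved_key)
 * [B*SHA256_BUF_SIZ*MMX_COEF_SHA256 + W*MMX_COEF_SHA256 + L], i.e. the
 * lanes are interleaved word by word. A key like "abc" therefore packs
 * as the big-endian word 0x61626380 ('a' 'b' 'c' followed by the 0x80
 * SHA-256 padding byte) in word 0 of its lane, and word 15 receives the
 * message length in bits (3 << 3 == 24), giving a ready-padded block.
 */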
#else
static void set_key(char *key, int index)
{
int len = strlen(key);
saved_key_length[index] = len;
if (len > PLAINTEXT_LENGTH)
len = saved_key_length[index] = PLAINTEXT_LENGTH;
memcpy(saved_key[index], key, len);
}
#endif
#ifdef MMX_COEF_SHA256
static char *get_key(int index) {
unsigned int i,s;
static char out[PLAINTEXT_LENGTH+1];
unsigned char *wucp = (unsigned char*)saved_key;
s = ((ARCH_WORD_32 *)saved_key)[15*MMX_COEF_SHA256 + (index&(MMX_COEF_SHA256-1)) + (index>>(MMX_COEF_SHA256>>1))*SHA256_BUF_SIZ*MMX_COEF_SHA256] >> 3;
for(i=0;i<s;i++)
out[i] = wucp[ GETPOS(i, index) ];
out[i] = 0;
return (char*) out;
}
#else
static char *get_key(int index)
{
saved_key[index][saved_key_length[index]] = 0;
return saved_key[index];
}
#endif
static int crypt_all(int *pcount, struct db_salt *salt)
{
int count = *pcount;
int index = 0;
#ifdef _OPENMP
#ifdef MMX_COEF_SHA256
int inc = MMX_COEF_SHA256;
#else
int inc = 1;
#endif
#pragma omp parallel for
for (index = 0; index < count; index += inc)
#endif
{
#ifdef MMX_COEF_SHA256
SSESHA256body(&saved_key[index/MMX_COEF_SHA256], crypt_out[index/MMX_COEF_SHA256], NULL, SSEi_MIXED_IN);
#else
SHA256_CTX ctx;
SHA256_Init(&ctx);
SHA256_Update(&ctx, saved_key[index], saved_key_length[index]);
SHA256_Final((unsigned char *)crypt_out[index], &ctx);
#endif
}
return count;
}
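/*
 * Note (editorial): without _OPENMP the for statement above is compiled
 * out, so the block runs exactly once with index == 0; this relies on
 * the format's keys-per-crypt settings covering the whole batch in a
 * single SSESHA256body()/SHA256_* call chain, which is an assumption
 * about the MIN/MAX_KEYS_PER_CRYPT values defined earlier in this file.
 */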
static int cmp_all(void *binary, int count)
{
int index;
for (index = 0; index < count; index++)
#ifdef MMX_COEF_SHA256
if (((uint32_t *) binary)[0] == crypt_out[index>>(MMX_COEF_SHA256>>1)][index&(MMX_COEF_SHA256-1)])
#else
if ( ((ARCH_WORD_32*)binary)[0] == crypt_out[index][0] )
#endif
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
#ifdef MMX_COEF_SHA256
int i;
for (i = 0; i < BINARY_SIZE/4; i++)
if (((uint32_t *) binary)[i] != crypt_out[index>>(MMX_COEF_SHA256>>1)][(index&(MMX_COEF_SHA256-1))+i*MMX_COEF_SHA256])
return 0;
return 1;
#else
return !memcmp(binary, crypt_out[index], BINARY_SIZE);
#endif
}
static int cmp_exact(char *source, int index)
{
return 1;
}
struct fmt_main fmt_rawSHA256 = {
{
FORMAT_LABEL,
FORMAT_NAME,
"SHA256 " ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_SPLIT_UNIFIES_CASE,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
tests
}, {
init,
fmt_default_done,
fmt_default_reset,
prepare,
valid,
split,
binary,
fmt_default_salt,
#if FMT_MAIN_VERSION > 11
{ NULL },
#endif
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
fmt_default_set_salt,
set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
GB_unop__identity_uint8_uint16.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__identity_uint8_uint16)
// op(A') function: GB (_unop_tran__identity_uint8_uint16)
// C type: uint8_t
// A type: uint16_t
// cast: uint8_t cij = (uint8_t) aij
// unaryop: cij = aij
#define GB_ATYPE \
uint16_t
#define GB_CTYPE \
uint8_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint16_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
uint8_t z = (uint8_t) aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
uint16_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
uint8_t z = (uint8_t) aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_UINT8 || GxB_NO_UINT16)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__identity_uint8_uint16)
(
uint8_t *Cx, // Cx and Ax may be aliased
const uint16_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint16_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
uint16_t aij = Ax [p] ;
uint8_t z = (uint8_t) aij ;
Cx [p] = z ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__identity_uint8_uint16)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
nodal_residualbased_elimination_builder_and_solver_continuity_for_FSI.h | // | / |
// ' / __| _` | __| _ \ __|
// . \ | ( | | ( |\__ `
// _|\_\_| \__,_|\__|\___/ ____/
// Multi-Physics
//
// License: BSD License
// Kratos default license: kratos/license.txt
//
// Main authors: Riccardo Rossi, Alessandro Franci
//
//
#if !defined(KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI)
#define KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI
/* System includes */
#include <set>
#ifdef _OPENMP
#include <omp.h>
#endif
/* External includes */
// #define USE_GOOGLE_HASH
#ifdef USE_GOOGLE_HASH
#include "sparsehash/dense_hash_set" //included in external libraries
#else
#include <unordered_set>
#endif
/* Project includes */
#include "utilities/timer.h"
#include "includes/define.h"
#include "includes/key_hash.h"
#include "solving_strategies/builder_and_solvers/builder_and_solver.h"
#include "includes/model_part.h"
#include "pfem_fluid_dynamics_application_variables.h"
namespace Kratos
{
///@name Kratos Globals
///@{
///@}
///@name Type Definitions
///@{
///@}
///@name Enum's
///@{
///@}
///@name Functions
///@{
///@}
///@name Kratos Classes
///@{
/**
* @class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
* @ingroup KratosCore
* @brief Current class provides an implementation for standard builder and solving operations.
* @details The RHS is constituted by the unbalanced loads (residual)
* Degrees of freedom are reordered putting the restrained degrees of freedom at
* the end of the system ordered in reverse order with respect to the DofSet.
* Imposition of the Dirichlet conditions is naturally dealt with as the residual already contains
* this information.
* Calculation of the reactions involves a cost very similar to the calculation of the total residual
* @author Riccardo Rossi
*/
template <class TSparseSpace,
class TDenseSpace, //= DenseSpace<double>,
class TLinearSolver //= LinearSolver<TSparseSpace,TDenseSpace>
>
class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI
: public BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>
{
public:
///@name Type Definitions
///@{
KRATOS_CLASS_POINTER_DEFINITION(NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI);
typedef BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver> BaseType;
typedef typename BaseType::TSchemeType TSchemeType;
typedef typename BaseType::TDataType TDataType;
typedef typename BaseType::DofsArrayType DofsArrayType;
typedef typename BaseType::TSystemMatrixType TSystemMatrixType;
typedef typename BaseType::TSystemVectorType TSystemVectorType;
typedef typename BaseType::LocalSystemVectorType LocalSystemVectorType;
typedef typename BaseType::LocalSystemMatrixType LocalSystemMatrixType;
typedef typename BaseType::TSystemMatrixPointerType TSystemMatrixPointerType;
typedef typename BaseType::TSystemVectorPointerType TSystemVectorPointerType;
typedef Node<3> NodeType;
typedef typename BaseType::NodesArrayType NodesArrayType;
typedef typename BaseType::ElementsArrayType ElementsArrayType;
typedef typename BaseType::ConditionsArrayType ConditionsArrayType;
typedef typename BaseType::ElementsContainerType ElementsContainerType;
typedef Vector VectorType;
///@}
///@name Life Cycle
///@{
/** Constructor.
*/
NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI(
typename TLinearSolver::Pointer pNewLinearSystemSolver)
: BuilderAndSolver<TSparseSpace, TDenseSpace, TLinearSolver>(pNewLinearSystemSolver)
{
// KRATOS_INFO("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI") << "Using the standard builder and solver " << std::endl;
}
/** Destructor.
*/
~NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI() override
{
}
///@}
///@name Operators
///@{
///@}
///@name Operations
///@{
void BuildAll(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &b)
{
KRATOS_TRY
KRATOS_ERROR_IF(!pScheme) << "No scheme provided!" << std::endl;
//contributions to the continuity equation system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
Element::EquationIdVectorType EquationId;
// LocalSystemMatrixType solidLHS_Contribution = LocalSystemMatrixType(0, 0);
// LocalSystemVectorType solidRHS_Contribution = LocalSystemVectorType(0);
// Element::EquationIdVectorType solidEquationId;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
const double timeInterval = CurrentProcessInfo[DELTA_TIME];
double deltaPressure = 0;
/* #pragma omp parallel */
// {
ModelPart::NodeIterator NodesBegin;
ModelPart::NodeIterator NodesEnd;
OpenMPUtils::PartitionedIterators(rModelPart.Nodes(), NodesBegin, NodesEnd);
for (ModelPart::NodeIterator itNode = NodesBegin; itNode != NodesEnd; ++itNode)
{
// if(itNode->Is(SOLID)){
// NodeWeakPtrVectorType& neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
// const unsigned int neighSize = neighb_nodes.size() +1 ;
// if(neighSize>1)
// {
// const double nodalVolume=itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
// if (solidLHS_Contribution.size1() != 1)
// solidLHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!!
// if (solidRHS_Contribution.size() != 1)
// solidRHS_Contribution.resize(1, false); //false says not to preserve existing storage!!
// solidLHS_Contribution= ZeroMatrix(1,1);
// solidRHS_Contribution= ZeroVector(1);
// if (solidEquationId.size() != 1)
// solidEquationId.resize(1, false);
// // if (solidLHS_Contribution.size1() != neighSize)
// // solidLHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!!
// // if (solidRHS_Contribution.size() != neighSize)
// // solidRHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!!
// // solidLHS_Contribution= ZeroMatrix(neighSize,neighSize);
// // solidRHS_Contribution= ZeroVector(neighSize);
// // if (solidEquationId.size() != neighSize)
// // solidEquationId.resize(neighSize, false);
// double deviatoricCoeff=itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
// double volumetricCoeff=itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT)+2.0*deviatoricCoeff/3.0;
// deltaPressure=itNode->FastGetSolutionStepValue(PRESSURE,0)-itNode->FastGetSolutionStepValue(PRESSURE,1);
// solidLHS_Contribution(0,0)+= nodalVolume/volumetricCoeff;
// solidRHS_Contribution[0] += -deltaPressure*nodalVolume/volumetricCoeff;
// solidRHS_Contribution[0] += itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE)*nodalVolume;
// const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
// solidEquationId[0]=itNode->GetDof(PRESSURE,xDofPos).EquationId();
// // Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(SOLID_NODAL_SFD_NEIGHBOURS_ORDER);
// // // const unsigned int neighSize = neighb_nodes.size()+1;
// // const unsigned int neighSize = nodalSFDneighboursId.size();
// // if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// // for (unsigned int i = 0; i< neighSize; i++)
// // {
// // unsigned int indexNode=i+1;
// // if(indexNode<neighSize){
// // unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode];
// // for (unsigned int k = 0; k< neighb_nodes.size(); k++)
// // {
// // unsigned int neigh_nodes_id=neighb_nodes[k].Id();
// // if(neigh_nodes_id==other_neigh_nodes_id){
// // solidEquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
// // break;
// // }
// // }
// // }
// // }
// // }else{
// // for (unsigned int k = 0; k< neighb_nodes.size(); k++)
// // {
// // solidEquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
// // }
// // }
// #ifdef _OPENMP
// Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId, mlock_array);
// #else
// Assemble(A, b, solidLHS_Contribution, solidRHS_Contribution, solidEquationId);
// #endif
// }
// }
//if((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
NodeWeakPtrVectorType &neighb_nodes = itNode->GetValue(NEIGHBOUR_NODES);
const unsigned int neighSize = neighb_nodes.size() + 1;
if (neighSize > 1)
{
// if (LHS_Contribution.size1() != neighSize)
// LHS_Contribution.resize(neighSize, neighSize, false); //false says not to preserve existing storage!!
// if (RHS_Contribution.size() != neighSize)
// RHS_Contribution.resize(neighSize, false); //false says not to preserve existing storage!!
// LHS_Contribution= ZeroMatrix(neighSize,neighSize);
// RHS_Contribution= ZeroVector(neighSize);
// if (EquationId.size() != neighSize)
// EquationId.resize(neighSize, false);
if (LHS_Contribution.size1() != 1)
LHS_Contribution.resize(1, 1, false); //false says not to preserve existing storage!!
if (RHS_Contribution.size() != 1)
RHS_Contribution.resize(1, false); //false says not to preserve existing storage!!
noalias(LHS_Contribution) = ZeroMatrix(1, 1);
noalias(RHS_Contribution) = ZeroVector(1);
if (EquationId.size() != 1)
EquationId.resize(1, false);
if ((itNode->Is(FLUID) && itNode->IsNot(SOLID)) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
double nodalVolume = itNode->FastGetSolutionStepValue(NODAL_VOLUME);
if (nodalVolume > 0)
{ // in interface nodes not in contact with fluid elements the nodal volume is zero
double deviatoricCoeff = itNode->FastGetSolutionStepValue(DEVIATORIC_COEFFICIENT);
double yieldShear = itNode->FastGetSolutionStepValue(YIELD_SHEAR);
if (yieldShear > 0)
{
double adaptiveExponent = itNode->FastGetSolutionStepValue(ADAPTIVE_EXPONENT);
double equivalentStrainRate = itNode->FastGetSolutionStepValue(NODAL_EQUIVALENT_STRAIN_RATE);
double exponent = -adaptiveExponent * equivalentStrainRate;
if (equivalentStrainRate != 0)
{
deviatoricCoeff += (yieldShear / equivalentStrainRate) * (1 - exp(exponent));
}
if (equivalentStrainRate < 0.00001 && yieldShear != 0 && adaptiveExponent != 0)
{
// for gamma_dot very small the limit of the Papanastasiou viscosity is mu=m*tau_yield
deviatoricCoeff = adaptiveExponent * yieldShear;
}
}
if (deviatoricCoeff > 0.1 && itNode->IsNot(SOLID))
{
deviatoricCoeff = 0.1;
}
double volumetricCoeff = itNode->FastGetSolutionStepValue(VOLUMETRIC_COEFFICIENT) + 2.0 * deviatoricCoeff / 3.0;
if (itNode->IsNot(SOLID) || itNode->FastGetSolutionStepValue(INTERFACE_NODE) == true)
{
volumetricCoeff = timeInterval * itNode->FastGetSolutionStepValue(BULK_MODULUS);
}
deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
RHS_Contribution[0] += itNode->GetSolutionStepValue(NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
}
}
if (itNode->Is(SOLID))
{
double nodalVolume = itNode->FastGetSolutionStepValue(SOLID_NODAL_VOLUME);
double youngModulus = itNode->FastGetSolutionStepValue(YOUNG_MODULUS);
double poissonRatio = itNode->FastGetSolutionStepValue(POISSON_RATIO);
double deviatoricCoeff = timeInterval * youngModulus / (1.0 + poissonRatio) * 0.5;
double volumetricCoeff = timeInterval * poissonRatio * youngModulus / ((1.0 + poissonRatio) * (1.0 - 2.0 * poissonRatio)) + 2.0 * deviatoricCoeff / 3.0;
deltaPressure = itNode->FastGetSolutionStepValue(PRESSURE, 0) - itNode->FastGetSolutionStepValue(PRESSURE, 1);
LHS_Contribution(0, 0) += nodalVolume / volumetricCoeff;
RHS_Contribution[0] += -deltaPressure * nodalVolume / volumetricCoeff;
RHS_Contribution[0] += itNode->GetSolutionStepValue(SOLID_NODAL_VOLUMETRIC_DEF_RATE) * nodalVolume;
}
const unsigned int xDofPos = itNode->GetDofPosition(PRESSURE);
EquationId[0] = itNode->GetDof(PRESSURE, xDofPos).EquationId();
// Vector nodalSFDneighboursId = itNode->FastGetSolutionStepValue(NODAL_SFD_NEIGHBOURS_ORDER);
// // const unsigned int neighSize = neighb_nodes.size()+1;
// const unsigned int neighSize = nodalSFDneighboursId.size();
// if(itNode->FastGetSolutionStepValue(INTERFACE_NODE)==true){
// for (unsigned int i = 0; i< neighSize; i++)
// {
// unsigned int indexNode=i+1;
// if(indexNode<neighSize){
// unsigned int other_neigh_nodes_id=nodalSFDneighboursId[indexNode];
// for (unsigned int k = 0; k< neighb_nodes.size(); k++)
// {
// unsigned int neigh_nodes_id=neighb_nodes[k].Id();
// if(neigh_nodes_id==other_neigh_nodes_id){
// EquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
// break;
// }
// }
// }
// }
// }else{
// for (unsigned int k = 0; k< neighb_nodes.size(); k++)
// {
// EquationId[k+1]=neighb_nodes[k].GetDof(PRESSURE,xDofPos).EquationId();
// }
// }
#ifdef _OPENMP
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, mlock_array);
#else
Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
//}
}
// }
ElementsArrayType &pElements = rModelPart.Elements();
int number_of_threads = ParallelUtilities::GetNumThreads();
#ifdef _OPENMP
int A_size = A.size1();
//creating an array of lock variables of the size of the system matrix
std::vector<omp_lock_t> lock_array(A.size1());
for (int i = 0; i < A_size; i++)
omp_init_lock(&lock_array[i]);
#endif
DenseVector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if (this->GetEchoLevel() > 0)
{
KRATOS_WATCH(number_of_threads);
KRATOS_WATCH(element_partition);
}
#pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1)
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType elementalLHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType elementalRHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType elementalEquationId;
const ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
unsigned int pos = (rModelPart.Nodes().begin())->GetDofPosition(PRESSURE);
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//if((*it)->Is(FLUID)){
if ((*it)->IsNot(SOLID))
{
//calculate elemental contribution
(*it)->CalculateLocalSystem(elementalLHS_Contribution, elementalRHS_Contribution, CurrentProcessInfo);
Geometry<Node<3>> &geom = (*it)->GetGeometry();
if (elementalEquationId.size() != geom.size())
elementalEquationId.resize(geom.size(), false);
for (unsigned int i = 0; i < geom.size(); i++)
elementalEquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId();
//assemble the elemental contribution
#ifdef _OPENMP
this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId, lock_array);
#else
this->Assemble(A, b, elementalLHS_Contribution, elementalRHS_Contribution, elementalEquationId);
#endif
}
}
}
#ifdef _OPENMP
for (int i = 0; i < A_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
KRATOS_CATCH("")
}
/**
* @brief This is a call to the linear system solver
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void SystemSolve(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
TSparseSpace::SetToZero(Dx);
// Prints information about the current time
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
*@brief This is a call to the linear system solver (taking into account some physical particularities of the problem)
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
* @param rModelPart The model part of the problem to solve
*/
void SystemSolveWithPhysics(
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b,
ModelPart &rModelPart)
{
KRATOS_TRY
double norm_b;
if (TSparseSpace::Size(b) != 0)
norm_b = TSparseSpace::TwoNorm(b);
else
norm_b = 0.00;
if (norm_b != 0.00)
{
//provide physical data as needed
if (BaseType::mpLinearSystemSolver->AdditionalPhysicalDataIsNeeded())
BaseType::mpLinearSystemSolver->ProvideAdditionalData(A, Dx, b, BaseType::mDofSet, rModelPart);
//do solve
BaseType::mpLinearSystemSolver->Solve(A, Dx, b);
}
else
{
TSparseSpace::SetToZero(Dx);
KRATOS_WARNING_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", rModelPart.GetCommunicator().MyPID() == 0) << "ATTENTION! setting the RHS to zero!" << std::endl;
}
// Prints information about the current time
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << *(BaseType::mpLinearSystemSolver) << std::endl;
KRATOS_CATCH("")
}
/**
* @brief Function to perform the building and solving phase at the same time.
* @details It is ideally the fastest and safest function to use when it is possible to solve
* just after building
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void BuildAndSolve(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
KRATOS_TRY
Timer::Start("Build");
/* boost::timer c_build_time; */
BuildAll(pScheme, rModelPart, A, b);
Timer::Stop("Build");
// ApplyPointLoads(pScheme,rModelPart,b);
// Does nothing... Dirichlet conditions are naturally dealt with in defining the residual
ApplyDirichletConditions(pScheme, rModelPart, A, Dx, b);
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "Before the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
/* const double start_solve = OpenMPUtils::GetCurrentTime(); */
// Timer::Start("Solve");
/* boost::timer c_solve_time; */
SystemSolveWithPhysics(A, Dx, b, rModelPart);
/* std::cout << "CONTINUITY EQ: solve_time : " << c_solve_time.elapsed() << std::endl; */
// Timer::Stop("Solve");
/* const double stop_solve = OpenMPUtils::GetCurrentTime(); */
KRATOS_INFO_IF("ResidualBasedBlockBuilderAndSolver", (this->GetEchoLevel() == 3)) << "After the solution of the system"
<< "\nSystem Matrix = " << A << "\nUnknowns vector = " << Dx << "\nRHS vector = " << b << std::endl;
KRATOS_CATCH("")
}
void Build(
typename TSchemeType::Pointer pScheme,
ModelPart &r_model_part,
TSystemMatrixType &A,
TSystemVectorType &b) override
{
KRATOS_TRY
if (!pScheme)
KRATOS_THROW_ERROR(std::runtime_error, "No scheme provided!", "");
//getting the elements from the model
ElementsArrayType &pElements = r_model_part.Elements();
// //getting the array of the conditions
// ConditionsArrayType& ConditionsArray = r_model_part.Conditions();
//resetting to zero the vector of reactions
TSparseSpace::SetToZero(*(BaseType::mpReactionsVector));
//create a partition of the element array
int number_of_threads = ParallelUtilities::GetNumThreads();
#ifdef _OPENMP
int A_size = A.size1();
//creating an array of lock variables of the size of the system matrix
std::vector<omp_lock_t> lock_array(A.size1());
for (int i = 0; i < A_size; i++)
omp_init_lock(&lock_array[i]);
#endif
DenseVector<unsigned int> element_partition;
CreatePartition(number_of_threads, pElements.size(), element_partition);
if (this->GetEchoLevel() > 0)
{
KRATOS_WATCH(number_of_threads);
KRATOS_WATCH(element_partition);
}
// double start_prod = OpenMPUtils::GetCurrentTime();
#pragma omp parallel for firstprivate(number_of_threads) schedule(static, 1)
for (int k = 0; k < number_of_threads; k++)
{
//contributions to the system
LocalSystemMatrixType LHS_Contribution = LocalSystemMatrixType(0, 0);
LocalSystemVectorType RHS_Contribution = LocalSystemVectorType(0);
//vector containing the localization in the system of the different
//terms
Element::EquationIdVectorType EquationId;
const ProcessInfo &CurrentProcessInfo = r_model_part.GetProcessInfo();
typename ElementsArrayType::ptr_iterator it_begin = pElements.ptr_begin() + element_partition[k];
typename ElementsArrayType::ptr_iterator it_end = pElements.ptr_begin() + element_partition[k + 1];
unsigned int pos = (r_model_part.Nodes().begin())->GetDofPosition(PRESSURE);
// assemble all elements
for (typename ElementsArrayType::ptr_iterator it = it_begin; it != it_end; ++it)
{
//calculate elemental contribution
(*it)->CalculateLocalSystem(LHS_Contribution, RHS_Contribution, CurrentProcessInfo);
Geometry<Node<3>> &geom = (*it)->GetGeometry();
if (EquationId.size() != geom.size())
EquationId.resize(geom.size(), false);
for (unsigned int i = 0; i < geom.size(); i++)
EquationId[i] = geom[i].GetDof(PRESSURE, pos).EquationId();
//assemble the elemental contribution
#ifdef _OPENMP
this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId, lock_array);
#else
this->Assemble(A, b, LHS_Contribution, RHS_Contribution, EquationId);
#endif
}
}
// if (this->GetEchoLevel() > 0)
// {
// double stop_prod = OpenMPUtils::GetCurrentTime();
// std::cout << "parallel building time: " << stop_prod - start_prod << std::endl;
// }
#ifdef _OPENMP
for (int i = 0; i < A_size; i++)
omp_destroy_lock(&lock_array[i]);
#endif
KRATOS_CATCH("")
}
/**
* @brief Builds the list of the DofSets involved in the problem by "asking" to each element
* and condition its Dofs.
* @details The list of dofs is stored inside the BuilderAndSolver as it is closely connected to the
* way the matrix and RHS are built
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
*/
void SetUpDofSet(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart) override
{
KRATOS_TRY;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1 && rModelPart.GetCommunicator().MyPID() == 0) << "Setting up the dofs" << std::endl;
//Gets the array of elements from the modeler
ElementsArrayType &pElements = rModelPart.Elements();
const int nelements = static_cast<int>(pElements.size());
Element::DofsVectorType ElementalDofList;
ProcessInfo &CurrentProcessInfo = rModelPart.GetProcessInfo();
unsigned int nthreads = ParallelUtilities::GetNumThreads();
// typedef boost::fast_pool_allocator< NodeType::DofType::Pointer > allocator_type;
// typedef std::unordered_set < NodeType::DofType::Pointer,
// DofPointerHasher,
// DofPointerComparor,
// allocator_type > set_type;
#ifdef USE_GOOGLE_HASH
typedef google::dense_hash_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#else
typedef std::unordered_set<NodeType::DofType::Pointer, DofPointerHasher> set_type;
#endif
//
std::vector<set_type> dofs_aux_list(nthreads);
// std::vector<allocator_type> allocators(nthreads);
for (int i = 0; i < static_cast<int>(nthreads); i++)
{
#ifdef USE_GOOGLE_HASH
dofs_aux_list[i].set_empty_key(NodeType::DofType::Pointer());
#else
// dofs_aux_list[i] = set_type( allocators[i]);
dofs_aux_list[i].reserve(nelements);
#endif
}
//#pragma omp parallel for firstprivate(nelements, ElementalDofList)
for (int i = 0; i < static_cast<int>(nelements); ++i)
{
auto it_elem = pElements.begin() + i;
const IndexType this_thread_id = OpenMPUtils::ThisThread();
// Gets list of Dof involved on every element
pScheme->GetDofList(*it_elem, ElementalDofList, CurrentProcessInfo);
dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
}
// ConditionsArrayType& pConditions = rModelPart.Conditions();
// const int nconditions = static_cast<int>(pConditions.size());
// #pragma omp parallel for firstprivate(nconditions, ElementalDofList)
// for (int i = 0; i < nconditions; i++)
// {
// typename ConditionsArrayType::iterator it = pConditions.begin() + i;
// const unsigned int this_thread_id = OpenMPUtils::ThisThread();
// // gets list of Dof involved on every element
// pScheme->GetConditionDofList(*(it.base()), ElementalDofList, CurrentProcessInfo);
// dofs_aux_list[this_thread_id].insert(ElementalDofList.begin(), ElementalDofList.end());
// }
//here we do a tree reduction so as to gather everything on thread 0
unsigned int old_max = nthreads;
unsigned int new_max = ceil(0.5 * static_cast<double>(old_max));
while (new_max >= 1 && new_max != old_max)
{
// //just for debugging
// std::cout << "old_max" << old_max << " new_max:" << new_max << std::endl;
// for (int i = 0; i < new_max; i++)
// {
// if (i + new_max < old_max)
// {
// std::cout << i << " - " << i + new_max << std::endl;
// }
// }
// std::cout << "********************" << std::endl;
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(new_max); i++)
{
if (i + new_max < old_max)
{
dofs_aux_list[i].insert(dofs_aux_list[i + new_max].begin(), dofs_aux_list[i + new_max].end());
dofs_aux_list[i + new_max].clear();
}
}
old_max = new_max;
new_max = ceil(0.5 * static_cast<double>(old_max));
}
DofsArrayType Doftemp;
BaseType::mDofSet = DofsArrayType();
Doftemp.reserve(dofs_aux_list[0].size());
for (auto it = dofs_aux_list[0].begin(); it != dofs_aux_list[0].end(); it++)
{
Doftemp.push_back(*it);
}
Doftemp.Sort();
BaseType::mDofSet = Doftemp;
// Throws an exception if there are no Degrees of freedom involved in the analysis
KRATOS_ERROR_IF(BaseType::mDofSet.size() == 0) << "No degrees of freedom!" << std::endl;
BaseType::mDofSetIsInitialized = true;
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 2 && rModelPart.GetCommunicator().MyPID() == 0) << "Finished setting up the dofs" << std::endl;
#ifdef _OPENMP
if (mlock_array.size() != 0)
{
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_destroy_lock(&mlock_array[i]);
}
mlock_array.resize(BaseType::mDofSet.size());
for (int i = 0; i < static_cast<int>(mlock_array.size()); i++)
omp_init_lock(&mlock_array[i]);
#endif
// If reactions are to be calculated, we check if all the dofs have reactions defined
// This is to be done only in debug mode
#ifdef KRATOS_DEBUG
if (BaseType::GetCalculateReactionsFlag())
{
for (auto dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
{
KRATOS_ERROR_IF_NOT(dof_iterator->HasReaction()) << "Reaction variable not set for the following : " << std::endl
<< "Node : " << dof_iterator->Id() << std::endl
<< "Dof : " << (*dof_iterator) << std::endl
<< "Not possible to calculate reactions." << std::endl;
}
}
#endif
KRATOS_CATCH("");
}
/**
* @brief Organises the dofset in order to speed up the building phase
* @param rModelPart The model part of the problem to solve
*/
void SetUpSystem(
ModelPart &rModelPart) override
{
// Set equation id for degrees of freedom
// the free degrees of freedom are positioned at the beginning of the system,
// while the fixed ones are at the end (in opposite order).
//
// that means that if the EquationId is greater than "mEquationSystemSize"
// the pointed degree of freedom is restrained
//
int free_id = 0;
int fix_id = BaseType::mDofSet.size();
for (typename DofsArrayType::iterator dof_iterator = BaseType::mDofSet.begin(); dof_iterator != BaseType::mDofSet.end(); ++dof_iterator)
if (dof_iterator->IsFixed())
dof_iterator->SetEquationId(--fix_id);
else
dof_iterator->SetEquationId(free_id++);
BaseType::mEquationSystemSize = fix_id;
}
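// Worked example (editorial): for a DofSet of size 5 whose 2nd and 4th
// entries are fixed, the loop above yields
//   free dofs : EquationId 0, 1, 2 (1st, 3rd and 5th entries, in order)
//   fixed dofs: EquationId 4, then 3 (filled backwards from the end)
// so mEquationSystemSize == 3 and every EquationId >= 3 is restrained.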
//**************************************************************************
//**************************************************************************
void ResizeAndInitializeVectors(
typename TSchemeType::Pointer pScheme,
TSystemMatrixPointerType &pA,
TSystemVectorPointerType &pDx,
TSystemVectorPointerType &pb,
ModelPart &rModelPart) override
{
KRATOS_TRY
/* boost::timer c_contruct_matrix; */
if (pA == NULL) //if the pointer is not initialized initialize it to an empty matrix
{
TSystemMatrixPointerType pNewA = TSystemMatrixPointerType(new TSystemMatrixType(0, 0));
pA.swap(pNewA);
}
if (pDx == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewDx = TSystemVectorPointerType(new TSystemVectorType(0));
pDx.swap(pNewDx);
}
if (pb == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewb = TSystemVectorPointerType(new TSystemVectorType(0));
pb.swap(pNewb);
}
if (BaseType::mpReactionsVector == NULL) //if the pointer is not initialized initialize it to an empty vector
{
TSystemVectorPointerType pNewReactionsVector = TSystemVectorPointerType(new TSystemVectorType(0));
BaseType::mpReactionsVector.swap(pNewReactionsVector);
}
TSystemMatrixType &A = *pA;
TSystemVectorType &Dx = *pDx;
TSystemVectorType &b = *pb;
//resizing the system vectors and matrix
if (A.size1() == 0 || BaseType::GetReshapeMatrixFlag() == true) //if the matrix is not initialized
{
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, false);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
else
{
if (A.size1() != BaseType::mEquationSystemSize || A.size2() != BaseType::mEquationSystemSize)
{
KRATOS_WATCH("it should not come here!!!!!!!! ... this is SLOW");
KRATOS_ERROR << "The equation system size has changed during the simulation. This is not permited." << std::endl;
A.resize(BaseType::mEquationSystemSize, BaseType::mEquationSystemSize, true);
ConstructMatrixStructure(pScheme, A, rModelPart);
}
}
if (Dx.size() != BaseType::mEquationSystemSize)
Dx.resize(BaseType::mEquationSystemSize, false);
if (b.size() != BaseType::mEquationSystemSize)
b.resize(BaseType::mEquationSystemSize, false);
//if needed resize the vector for the calculation of reactions
if (BaseType::mCalculateReactionsFlag == true)
{
unsigned int ReactionsVectorSize = BaseType::mDofSet.size();
if (BaseType::mpReactionsVector->size() != ReactionsVectorSize)
BaseType::mpReactionsVector->resize(ReactionsVectorSize, false);
}
/* std::cout << "CONTINUITY EQ: contruct_matrix : " << c_contruct_matrix.elapsed() << std::endl; */
KRATOS_CATCH("")
}
//**************************************************************************
//**************************************************************************
/**
* @brief Applies the Dirichlet conditions. This operation may be very heavy or completely
* inexpensive depending on the implementation chosen and on how the System Matrix is built.
* @details For explanation of how it works for a particular implementation the user
* should refer to the particular Builder And Solver chosen
* @param pScheme The integration scheme considered
* @param rModelPart The model part of the problem to solve
* @param A The LHS matrix
* @param Dx The Unknowns vector
* @param b The RHS vector
*/
void ApplyDirichletConditions(
typename TSchemeType::Pointer pScheme,
ModelPart &rModelPart,
TSystemMatrixType &A,
TSystemVectorType &Dx,
TSystemVectorType &b) override
{
}
/**
* @brief This function is intended to be called at the end of the solution step to clean up memory storage not needed
*/
void Clear() override
{
this->mDofSet = DofsArrayType();
if (this->mpReactionsVector != NULL)
TSparseSpace::Clear((this->mpReactionsVector));
// this->mReactionsVector = TSystemVectorType();
this->mpLinearSystemSolver->Clear();
KRATOS_INFO_IF("NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI", this->GetEchoLevel() > 1) << "Clear Function called" << std::endl;
}
/**
* @brief This function is designed to be called once to perform all the checks needed
* on the input provided. Checks can be "expensive" as the function is designed
* to catch user's errors.
* @param rModelPart The model part of the problem to solve
* @return 0 all ok
*/
int Check(ModelPart &rModelPart) override
{
KRATOS_TRY
return 0;
KRATOS_CATCH("");
}
///@}
///@name Access
///@{
///@}
///@name Inquiry
///@{
///@}
///@name Friends
///@{
///@}
protected:
///@name Protected static Member Variables
///@{
///@}
///@name Protected member Variables
///@{
///@}
///@name Protected Operators
///@{
///@}
///@name Protected Operations
///@{
void Assemble(
TSystemMatrixType &A,
TSystemVectorType &b,
const LocalSystemMatrixType &LHS_Contribution,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId
#ifdef _OPENMP
,
std::vector<omp_lock_t> &lock_array
#endif
)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
#ifdef _OPENMP
omp_set_lock(&lock_array[i_global]);
#endif
b[i_global] += RHS_Contribution(i_local);
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
{
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
#ifdef _OPENMP
omp_unset_lock(&lock_array[i_global]);
#endif
}
//note that assembly on fixed rows is not performed here
}
}
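// Note (editorial): one omp lock per matrix row serializes concurrent
// writes to row i_global of A and to b[i_global], while rows touched by
// different threads are assembled in parallel; this is why lock_array is
// sized A.size1() and the lock is taken on the global row index.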
//**************************************************************************
virtual void ConstructMatrixStructure(
typename TSchemeType::Pointer pScheme,
TSystemMatrixType &A,
ModelPart &rModelPart)
{
//filling with zero the matrix (creating the structure)
Timer::Start("MatrixStructure");
const std::size_t equation_size = BaseType::mEquationSystemSize;
std::vector<std::unordered_set<std::size_t>> indices(equation_size);
#pragma omp parallel for firstprivate(equation_size)
for (int iii = 0; iii < static_cast<int>(equation_size); iii++)
{
indices[iii].reserve(40);
}
Element::EquationIdVectorType ids(3, 0);
#pragma omp parallel firstprivate(ids)
{
// The process info
ProcessInfo &r_current_process_info = rModelPart.GetProcessInfo();
// We repeat the same declaration for each thread
std::vector<std::unordered_set<std::size_t>> temp_indexes(equation_size);
#pragma omp for
for (int index = 0; index < static_cast<int>(equation_size); ++index)
temp_indexes[index].reserve(30);
// Getting the size of the array of elements from the model
const int number_of_elements = static_cast<int>(rModelPart.Elements().size());
// Element initial iterator
const auto el_begin = rModelPart.ElementsBegin();
// We iterate over the elements
#pragma omp for schedule(guided, 512) nowait
for (int i_elem = 0; i_elem < number_of_elements; ++i_elem)
{
auto it_elem = el_begin + i_elem;
pScheme->EquationId(*it_elem, ids, r_current_process_info);
for (auto &id_i : ids)
{
if (id_i < BaseType::mEquationSystemSize)
{
auto &row_indices = temp_indexes[id_i];
for (auto &id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Getting the size of the array of the conditions
const int number_of_conditions = static_cast<int>(rModelPart.Conditions().size());
// Condition initial iterator
const auto cond_begin = rModelPart.ConditionsBegin();
// We iterate over the conditions
#pragma omp for schedule(guided, 512) nowait
for (int i_cond = 0; i_cond < number_of_conditions; ++i_cond)
{
auto it_cond = cond_begin + i_cond;
pScheme->EquationId(*it_cond, ids, r_current_process_info);
for (auto &id_i : ids)
{
if (id_i < BaseType::mEquationSystemSize)
{
auto &row_indices = temp_indexes[id_i];
for (auto &id_j : ids)
if (id_j < BaseType::mEquationSystemSize)
row_indices.insert(id_j);
}
}
}
// Merging all the temporal indexes
#pragma omp critical
{
for (int i = 0; i < static_cast<int>(temp_indexes.size()); ++i)
{
indices[i].insert(temp_indexes[i].begin(), temp_indexes[i].end());
}
}
}
//count the row sizes
unsigned int nnz = 0;
for (unsigned int i = 0; i < indices.size(); i++)
nnz += indices[i].size();
A = boost::numeric::ublas::compressed_matrix<double>(indices.size(), indices.size(), nnz);
double *Avalues = A.value_data().begin();
std::size_t *Arow_indices = A.index1_data().begin();
std::size_t *Acol_indices = A.index2_data().begin();
//filling the index1 vector - DO NOT MAKE PARALLEL THE FOLLOWING LOOP!
Arow_indices[0] = 0;
for (int i = 0; i < static_cast<int>(A.size1()); i++)
Arow_indices[i + 1] = Arow_indices[i] + indices[i].size();
#pragma omp parallel for
for (int i = 0; i < static_cast<int>(A.size1()); i++)
{
const unsigned int row_begin = Arow_indices[i];
const unsigned int row_end = Arow_indices[i + 1];
unsigned int k = row_begin;
for (auto it = indices[i].begin(); it != indices[i].end(); it++)
{
Acol_indices[k] = *it;
Avalues[k] = 0.0;
k++;
}
std::sort(&Acol_indices[row_begin], &Acol_indices[row_end]);
}
A.set_filled(indices.size() + 1, nnz);
Timer::Stop("MatrixStructure");
}
void AssembleLHS(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
unsigned int j_global = EquationId[j_local];
if (j_global < BaseType::mEquationSystemSize)
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Protected Access
///@{
///@}
///@name Protected Inquiry
///@{
///@}
///@name Protected LifeCycle
///@{
///@}
private:
///@name Static Member Variables
///@{
///@}
///@name Member Variables
///@{
#ifdef _OPENMP
std::vector<omp_lock_t> mlock_array;
#endif
///@}
///@name Private Operators
///@{
///@}
///@name Private Operations
///@{
inline void AddUnique(std::vector<std::size_t> &v, const std::size_t &candidate)
{
std::vector<std::size_t>::iterator i = v.begin();
std::vector<std::size_t>::iterator endit = v.end();
while (i != endit && (*i) != candidate)
{
i++;
}
if (i == endit)
{
v.push_back(candidate);
}
}
inline void CreatePartition(unsigned int number_of_threads, const int number_of_rows, DenseVector<unsigned int> &partitions)
{
partitions.resize(number_of_threads + 1);
int partition_size = number_of_rows / number_of_threads;
partitions[0] = 0;
partitions[number_of_threads] = number_of_rows;
for (unsigned int i = 1; i < number_of_threads; i++)
partitions[i] = partitions[i - 1] + partition_size;
}
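// Worked example (editorial): 10 rows on 3 threads gives
// partition_size = 10 / 3 = 3 and partitions = [0, 3, 6, 10]; the last
// partition absorbs the integer-division remainder because
// partitions[number_of_threads] is pinned to number_of_rows.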
void AssembleRHS(
TSystemVectorType &b,
const LocalSystemVectorType &RHS_Contribution,
const Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = RHS_Contribution.size();
if (BaseType::mCalculateReactionsFlag == false)
{
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
else
{
TSystemVectorType &ReactionsVector = *BaseType::mpReactionsVector;
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
const unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize) //free dof
{
// ASSEMBLING THE SYSTEM VECTOR
double &b_value = b[i_global];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
else //fixed dof
{
double &b_value = ReactionsVector[i_global - BaseType::mEquationSystemSize];
const double &rhs_value = RHS_Contribution[i_local];
#pragma omp atomic
b_value += rhs_value;
}
}
}
}
//**************************************************************************
void AssembleLHS_CompleteOnFreeRows(
TSystemMatrixType &A,
LocalSystemMatrixType &LHS_Contribution,
Element::EquationIdVectorType &EquationId)
{
unsigned int local_size = LHS_Contribution.size1();
for (unsigned int i_local = 0; i_local < local_size; i_local++)
{
unsigned int i_global = EquationId[i_local];
if (i_global < BaseType::mEquationSystemSize)
{
for (unsigned int j_local = 0; j_local < local_size; j_local++)
{
int j_global = EquationId[j_local];
A(i_global, j_global) += LHS_Contribution(i_local, j_local);
}
}
}
}
///@}
///@name Private Operations
///@{
///@}
///@name Private Access
///@{
///@}
///@name Private Inquiry
///@{
///@}
///@name Un accessible methods
///@{
///@}
}; /* Class NodalResidualBasedEliminationBuilderAndSolverContinuityForFSI */
///@}
///@name Type Definitions
///@{
///@}
} /* namespace Kratos.*/
#endif /* KRATOS_NODAL_RESIDUAL_BASED_ELIMINATION_BUILDER_AND_SOLVER_CONTINUITY_FOR_FSI defined */
|
lcm2_profiler.c | /** @file lcm2_profiler.c
*
* @par Copyright:
* 2009-2017 (C) Kai-Uwe Behrmann
*
* @brief littleCMS CMM profile generator for Oyranos
* @internal
* @author Kai-Uwe Behrmann <ku.b@gmx.de>
* @par License:
* MIT <http://www.opensource.org/licenses/MIT>
* @since 2009/10/24
*/
#include "lcm2_profiler.h"
#include <assert.h>
#include <lcms2.h>
#include <stdarg.h>
#include <stdlib.h>
#include <stdint.h>
#include <string.h>
#include <math.h>
#include <wchar.h>
#ifndef OY_UNUSED
#if (__GNUC__*100 + __GNUC_MINOR__) >= 406
#define OY_UNUSED __attribute__ ((unused))
#elif defined(_MSC_VER)
#define OY_UNUSED __declspec(unused)
#else
#define OY_UNUSED
#endif
#endif
#ifndef OY_FALLTHROUGH
#if defined(__clang__)
#define OY_FALLTHROUGH
#elif __GNUC__ >= 7
#define OY_FALLTHROUGH __attribute__ ((fallthrough));
#else
#define OY_FALLTHROUGH
#endif
#endif
#if LCMS_VERSION < 2050
/* 'dscm' */
#define cmsSigProfileDescriptionMLTag 0x6473636d
#endif
#define lcm2Free_m(v) if(v) { free(v); v = NULL; }
extern lcm2Message_f lcm2msg_p;
static const int max_channels = 16;
/* core functions */
typedef struct {
cmsHTRANSFORM in2MySpace;
cmsHTRANSFORM mySpace2Out;
lcm2Sampler_f sampler;
void * sampler_variables;
int channelsIn;
int channelsProcess;
int channelsOut;
} lcm2Cargo_s;
int lcm2samplerDouble ( double in[],
double out[],
void * Cargo )
{
int i;
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
// color convert from input space to process color space
if(d->in2MySpace)
cmsDoTransform( d->in2MySpace, in, in, 1 );
// custom data processing
d->sampler(in,out,d->sampler_variables);
// converting from process space to output space
if(d->mySpace2Out)
cmsDoTransform( d->mySpace2Out, out, out, 1 );
// clipping
for(i = 0; i < d->channelsOut; ++i)
{
if(out[i] > 1.0)
out[i] = 1.0;
if(out[i] < 0.0)
out[i] = 0.0;
}
return TRUE;
}
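/*
 * Note (editorial): lcm2Cargo_s wires a three-stage pipeline around the
 * user sampler -- an optional in2MySpace transform into the working
 * space, the sampler itself, then an optional mySpace2Out transform --
 * followed by clipping to 0..1; the 16-bit and float samplers below only
 * rescale their values into and out of this double-precision path.
 */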
int lcm2sampler16 (const cmsUInt16Number In[],
cmsUInt16Number Out[],
void * Cargo)
{
int i, v, result = TRUE;
double in[max_channels], out[max_channels],
scaler = 65536.0;
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
for(i = 0; i < d->channelsIn; ++i)
in[i] = In[i] / scaler;
result = lcm2samplerDouble( in, out, Cargo );
for(i = 0; i < d->channelsOut; ++i)
{
v = out[i] * scaler;
// integer clipping
if(v > 65535)
Out[i] = 65535;
else
Out[i] = v;
}
return result;
}
int lcm2samplerFloat ( const cmsFloat32Number In[],
cmsFloat32Number Out[],
void * Cargo )
{
int i, result = TRUE;
double in[max_channels], out[max_channels];
lcm2Cargo_s * d = (lcm2Cargo_s*) Cargo;
for(i = 0; i < d->channelsIn; ++i)
in[i] = In[i];
result = lcm2samplerDouble( in, out, Cargo );
for(i = 0; i < d->channelsOut; ++i)
Out[i] = out[i];
return result;
}
/** \addtogroup profiler ICC profiler API
* @brief Easy to use API to generate matrix and LUT ICC profiles.
*
* @{ */
/** Function lcm2OpenProfileFile
* @brief Open a profile from file
*
* @code
// create ICC profile with linear gamma, RGB.709 primaries + D65 from wildcard
if(in_space_profile) h_in_space = lcm2OpenProfileFile( "*srgblinear", NULL );
@endcode
*
* @param[in] my_space_profile operating color space.
* Use a file name or
* possible wildcards:
* - *srgblinear
* - *srgb
* - *lab
* - *rec601.625.linear
* - *rec601.525.linear
* @param[in] my_space_profile_path path name for
* for my_space_profile; optional
* @return lcms profile handle
*
* @version Oyranos: 0.9.6
* @date 2016/03/04
* @since 2016/03/04 (Oyranos: 0.9.6)
*/
cmsHPROFILE lcm2OpenProfileFile ( const char * my_space_profile,
const char * my_space_profile_path )
{
cmsHPROFILE h_my_space = 0;
if(my_space_profile_path == NULL) my_space_profile_path = "";
if(my_space_profile && my_space_profile[0])
{
char * full_name = (char*) malloc(strlen(my_space_profile_path) + strlen(my_space_profile) + 1);
if(!full_name) return NULL;
sprintf( full_name, "%s%s", my_space_profile_path, my_space_profile );
if(strcmp(my_space_profile,"*lab") == 0)
h_my_space = cmsCreateLab4Profile(cmsD50_xyY());
else
if(strcmp(my_space_profile,"*xyz") == 0)
h_my_space = cmsCreateXYZProfile( );
else
if(strcmp(my_space_profile,"*srgb") == 0)
h_my_space = cmsCreate_sRGBProfile( );
else
if(strcmp(my_space_profile,"*srgblinear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33,
0.30, 0.60,
0.15, 0.06,
0.3127,0.329 );
else /* ITU-R BT.601-7 625-line, 50 field/s systems */
if(strcmp(my_space_profile,"*rec601.625.linear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64, 0.33,
0.29, 0.60,
0.15, 0.06,
0.3127,0.329 );
else /* ITU-R BT.601-7 525-line, 60/1.001, field/s systems */
if(strcmp(my_space_profile,"*rec601.525.linear") == 0)
h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.63, 0.34,
0.31, 0.595,
0.155, 0.07,
0.3127,0.329 );
if(!h_my_space)
h_my_space = cmsOpenProfileFromFile( full_name, "rb" );
if(!h_my_space) { lcm2msg_p( 300, NULL, "no profile from %s", full_name); }
/*else printf("will use %s\n", full_name);*/
lcm2Free_m(full_name);
}
return h_my_space;
}
/** Function lcm2WriteProfileToFile
* @brief Write a profile to a file
*
* The suggested naming scheme is "space version vendor.icc".
*
* @code
// "My-Space_v1.0_myna.icc"
char * file_name = lcm2WriteProfileToFile( my_space_profile,
"My-Space", "v1.0", "myna" );
@endcode
*
* @param[in] my_space_profile the profile
* @param[in] my_space_profile_name the color space name
* @param[in] my_space_profile_version the version of the profile; optional
* @param[in] vendor_four_bytes the vendor, just four bytes; optional
* @return constructed file name;
* can be released with free()
*
* @version Oyranos: 0.9.6
* @date 2016/03/06
* @since 2016/02/16 (Oyranos: 0.9.6)
*/
char * lcm2WriteProfileToFile ( cmsHPROFILE my_space_profile,
const char * my_space_profile_name,
const char * my_space_profile_version,
const char * vendor_four_bytes )
{
int i;
i = 0;
char * fn = (char*) malloc(strlen(my_space_profile_name) +
(my_space_profile_version ? strlen(my_space_profile_version):0) +
(vendor_four_bytes ? strlen(vendor_four_bytes):0) + 8);
if(!fn) return fn;
sprintf( fn, "%s%s%s%s%s%s", my_space_profile_name,
my_space_profile_version ? " " : "", my_space_profile_version?my_space_profile_version:"",
vendor_four_bytes ? " " : "", vendor_four_bytes?vendor_four_bytes:"",
strstr(my_space_profile_name, ".icc") ? "" : ".icc" );
while(fn[i]) { if(fn[i] == ' ') fn[i] = '_'; ++i; }
cmsSaveProfileToFile( my_space_profile, fn );
return fn;
}
/** Function lcm2WriteProfileToMem
*
* Save a cmsHPROFILE to an in-memory data blob
*
* @version Oyranos: 0.9.7
* @since 2008/12/28 (Oyranos: 0.9.7)
* @date 2017/06/07
*/
void * lcm2WriteProfileToMem ( cmsHPROFILE * profile,
size_t * size,
void * (*allocateFunc)(size_t size) )
{
int error = !profile;
void * data = 0;
cmsUInt32Number size_ = 0;
if(!error)
{
*size = 0;
if(!cmsSaveProfileToMem( profile, NULL, &size_ ))
lcm2msg_p( 300, NULL, "cmsSaveProfileToMem failed" );
if(size_)
{
if(allocateFunc)
data = allocateFunc( size_ );
else
data = malloc( size_ );
cmsSaveProfileToMem( profile, data, &size_ );
} else
lcm2msg_p( 300, NULL, "can not convert lcms2 profile to memory" );
*size = size_;
} else
lcm2msg_p( 301, NULL, "no profile" );
return data;
}
/* --- CIE*Lab space family --- */
/** \addtogroup samplers Samplers
* @{ */
static double CIE_C_scaler = M_SQRT2; /* fit all Lab into LCh */
/** Function lcm2SamplerLab2LCh
* @brief CIE*Lab -> CIE*LCh in PCS*Lab range
*
* The CIE*C channel is scaled to contain all CIE*Lab colors.
* The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
* be useful as a sampler argument to lcm2CreateProfileLutByFunc().
*
* @param[in] i input Lab triple
* @param[out] o output LCh triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerLab2LCh ( const double i[],
double o[],
void * none OY_UNUSED )
{
double a = (i[1] - 0.5) * CIE_C_scaler,
b = (i[2] - 0.5) * CIE_C_scaler;
/* CIE*L */
o[0] = i[0];
/* CIE*C = sqrt(CIE*a² + CIE*b²) */
o[1] = hypot(a,b);
/* CIE*h = atan2(CIE*b, CIE*a) */
o[2] = atan2(b,a)/M_PI/2.0 + 0.5;
}
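/* Worked example (editorial): the PCS gray axis i = {0.5, 0.5, 0.5}
 * gives a = b = 0 and hence o = {0.5, 0.0, 0.5}, since atan2(0,0) == 0
 * maps the hue onto the 0.5 midpoint; scaling a and b by CIE_C_scaler
 * (sqrt(2)) places the corners of the PCS*ab square exactly at C == 1,
 * so the whole encodable Lab range stays inside the 0..1 LCh cube. */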
/** Function lcm2SamplerLCh2Lab
* @brief CIE*LCh -> CIE*Lab in PCS*Lab range
*
* The CIE*C channel is scaled to contain all CIE*Lab colors.
* The ICC PCS*Lab space with range of 0->1 for all channels is utilised to
* be useful as a sampler argument to lcm2CreateProfileLutByFunc().
*
* @param[in] i input LCh triple
* @param[out] o output Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.7
* @date 2017/12/05
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerLCh2Lab ( const double i[],
double o[],
void * none OY_UNUSED )
{
/* CIE*L */
o[0] = i[0];
/* CIE*a = C * cos(h) */
o[1] = 1.0 - (i[1] * cos(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5);
/* CIE*b = C * sin(h) */
o[2] = 1.0 - (i[1] * sin(M_PI*2.0*i[2]) / CIE_C_scaler + 0.5);
}
/* sRGB */
cmsViewingConditions lcm2_vc_srgb_ =
{
{ 95.05, 100.0, 108.88 }, /* D65 white point */
20, /* viewing background luminance Yb */
4, /* ambient in cd/m² (== 64 lux) */
2, /* Dim surround */
1 /* adapted (0-1) */
};
/** Function lcm2SamplerJCh2Lab
* @brief CIECAM02 JCh -> CIE*Lab in PCS*Lab range
*
* Reverse CIECAM02 appearance transform back into the ICC PCS*Lab space.
* The PCS*Lab range of 0->1 for all channels is used so the function can
* serve as a sampler argument to lcm2CreateProfileLutByFunc().
*
* @param[in] i input JCh triple
* @param[out] o output Lab triple
* @param[in] v (cmsViewingConditions*); optional, default sRGB
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2SamplerJCh2Lab ( const double i[],
double o[],
void * v )
{
cmsViewingConditions * vc = &lcm2_vc_srgb_;
cmsHANDLE vh;
cmsCIEXYZ XYZ;
cmsJCh JCh = { i[0], i[1], i[2] };
vh = cmsCIECAM02Init( NULL, v?v:vc );
cmsCIECAM02Reverse( vh, &JCh, &XYZ );
cmsCIECAM02Done( vh );
lcm2CIEXYZ2iccLab( &XYZ, o );
}
/** Function lcm2SamplerLab2JCh
* @brief CIE*Lab -> CIE*JCh
*
* The CIECAM02 appearance space.
*
* @param[in] i input Lab triple
* @param[out] o output JCh triple
* @param[in] v (cmsViewingConditions*); optional, default sRGB
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2SamplerLab2JCh ( const double i[],
double o[],
void * v )
{
cmsViewingConditions * vc = &lcm2_vc_srgb_;
cmsHANDLE vh;
cmsCIEXYZ XYZ;
cmsJCh JCh;
lcm2iccLab2CIEXYZ( i, &XYZ );
vh = cmsCIECAM02Init( NULL, v?v:vc );
cmsCIECAM02Forward( vh, &XYZ, &JCh );
cmsCIECAM02Done( vh );
o[0] = JCh.J;
o[1] = JCh.C;
o[2] = JCh.h;
}
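/* Sketch (illustrative only): route a PCS*Lab grey through CIECAM02 and
 * back under modified viewing conditions; the La value below is an
 * arbitrary example, not a recommendation. */
static void lcm2DemoJChRoundtrip ( void )
{
  cmsViewingConditions vc = lcm2_vc_srgb_; /* start from the sRGB defaults */
  const double lab[3] = { 0.5, 0.5, 0.5 }; /* PCS*Lab mid grey */
  double jch[3], back[3];
  vc.La = 40; /* brighter ambient than the 4 cd/m2 default */
  lcm2SamplerLab2JCh( lab, jch, &vc );  /* raw J, C and h in degrees */
  lcm2SamplerJCh2Lab( jch, back, &vc ); /* back[i] ~= lab[i] */
}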
/* --- YCbCr space family --- */
typedef enum {
ITU_R_BT_601,
ITU_R_BT_601_JPEG,
ITU_REC_709,
ITU_R_BT_2020
} ITU_Std_e;
const char * ITU_Std_dscr [] = { "ITU-R BT.601", "ITU-R BT.601 / JPEG", "ITU REC-709", "ITU-R BT.2020", NULL };
static void selectKbKr( ITU_Std_e ITU_Std, double * Kb, double * Kr )
{
switch(ITU_Std)
{
case ITU_R_BT_601:
case ITU_R_BT_601_JPEG:
// ITU-R BT.601 - JPEG
*Kb = 0.114;
*Kr = 0.299;
break;
case ITU_REC_709:
// ITU REC-709
*Kb = 0.0722;
*Kr = 0.2126;
break;
case ITU_R_BT_2020:
// ITU-R BT.2020
*Kb = 0.0593;
*Kr = 0.2627;
break;
}
}
void selectBlackScale( ITU_Std_e ITU_Std, double * black, double * scale )
{
switch(ITU_Std)
{
case ITU_R_BT_601_JPEG:
*black = 0;
*scale = 255;
break;
case ITU_R_BT_601:
case ITU_REC_709:
case ITU_R_BT_2020:
*black = 16;
*scale = 219;
break;
}
}
void linear2ycbcr( double *L_ )
{
double L = *L_;
double alpha = 1.09929682680944,
beta = 0.018053968510807;
// linear -> gamma
if(L < beta)
L *= 4.5;
else
L = pow(L,0.45) - (alpha - 1);
*L_ = L;
}
void ycbcr2linear( double *V_ )
{
double L = *V_;
double alpha = 1.09929682680944,
beta = 0.081243; /* 0.018053968510807 * 4.5 */
// gamma -> linear
if(L < beta)
L /= 4.5;
else
L = pow( (L + (alpha-1)) / alpha, 1.0/0.45 );
*V_ = L;
}
static void rgb2ycbcr( double R, double G, double B,
double *Y_, double *Pb_, double *Pr_,
double Kb, double Kr )
{
double Y,Pb,Pr;
// common RGB -> YCbCr formula
Y = Kr * R + (1.0-Kr-Kb) * G + Kb * B;
Pb = 1.0/2.0 * (B-Y)/(1.0-Kb);
Pr = 1.0/2.0 * (R-Y)/(1.0-Kr);
*Y_ = Y; *Pb_ = Pb; *Pr_ = Pr;
}
static void ycbcr2rgb( double Y, double Pb, double Pr,
double *R_, double *G_, double *B_,
double Kb, double Kr )
{
double R,G,B;
// common YCbCr -> RGB formula
// Pb = 1.0/2.0 * (B-Y)/(1.0-Kb);
// 2*Pb = (B-Y)/(1-Kb)
// 2*Pb*(1-Kb) = B-Y
// 2*Pb*(1-Kb)+Y = B
B = 2*Pb*(1-Kb) + Y;
// Pr = 1.0/2.0 * (R-Y)/(1.0-Kr);
// 2*Pr*(1-Kr)+Y = R
R = 2*Pr*(1-Kr) + Y;
// Y = Kr * R + (1.0-Kr-Kb) * G + Kb * B;
// Y-(Kr*R)-(Kb*B) = (1-Kb-Kr) * G
// (Y-(Kr*R)-(Kb*B))/(1-Kb-Kr) = G
G = (Y - Kr*R - Kb*B)/(1.0-Kb-Kr);
*R_ = R; *G_ = G; *B_ = B;
}
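/* Sketch (illustrative only) confirming the inverted formulas above: for
 * any Kb/Kr pair the RGB -> YPbPr -> RGB chain reproduces its input. */
static void lcm2DemoYCbCrRoundtrip ( void )
{
  double Kb, Kr, Y, Pb, Pr, R, G, B;
  selectKbKr( ITU_REC_709, &Kb, &Kr );        /* Kb = 0.0722, Kr = 0.2126 */
  rgb2ycbcr( 0.8, 0.4, 0.2, &Y, &Pb, &Pr, Kb, Kr );
  ycbcr2rgb( Y, Pb, Pr, &R, &G, &B, Kb, Kr ); /* R,G,B ~= 0.8, 0.4, 0.2 */
}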
static void scaleRGB( ITU_Std_e ITU_Std, double scale, double * R, double * G, double * B )
{
switch(ITU_Std)
{
case ITU_R_BT_601: // ITU-R BT.601
case ITU_REC_709: // ITU REC-709
case ITU_R_BT_2020: // ITU-R BT.2020
case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG
*R *= scale;
*G *= scale;
*B *= scale;
break;
}
}
static void scaleLinearToYCbCr( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr )
{
max /= 255.0;
switch(ITU_Std)
{
case ITU_R_BT_601: // ITU-R BT.601
case ITU_REC_709: // ITU REC-709
case ITU_R_BT_2020: // ITU-R BT.2020
*Y *= (235.*max-16.*max);
*Y += 16.*max;
*Cb *= (240.*max-16.*max);
*Cb += 128.*max;
*Cr *= (240.*max-16.*max);
*Cr += 128.*max;
break;
case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG
*Y *= 255.*max;
*Cb *= 255.*max;
*Cb += 128.*max;
*Cr *= 255.*max;
*Cr += 128.*max;
break;
}
}
static void scaleYCbCrToLinear( ITU_Std_e ITU_Std, double max, double * Y, double * Cb, double * Cr )
{
max /= 255.0;
switch(ITU_Std)
{
case ITU_R_BT_601: // ITU-R BT.601
case ITU_REC_709: // ITU REC-709
case ITU_R_BT_2020: // ITU-R BT.2020
*Y -= 16.*max;
*Y /= (235.*max-16.*max);
*Cb -= 128.*max;
*Cb /= (240.*max-16.*max);
*Cr -= 128.*max;
*Cr /= (240.*max-16.*max);
break;
case ITU_R_BT_601_JPEG: // ITU-R BT.601 - JPEG
*Y /= 255.*max;
*Cb -= 128.*max;
*Cb /= 255.*max;
*Cr -= 128.*max;
*Cr /= 255.*max;
break;
}
}
/** Function lcm2SamplerRGB2JpegYCbCr
* @brief RGB -> YCbCr in Jpeg range
*
* ITU-R BT.601 / REC.601 YCbCr with the Jpeg range of 0-1 is generated.
*
* @param[in] i input RGB triple
* @param[out] o output REC.601 YCbCr in JPEG range triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/07 (Oyranos: 0.9.6)
*/
void lcm2SamplerRGB2JpegYCbCr (
const double i[],
double o[],
void * none OY_UNUSED )
{
/* final space PCS.Lab -> YCbCr */
/** Jpeg assumes no gamma correction.
* Thus this sampler converts from RGB.
*/
ITU_Std_e std = ITU_R_BT_601_JPEG;
double Kr,Kb,
Y = i[0], Pb = i[1], Pr = i[2],
R = i[0], G = i[1], B = i[2];
selectKbKr( std, &Kb, &Kr );
scaleRGB( std, 1.0, &R, &G, &B );
rgb2ycbcr( R, G, B, &Y, &Pb, &Pr, Kb,Kr );
scaleLinearToYCbCr( std, 1.0, &Y, &Pb, &Pr );
o[0] = Y; o[1] = Pb; o[2] = Pr;
}
/** Function lcm2SamplerJpegYCbCr2RGB
* @brief YCbCr in Jpeg range -> RGB
*
* ITU-R BT.601 / REC.601 coefficients in the Jpeg range of 0-1 are assumed.
*
* @param[in] i input REC.601 YCbCr in JPEG range triple
* @param[out] o output RGB triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerJpegYCbCr2RGB( const double i[],
double o[],
void * none OY_UNUSED )
{
/* final space YCbCr -> PCS.Lab
* Jpeg assumes no gamma correction
* Thus this sampler converts to RGB
* YCbCr -> scale range -> linear YCbCr -> (linear RGB (REC.709) -> Lab)
*/
ITU_Std_e std = ITU_R_BT_601_JPEG;
double Kr,Kb,
Y = i[0], Pb = i[1], Pr = i[2],
R,G,B;
selectKbKr( std, &Kb, &Kr );
scaleYCbCrToLinear( std, 1.0, &Y, &Pb, &Pr );
ycbcr2rgb( Y, Pb, Pr, &R, &G, &B, Kb,Kr );
scaleRGB( std, 1.0, &R, &G, &B );
o[0] = R; o[1] = G; o[2] = B;
}
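/* Sketch (illustrative only): one neutral pixel through both Jpeg range
 * samplers; luma stays put and the chroma channels end up at their 0.5
 * offset. */
static void lcm2DemoJpegYCbCr ( void )
{
  const double rgb[3] = { 0.5, 0.5, 0.5 };
  double ycc[3], back[3];
  lcm2SamplerRGB2JpegYCbCr( rgb, ycc, NULL );  /* ycc ~= { 0.5, 0.5, 0.5 } */
  lcm2SamplerJpegYCbCr2RGB( ycc, back, NULL ); /* back ~= rgb */
}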
/** Function lcm2SamplerIdendity
* @brief Lab -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.7
* @date 2018/02/26
* @since 2018/02/26 (Oyranos: 0.9.7)
*/
void lcm2SamplerIdendity ( const double i[],
double o[],
void * none OY_UNUSED )
{
o[0] = i[0]; // L / CIE*L / Y / R
o[1] = i[1]; // M / CIE*a / Cb / G
o[2] = i[2]; // S / CIE*b / Cr / B
}
/** Function lcm2SamplerGrayer
* @brief Lab -> Gray -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerGrayer ( const double i[],
double o[],
void * none OY_UNUSED )
{
o[0] = i[0]*1.0; // L / CIE*L / Y / R
o[1] = 0.5; // M / CIE*a / Cb / G
o[2] = 0.5; // S / CIE*b / Cr / B
}
/** Function lcm2SamplerBlacknWhite
* @brief Lab -> Black&White -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2SamplerBlacknWhite ( const double i[],
double o[],
void * none OY_UNUSED )
{
if(i[0] < 0.5)
o[0] = 0.0; // L / CIE*L / Y / R
else
o[0] = 1.0; // L / CIE*L / Y / R
o[1] = 0.5; // M / CIE*a / Cb / G
o[2] = 0.5; // S / CIE*b / Cr / B
}
/** Function lcm2SamplerSepia
* @brief Lab -> LCh -> Yellow -> LCh -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Creates a single reddish hue.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/14
* @since 2016/03/14 (Oyranos: 0.9.6)
*/
void lcm2SamplerSepia ( const double i[],
double o[],
void * none )
{
double in[3],out[3];
lcm2SamplerLab2LCh( i,in,none );
out[0] = in[0];
out[1] = 0.04+0.04*in[0];
out[2] = 0.18;
lcm2SamplerLCh2Lab( out,o,none );
}
/** Function lcm2SamplerReddish
* @brief Lab -> reddish tint -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Like Sepia, but gives all colors a reddish tint.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[out] none unused
*
* @version Oyranos: 0.9.6
* @date 2016/03/15
* @since 2016/03/15 (Oyranos: 0.9.6)
*/
void lcm2SamplerReddish ( const double i[],
double o[],
void * none OY_UNUSED )
{
o[0] = i[0];
o[1] = i[1] + 0.012+0.012*i[0];
o[2] = i[2] + 0.025+0.025*i[0];
}
/** Function lcm2SamplerWhitePointLab
* @brief Lab -> White Point Adaption -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Like reddish, but adapts all colors to a given white point difference.
* It uses simple linear adaption inside CIE*Lab.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[in] data pointer to array of two doubles with
* desired ICC*ab differences
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2017/05/17 (Oyranos: 0.9.7)
*/
void lcm2SamplerWhitePointLab( const double i[],
double o[],
void * data )
{
double * icc_ab = (double*) data;
o[0] = i[0];
o[1] = i[1] + icc_ab[0] * i[0];
o[2] = i[2] + icc_ab[1] * i[0];
}
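/* Sketch (illustrative only): the sampler shifts colors toward a new white
 * point; the offset scales with lightness, so black stays untouched. */
static void lcm2DemoWhitePointLab ( void )
{
  double icc_ab[2] = { 0.01, 0.02 };      /* desired ICC*ab difference */
  const double white[3] = { 1.0, 0.5, 0.5 }, black[3] = { 0.0, 0.5, 0.5 };
  double o[3];
  lcm2SamplerWhitePointLab( white, o, icc_ab ); /* o = { 1.0, 0.51, 0.52 } */
  lcm2SamplerWhitePointLab( black, o, icc_ab ); /* o = { 0.0, 0.50, 0.50 } */
}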
/** Function lcm2iccLab2CIEXYZ
* @brief ICC*Lab -> CIE*XYZ
*
* Converts from PCS Lab encoding to lcms XYZ type.
*
* @param[in] icc_Lab input Lab triple in PCS range
* @param[out] XYZ output XYZ struct
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2iccLab2CIEXYZ ( const double * icc_Lab,
cmsCIEXYZ * XYZ )
{
cmsCIELab Lab;
Lab.L = icc_Lab[0] * 100.0;
Lab.a = icc_Lab[1] * 257.0 - 128.0;
Lab.b = icc_Lab[2] * 257.0 - 128.0;
cmsLab2XYZ( cmsD50_XYZ(), XYZ, &Lab);
}
/** Function lcm2CIEXYZ2iccLab
* @brief CIE*XYZ -> ICC*Lab
*
* Converts from lcms XYZ type to PCS Lab encoding.
*
* @param[in] XYZ input XYZ struct
* @param[out] icc_Lab output Lab triple in PCS range
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2CIEXYZ2iccLab ( const cmsCIEXYZ * XYZ,
double * icc_Lab )
{
cmsCIELab Lab;
cmsXYZ2Lab( cmsD50_XYZ(), &Lab, XYZ );
icc_Lab[0] = Lab.L / 100.0;
icc_Lab[1] = (Lab.a + 128.0) / 257.0;
icc_Lab[2] = (Lab.b + 128.0) / 257.0;
}
/** Function lcm2iccXYZ2iccLab
* @brief ICC*XYZ -> ICC*Lab
*
* Converts from PCS XYZ to PCS Lab encoding.
*
* @param[in] XYZ input XYZ triple
* @param[out] icc_Lab output Lab triple in PCS range
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2iccXYZ2iccLab ( const double * XYZ,
double * icc_Lab )
{
cmsCIEXYZ XYZ_ = { XYZ[0], XYZ[1], XYZ[2] };
lcm2CIEXYZ2iccLab( &XYZ_, icc_Lab );
}
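/* Sketch (illustrative only): the PCS encoding helpers agree with the D50
 * reference; the D50-like XYZ values below are rounded. */
static void lcm2DemoPcsEncoding ( void )
{
  const double XYZ[3] = { 0.9642, 1.0, 0.8249 }; /* ~ D50 white */
  double lab[3];
  lcm2iccXYZ2iccLab( XYZ, lab ); /* lab ~= { 1.0, 0.5, 0.5 } */
}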
/** Function lcm2SamplerWhitePointBradford
* @brief Lab -> Bradford White Point Adaption -> Lab
*
* PCS Lab range of 0-1 for all channels is assumed.
* Like reddish, but adapts all colors to a given white point difference.
* It uses Bradford CAT.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[in] data pointer to array of six doubles:
* source ICC*XYZ white point, followed by
* destination ICC*XYZ white point
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2018/02/28 (Oyranos: 0.9.7)
*/
void lcm2SamplerWhitePointBradford ( const double i[],
double o[],
void * data )
{
double * icc_XYZ = (double*) data;
double scale = 100.0;
cmsCIEXYZ srcXYZwtpt, iXYZ, oXYZ, dstXYZillu;
srcXYZwtpt.X = icc_XYZ[0] * scale;
srcXYZwtpt.Y = icc_XYZ[1] * scale;
srcXYZwtpt.Z = icc_XYZ[2] * scale;
dstXYZillu.X = icc_XYZ[3+0] * scale;
dstXYZillu.Y = icc_XYZ[3+1] * scale;
dstXYZillu.Z = icc_XYZ[3+2] * scale;
lcm2iccLab2CIEXYZ( i, &iXYZ );
cmsAdaptToIlluminant( &oXYZ, &srcXYZwtpt, &dstXYZillu, &iXYZ );
lcm2CIEXYZ2iccLab( &oXYZ, o );
}
/** Function lcm2SamplerProof
* @brief Lab -> proofing profile -> Lab
*
* Convert a proofing profile into an abstract one.
* Abstract profiles can easily be merged into a multi profile transform.
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[in] data pointer to array of two void* with
* - desired cmsHTRANSFORM
* for cmsFloat32Number arrays in PT_Lab
* - cmsFLAGS_GAMUTCHECK flag
*
* @version Oyranos: 0.9.7
* @since 2009/11/04 (Oyranos: 0.1.10)
* @date 2017/06/03
*/
void lcm2SamplerProof ( const double i[],
double o[],
void * data )
{
cmsCIELab Lab1, Lab2;
double d;
cmsFloat32Number i_[3], o_[3];
void ** ptr = (void**)data;
i_[0] = Lab1.L = i[0] * 100.0;
i_[1] = Lab1.a = i[1] * 257.0 - 128.0;
i_[2] = Lab1.b = i[2] * 257.0 - 128.0;
cmsDoTransform( ptr[0], i_, o_, 1 );
Lab2.L = o_[0]; Lab2.a = o_[1]; Lab2.b = o_[2];
d = cmsDeltaE( &Lab1, &Lab2 );
if((fabs(d) > 10) && ptr[1] != NULL)
{
Lab2.L = 50.0;
Lab2.a = Lab2.b = 0.0;
}
o[0] = Lab2.L/100.0;
o[1] = (Lab2.a + 128.0) / 257.0;
o[2] = (Lab2.b + 128.0) / 257.0;
}
/** Function lcm2SamplerProofD
* @brief Lab -> proofing profile -> Lab
*
* Convert a proofing profile into an abstract one.
* Abstract profiles can easily be merged into a multi profile transform.
* PCS Lab range of 0-1 for all channels is assumed.
*
* @param[in] i input PCS.Lab triple
* @param[out] o output PCS.Lab triple
* @param[in] data pointer to array of two void* with
* - desired cmsHTRANSFORM
* for cmsFloat64Number arrays in PT_Lab
* - cmsFLAGS_GAMUTCHECK flag
*
* @version Oyranos: 0.9.7
* @since 2009/11/04 (Oyranos: 0.1.10)
* @date 2017/11/06
*/
void lcm2SamplerProofD ( const double i[],
double o[],
void * data )
{
cmsCIELab Lab1, Lab2;
double d;
cmsFloat64Number i_[3], o_[3];
void ** ptr = (void**)data;
i_[0] = Lab1.L = i[0] * 100.0;
i_[1] = Lab1.a = i[1] * 257.0 - 128.0;
i_[2] = Lab1.b = i[2] * 257.0 - 128.0;
cmsDoTransform( ptr[0], i_, o_, 1 );
Lab2.L = o_[0]; Lab2.a = o_[1]; Lab2.b = o_[2];
d = cmsDeltaE( &Lab1, &Lab2 );
if((fabs(d) > 10) && ptr[1] != NULL)
{
Lab2.L = 50.0;
Lab2.a = Lab2.b = 0.0;
}
o[0] = Lab2.L/100.0;
o[1] = (Lab2.a + 128.0) / 257.0;
o[2] = (Lab2.b + 128.0) / 257.0;
}
/** @} */ /* samplers */
/** Function lcm2CreateProfileLutByFunc
* @brief Generate an ICC profile LUT
*
* This function takes a series of parameters and functions to create an
* ICC profile from. The sampler function operates in an input space
* and creates colors in an output space. These values are filled into the
* profile LUT. It is possible to create effect profiles of class abstract
* or LUT profiles in any other color space including device links.
*
* For some already available sampler functions see @ref samplers.
*
* @param[in,out] profile profile to add LUT table
* @param[in] samplerMySpace the function to fill the LUT with color
* @param[in] samplerArg data pointer to samplerMySpace
* @param[in] my_space_profile operating color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] in_space_profile input color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] out_space_profile output color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] tag_sig tag signature for the generated LUT;
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
int lcm2CreateProfileLutByFunc (
cmsHPROFILE profile,
lcm2Sampler_f samplerMySpace,
void * samplerArg,
const char * in_space_profile,
const char * my_space_profile,
const char * out_space_profile,
int grid_size,
cmsTagSignature tag_sig
)
{
cmsToneCurve * t[max_channels];
int i;
int error = 0;
if(!profile) return 1;
t[0] = cmsBuildGamma(0, 1.0);
if(!t[0]) return 1;
for(i = 1; i < max_channels; ++i) t[i] = t[0];
error = lcm2CreateProfileLutByFuncAndCurves (
profile,
samplerMySpace,
samplerArg,
t, t,
in_space_profile,
my_space_profile,
out_space_profile,
grid_size, tag_sig
);
cmsFreeToneCurve( t[0] );
return error;
}
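/* Sketch (illustrative only, assuming the declarations from the profiler
 * header): fill a freshly created abstract fragment with the grayer
 * sampler; a grid size of 17 is an arbitrary small choice. */
static int lcm2DemoLutByFunc ( void )
{
  int error = 1;
  cmsHPROFILE p = lcm2CreateProfileFragment( "*lab", "*lab", 2.3,
                        "Grayer demo", "Demo Project", "Demo Vendor",
                        "no copyright", NULL, NULL, NULL );
  if(p)
  {
    error = lcm2CreateProfileLutByFunc( p, lcm2SamplerGrayer, NULL,
                        "*lab", "*lab", "*lab", 17, cmsSigAToB0Tag );
    cmsCloseProfile( p );
  }
  return error;
}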
/** Function lcm2CreateProfileLutByFuncAndCurves
* @brief Generate an ICC profile LUT
*
* This function takes a series of parameters and functions to create an
* ICC profile from. The sampler function operates in an input space
* and creates colors in an output space. These values are filled into the
* profile LUT. It is possible to create effect profiles of class abstract
* or LUT profiles in any other color space including device links.
*
* For some already available sampler functions see @ref samplers.
*
* @param[in,out] profile profile to add LUT table
* @param[in] samplerMySpace the function to fill the LUT with color
* @param[in] samplerArg data pointer to samplerMySpace
* @param[in] in_curves input curves
* @param[in] out_curves output curves
* @param[in] my_space_profile operating color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] in_space_profile input color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] out_space_profile output color space
* for samplerMySpace(); for wildcards see
* lcm2OpenProfileFile()
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] tag_sig tag signature for the generated LUT;
*
* @version Oyranos: 0.9.6
* @date 2017/05/17
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
int lcm2CreateProfileLutByFuncAndCurves (
cmsHPROFILE profile,
lcm2Sampler_f samplerMySpace,
void * samplerArg,
cmsToneCurve * in_curves[],
cmsToneCurve * out_curves[],
const char * in_space_profile,
const char * my_space_profile,
const char * out_space_profile,
int grid_size,
cmsTagSignature tag_sig
)
{
cmsHPROFILE h_in_space = 0,
h_my_space = 0,
h_out_space = 0;
cmsHTRANSFORM tr_In2MySpace = 0, tr_MySpace2Out = 0;
cmsStage * gmt_lut = 0, * gmt_lut16 = 0;
cmsPipeline * gmt_pl = cmsPipelineAlloc( 0,3,3 ),
* gmt_pl16 = cmsPipelineAlloc( 0,3,3 );
lcm2Cargo_s cargo;
int i;
int error = 0;
int in_layout, my_layout, out_layout;
in_layout = my_layout = out_layout = (FLOAT_SH(1)|CHANNELS_SH(3)|BYTES_SH(0));
if(!profile) return 1;
if(in_space_profile) h_in_space = lcm2OpenProfileFile( in_space_profile, NULL );
if(my_space_profile) h_my_space = lcm2OpenProfileFile( my_space_profile, NULL );
if(out_space_profile)h_out_space = lcm2OpenProfileFile( out_space_profile, NULL );
if(h_in_space && h_my_space && strcmp(in_space_profile,my_space_profile) != 0)
{
tr_In2MySpace = cmsCreateTransformTHR ( 0, h_in_space, in_layout,
h_my_space, my_layout,
INTENT_RELATIVE_COLORIMETRIC,
cmsFLAGS_NOOPTIMIZE);
if(!tr_In2MySpace) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; }
}
if(h_my_space && h_out_space && strcmp(my_space_profile,out_space_profile) != 0)
{
tr_MySpace2Out = cmsCreateTransformTHR( 0, h_my_space, my_layout,
h_out_space, out_layout,
INTENT_RELATIVE_COLORIMETRIC,
cmsFLAGS_NOOPTIMIZE);
if(!tr_MySpace2Out) { lcm2msg_p( 300, NULL, "no transform"); error = 1; goto lcm2CreateProfileLutByFuncAndCurvesClean; }
}
memset(&cargo, 0, sizeof(lcm2Cargo_s));
cargo.in2MySpace = tr_In2MySpace;
cargo.mySpace2Out = tr_MySpace2Out;
cargo.sampler = samplerMySpace;
cargo.sampler_variables = samplerArg;
cargo.channelsIn = h_in_space ? cmsChannelsOf( cmsGetColorSpace( h_in_space ) ) : 3;
cargo.channelsProcess = h_my_space ? cmsChannelsOf( cmsGetColorSpace( h_my_space ) ) : 3;
cargo.channelsOut = h_out_space ? cmsChannelsOf( cmsGetColorSpace( h_out_space ) ) : 3;
#pragma omp parallel for
for(i = 0; i < 2; ++i)
{
if(i)
{
gmt_lut16 = cmsStageAllocCLut16bit( 0, grid_size, 3,3,0 );
cmsStageSampleCLut16bit( gmt_lut16, lcm2sampler16, &cargo, 0 );
} else
{
gmt_lut = cmsStageAllocCLutFloat( 0, grid_size, 3,3,0 );
cmsStageSampleCLutFloat( gmt_lut, lcm2samplerFloat, &cargo, 0 );
}
}
/* 16-bit int */
cmsPipelineInsertStage( gmt_pl16, cmsAT_BEGIN,
cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) );
cmsPipelineInsertStage( gmt_pl16, cmsAT_END, gmt_lut16 );
cmsPipelineInsertStage( gmt_pl16, cmsAT_END,
cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) );
cmsWriteTag( profile, (tag_sig!=0)?tag_sig:cmsSigAToB0Tag, gmt_pl16 );
/* float */
/* cmsPipeline owns the cmsStage memory */
cmsPipelineInsertStage( gmt_pl, cmsAT_BEGIN,
cmsStageAllocToneCurves( 0, cargo.channelsIn, in_curves ) );
cmsPipelineInsertStage( gmt_pl, cmsAT_END, gmt_lut );
cmsPipelineInsertStage( gmt_pl, cmsAT_END,
cmsStageAllocToneCurves( 0, cargo.channelsOut, out_curves ) );
//cmsWriteTag( gmt, cmsSigDToB0Tag, gmt_pl );
lcm2CreateProfileLutByFuncAndCurvesClean:
if(h_in_space) {cmsCloseProfile( h_in_space );} h_in_space = 0;
if(h_my_space) {cmsCloseProfile( h_my_space );} h_my_space = 0;
if(h_out_space) {cmsCloseProfile( h_out_space );} h_out_space = 0;
if(tr_In2MySpace) {cmsDeleteTransform( tr_In2MySpace );} tr_In2MySpace = 0;
if(tr_MySpace2Out) {cmsDeleteTransform( tr_MySpace2Out );} tr_MySpace2Out = 0;
if(gmt_pl16) cmsPipelineFree( gmt_pl16 );
if(gmt_pl) cmsPipelineFree( gmt_pl );
return error;
}
/** Function lcm2CreateAbstractProfile
* @brief Create an effect profile of type abstract in ICC*Lab PCS
*
* Here a code example:
* @code
void samplerGrayer (const double i[],
double o[],
void * none )
{
o[0] = i[0]*1.0; // L / CIE*L / Y / R
o[1] = 0.5; // M / CIE*a / Cb / G
o[2] = 0.5; // S / CIE*b / Cr / B
}
const char * name_i18n[] = {
"de", "DE", "Graustufen (MyProject)",
"en", "US", "Grayer (MyProject)"
};
lcm2CreateAbstractProfile (
samplerGrayer,
NULL,
"*lab", // CIE*Lab
5,
2.3,
"Grayer (MyProject)",
name_i18n,
"Grayer myna",
"My Project 2016",
"My Name",
ICC_2011_LICENSE,
"CIE*L",
"http://www.cie.co.at",
NULL,
NULL
);
@endcode
*
* @param[in] samplerMySpace the function to fill the LUT with color
* @param[in] samplerArg data pointer to samplerMySpace
* @param[in] my_space_profile operating color space
* for samplerMySpace();
* "*lab" will set CIE*Lab
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] icc_profile_version 2.3 or 4.3
* @param[in] my_abstract_description internal profile name
* @param[in] my_abstract_descriptions internal profile name translated
* @param[in] my_abstract_file_name profile file name. If present, an ICC profile will be written to that name; optional
* @param[in] provider e.g. "My Project 2016"
* @param[in] vendor e.g. "My Name"
* @param[in] my_license e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile."
* - first %%s is replaced by the provider string arg and
* - second %%s is replaced by the vendor string arg
* @param[in] device_model e.g. "My Set"
* @param[in] device_manufacturer e.g. "www.mydomain.net"
* @param[in] my_meta_data e.g. {"DOMAIN_,GROUP_","DOMAIN_key1","value1","GROUP_key2","value2"}
* @param[out] h_profile the resulting profile
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
int lcm2CreateAbstractProfile(
lcm2Sampler_f samplerMySpace,
void * samplerArg,
const char * my_space_profile,
int grid_size,
double icc_profile_version,
const char * my_abstract_description,
const char ** my_abstract_descriptions,
const char * my_abstract_file_name,
const char * provider,
const char * vendor,
const char * my_license,
const char * device_model,
const char * device_manufacturer,
const char ** my_meta_data,
cmsHPROFILE * h_profile
)
{
cmsHPROFILE profile = 0;
int error = 0;
profile = lcm2CreateProfileFragment (
"*lab", // CIE*Lab
"*lab", // CIE*Lab
icc_profile_version,
my_abstract_description,
provider, vendor, my_license,
device_model, device_manufacturer, NULL);
if(!profile) goto lcm2CreateAbstractProfileClean;
if(my_meta_data)
lcm2AddMetaTexts ( profile, my_meta_data[0], &my_meta_data[1], cmsSigMetaTag );
error = lcm2CreateProfileLutByFunc( profile, samplerMySpace, samplerArg,
"*lab", my_space_profile, "*lab",
grid_size, cmsSigAToB0Tag );
if(error) goto lcm2CreateAbstractProfileClean;
lcm2AddMluDescription ( profile, my_abstract_descriptions,
cmsSigProfileDescriptionMLTag
);
if(my_abstract_file_name)
{
char * fn = lcm2WriteProfileToFile( profile, my_abstract_file_name, 0,0 );
lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
lcm2Free_m(fn);
}
if(h_profile)
*h_profile = profile;
else
cmsCloseProfile( profile );
lcm2CreateAbstractProfileClean:
return error;
}
/** Function lcm2CreateAbstractTemperatureProfile
* @brief Create an effect profile of type abstract in ICC*Lab PCS from a Kelvin value
*
* @param[in] kelvin the desired temperature in Kelvin; ICC reference (D50) is 5000 Kelvin
* @param[in] source_white_profile a profile, e.g. the actual monitor profile; optional, default is D50
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] icc_profile_version 2.3 or 4.3
* @param[out] my_abstract_file_name profile file name
* @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
*
* @version Oyranos: 0.9.7
* @date 2017/05/17
* @since 2017/05/17 (Oyranos: 0.9.7)
*/
int lcm2CreateAbstractTemperatureProfile (
float kelvin,
cmsHPROFILE source_white_profile,
int grid_size,
double icc_profile_version,
char ** my_abstract_file_name,
cmsHPROFILE * h_profile
)
{
cmsHPROFILE profile = NULL;
cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
/* type[6] Y = (a * X + b) ^ Gamma + c order: {g, a, b, c} */
double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
int i;
cmsCIEXYZ * source_white = NULL;
const char * kelvin_meta[] = {
"EFFECT_class", "reddish,white_point,atom",
"EFFECT_type", "CIEab",
"COLORIMETRY_white_point", "yes,reddish,kelvin",
"CMF_binary", "create-abstract",
"CMF_version", "0.9.7",
"CMF_product", "Oyranos",
0,0
};
char * kelvin_name = malloc(1024);
int error = !kelvin_name;
double icc_ab[2];
char * desc = NULL;
if(error) return 1;
if(source_white_profile)
{
if(cmsIsTag(source_white_profile, cmsSigProfileDescriptionTag))
{
cmsUInt32Number n = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, NULL, 0);
if(n)
{
desc = calloc( n+1, sizeof(char) );
if(!desc) goto lcm2CreateAbstractTemperatureProfileClean;
cmsUInt32Number nr = cmsGetProfileInfoASCII(source_white_profile, cmsInfoDescription, cmsNoLanguage, cmsNoCountry, desc, n);
if(n != nr)
lcm2msg_p( 301, NULL, "found propblem reading desc tag: %d %d", n,nr);
}
}
source_white = cmsReadTag( source_white_profile, cmsSigMediaWhitePointTag ); // MediaWhitePointTag
}
i_curve[0] = o_curve[0] = cmsBuildGamma(0, 1.0);
if(!i_curve[0]) error = 1;
for(i = 1; i < 3; ++i) { i_curve[i] = i_curve[0]; }
if(!error)
{
cmsCIExyY xyWhitePoint;
cmsFloat64Number TempK = kelvin;
/* 4000 - 25000 K */
cmsWhitePointFromTemp( &xyWhitePoint, TempK );
cmsCIEXYZ WhitePoint;
const cmsCIEXYZ * reference_white = cmsD50_XYZ();
float max_brightness;
cmsxyY2XYZ( &WhitePoint, &xyWhitePoint );
cmsCIELab LabWhitePoint;
cmsCIELab SrcLabWhitePoint;
if(source_white)
reference_white = source_white;
cmsXYZ2Lab( reference_white, &LabWhitePoint, &WhitePoint );
icc_ab[0] = LabWhitePoint.a/128.0;
icc_ab[1] = LabWhitePoint.b/128.0;
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0)
#endif
/* reduce brightness remaining inside a cone with a roof angle of 30° */
max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
cmsXYZ2Lab( cmsD50_XYZ(), &SrcLabWhitePoint, reference_white );
cmsXYZ2Lab( cmsD50_XYZ(), &LabWhitePoint, &WhitePoint );
lcm2msg_p( 302, NULL, "SrcW: %g %g %g LabW: %g %g %g diff: %g %g max brightness: %g",
SrcLabWhitePoint.L, SrcLabWhitePoint.a, SrcLabWhitePoint.b,
LabWhitePoint.L, LabWhitePoint.a, LabWhitePoint.b,
icc_ab[0], icc_ab[1], max_brightness );
/* avoid color clipping around the white point */
curve_params_low[1] = max_brightness;
o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
if(!o_curve[0] || !o_curve[1]) error = 1;
}
if(error) goto lcm2CreateAbstractTemperatureProfileClean;
if(icc_ab[1] > 0)
{
sprintf( kelvin_name, "Reddish %d K (www.oyranos.org)", (int)kelvin );
} else if(icc_ab[1] == 0) {
sprintf( kelvin_name, "%d K (www.oyranos.org)", (int)kelvin );
kelvin_meta[1] = "neutral,white_point,atom";
kelvin_meta[3] = "yes,D50,kelvin";
} else {
sprintf( kelvin_name, "Bluish %d K (www.oyranos.org)", (int)kelvin );
kelvin_meta[1] = "bluish,white_point,atom";
kelvin_meta[3] = "yes,bluish,kelvin";
}
if(source_white_profile)
{
if(desc && strlen(desc) < 900)
sprintf( &kelvin_name[strlen(kelvin_name)], " - %s", desc);
if(icc_ab[1] > 0)
{
kelvin_meta[1] = "reddish,white_point,atom,device";
kelvin_meta[3] = "yes,reddish,kelvin";
} else if(icc_ab[1] == 0) {
kelvin_meta[1] = "neutral,white_point,atom,device";
kelvin_meta[3] = "yes,D50,kelvin";
} else {
kelvin_meta[1] = "bluish,white_point,atom,device";
kelvin_meta[3] = "yes,bluish,kelvin";
}
}
if(!error)
/* profile fragment creation */
profile = lcm2CreateProfileFragment (
"*lab", // CIE*Lab
"*lab", // CIE*Lab
icc_profile_version,
kelvin_name,
"Oyranos project 2017",
"Kai-Uwe Behrmann",
ICC_2011_LICENSE,
"CIE*Lab",
"http://www.cie.co.at",
NULL);
if(!profile) error = 1;
if(!error)
error = lcm2CreateProfileLutByFuncAndCurves( profile,
lcm2SamplerWhitePointLab, icc_ab,
o_curve, i_curve,
"*lab", "*lab", "*lab",
grid_size, cmsSigAToB0Tag );
if(!error)
lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );
lcm2CreateAbstractTemperatureProfileClean:
if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
*my_abstract_file_name = kelvin_name;
if(h_profile)
*h_profile = profile;
else if(profile && *my_abstract_file_name)
{
char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
lcm2Free_m(fn);
cmsCloseProfile( profile );
}
return error;
}
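/* Sketch (illustrative only): request a 6500 K adaption profile and keep
 * the handle; the name buffer is allocated by the call and belongs to the
 * caller. */
static int lcm2DemoTemperatureProfile ( void )
{
  char * name = NULL;
  cmsHPROFILE p = NULL;
  int error = lcm2CreateAbstractTemperatureProfile( 6500, NULL, 17, 2.3,
                                                    &name, &p );
  if(!error && p) cmsCloseProfile( p );
  lcm2Free_m( name );
  return error;
}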
/** Function lcm2CreateAbstractWhitePointProfileLab
* @brief Create an effect profile of type abstract in ICC*Lab PCS for white point adjustment
*
* These profiles can be applied as 1D / per-channel-only adjustments.
* It will be marked with EFFECT_linear=yes in the meta tag.
*
* @param[in] cie_a CIE*a correction value in -0.5 - 0.5 range
* @param[in] cie_b CIE*b correction value in -0.5 - 0.5 range
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] icc_profile_version 2.3 or 4.3
* @param[out] my_abstract_file_name profile file name
* @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2017/06/02 (Oyranos: 0.9.7)
*/
int lcm2CreateAbstractWhitePointProfileLab (
double cie_a,
double cie_b,
int grid_size,
double icc_profile_version,
char ** my_abstract_file_name,
cmsHPROFILE * h_profile
)
{
cmsHPROFILE profile = NULL;
cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
/* type[6] Y = (a * X + b) ^ Gamma + c order: {g, a, b, c} */
double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
int i;
const char * kelvin_meta[] = {
"EFFECT_class", "reddish,white_point,linear,atom",
"EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */
"EFFECT_type", "CIEab",
"COLORIMETRY_white_point", "yes,reddish,kelvin",
"CMF_binary", "create-abstract",
"CMF_version", "0.9.7",
"CMF_product", "Oyranos",
0,0
};
char * kelvin_name = malloc(1024);
int error = !kelvin_name;
double icc_ab[2] = {cie_a, cie_b};
if(error) return 1;
i_curve[0] = cmsBuildGamma(0, 1.0);
if(!i_curve[0]) error = 1;
for(i = 1; i < 3; ++i)
{ i_curve[i] = i_curve[0]; }
if(!error)
{
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0)
#endif
/* reduce brightness remaining inside a cone with a roof angle of 30° */
double max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
/* avoid color clipping around the white point */
curve_params_low[1] = max_brightness;
o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
if(!o_curve[0] || !o_curve[1]) error = 1;
}
if(error) goto lcm2CreateAbstractWhitePointProfileClean;
if(icc_ab[1] > 0)
{
sprintf( kelvin_name, "Reddish CIE*a %g CIE*b %g", cie_a, cie_b );
} else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001) {
sprintf( kelvin_name, "CIE*a %g CIE*b %g", cie_a, cie_b );
kelvin_meta[1] = "neutral,white_point,atom";
kelvin_meta[3] = "yes,D50,kelvin";
} else {
sprintf( kelvin_name, "Bluish CIE*a %g CIE*b %g", cie_a, cie_b );
kelvin_meta[1] = "bluish,white_point,atom";
kelvin_meta[3] = "yes,bluish,kelvin";
}
profile = lcm2CreateProfileFragment (
"*lab", // CIE*Lab
"*lab", // CIE*Lab
icc_profile_version,
kelvin_name,
"Oyranos project 2018",
"Kai-Uwe Behrmann",
ICC_2011_LICENSE,
"CIE*Lab",
"http://www.cie.co.at",
NULL);
if(!profile) goto lcm2CreateAbstractWhitePointProfileClean;
error = lcm2CreateProfileLutByFuncAndCurves( profile,
lcm2SamplerWhitePointLab, icc_ab,
o_curve, i_curve,
"*lab", "*lab", "*lab",
grid_size, cmsSigAToB0Tag );
if(!error)
lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );
lcm2CreateAbstractWhitePointProfileClean:
if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
*my_abstract_file_name = kelvin_name;
if(h_profile)
*h_profile = profile;
else if(profile && *my_abstract_file_name)
{
char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
lcm2Free_m(fn);
cmsCloseProfile( profile );
}
return error;
}
/** Function lcm2CreateAbstractWhitePointProfileBradford
* @brief Create an effect profile of type abstract in ICC*Lab PCS for white point adjustment
*
* These profiles can be applied as 1D / per-channel-only adjustments.
* It will be marked with EFFECT_linear=yes in the meta tag.
*
* @param[in] src_iccXYZ source ICC*XYZ white point in 0.0 - 2.0 range
* @param[in] illu_iccXYZ ICC*XYZ illuminant in 0.0 - 2.0 range
* @param[in] grid_size dimensions of the created LUT; e.g. 33
* @param[in] icc_profile_version 2.3 or 4.3
* @param[out] my_abstract_file_name profile file name
* @param[out] h_profile the resulting profile; If omitted the function will write the profile to my_abstract_file_name.
*
* @version Oyranos: 0.9.7
* @date 2018/02/28
* @since 2017/06/02 (Oyranos: 0.9.7)
*/
int lcm2CreateAbstractWhitePointProfileBradford (
double * src_iccXYZ,
double * illu_iccXYZ,
int grid_size,
double icc_profile_version,
char ** my_abstract_file_name,
cmsHPROFILE * h_profile
)
{
cmsHPROFILE profile = NULL;
cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
/* type[6] Y = (a * X + b) ^ Gamma + c order: {g, a, b, c} */
double curve_params[4] = {1,1,0,0}, curve_params_low[4] = {1,0.95,0,0};
int i;
const char * kelvin_meta[] = {
"EFFECT_class", "reddish,type,white_point,linear,atom",
"EFFECT_linear", "yes", /* can be used for 1D curves like VCGT */
"COLORIMETRY_white_point", "yes,reddish,kelvin",
"EFFECT_type", "bradford",
"CMF_binary", "create-abstract",
"CMF_version", "0.9.7",
"CMF_product", "Oyranos",
0,0
};
char * kelvin_name = malloc(1024);
int error = !kelvin_name;
double icc_XYZ[6] = { src_iccXYZ[0], src_iccXYZ[1], src_iccXYZ[2],
illu_iccXYZ[0], illu_iccXYZ[1], illu_iccXYZ[2]};
double icc_ab[2] = {0,0};
if(error) return 1;
i_curve[0] = cmsBuildGamma(0, 1.0);
if(!i_curve[0]) error = 1;
for(i = 1; i < 3; ++i)
{ i_curve[i] = i_curve[0]; }
if(!error)
{
#ifndef OY_HYP
#define OY_SQRT(a,b) ((a)*(a) + (b)*(b))
#define OY_HYP(a,b) pow(OY_SQRT(a,b),1.0/2.0)
#endif
/* reduce brightness remaining inside a cone with a roof angle of 30° */
double max_brightness;
double src_Lab[3], dst_Lab[3];
lcm2iccXYZ2iccLab( src_iccXYZ, src_Lab );
lcm2iccXYZ2iccLab( illu_iccXYZ, dst_Lab );
icc_ab[0] = dst_Lab[1] - src_Lab[1];
icc_ab[1] = dst_Lab[2] - src_Lab[2];
max_brightness = 1.0 - OY_HYP(icc_ab[0],icc_ab[1]/1.5);
/* avoid color clipping around the white point */
curve_params_low[1] = max_brightness;
o_curve[0] = cmsBuildParametricToneCurve(0, 6, curve_params_low);
o_curve[1] = o_curve[2] = cmsBuildParametricToneCurve(0, 6, curve_params);
if(!o_curve[0] || !o_curve[1]) error = 1;
}
if(error) goto lcm2CreateAbstractWhitePointProfileBClean;
if(icc_ab[1] > 0)
{
sprintf( kelvin_name, "Reddish CIE*a %g CIE*b %g", icc_ab[0], icc_ab[1] );
} else if(-0.001 < icc_ab[1] && icc_ab[0] < 0.001) {
sprintf( kelvin_name, "CIE*a %g CIE*b %g", icc_ab[0], icc_ab[1] );
kelvin_meta[1] = "neutral,type,white_point,atom";
kelvin_meta[3] = "yes,D50,kelvin";
} else {
sprintf( kelvin_name, "Bluish CIE*a %g CIE*b %g", icc_ab[0], icc_ab[1] );
kelvin_meta[1] = "bluish,type,white_point,atom";
kelvin_meta[3] = "yes,bluish,kelvin";
}
profile = lcm2CreateProfileFragment (
"*lab", // CIE*Lab
"*lab", // CIE*Lab
icc_profile_version,
kelvin_name,
"Oyranos project 2018",
"Kai-Uwe Behrmann",
ICC_2011_LICENSE,
"Bradford",
"http://www.cie.co.at",
NULL);
if(!profile) goto lcm2CreateAbstractWhitePointProfileBClean;
error = lcm2CreateProfileLutByFuncAndCurves( profile,
lcm2SamplerWhitePointBradford, icc_XYZ,
o_curve, i_curve,
"*lab", "*lab", "*lab",
grid_size, cmsSigAToB0Tag );
if(!error)
lcm2AddMetaTexts ( profile, "EFFECT_,COLORIMETRY_,CMF_", kelvin_meta, cmsSigMetaTag );
lcm2CreateAbstractWhitePointProfileBClean:
if(i_curve[0]) cmsFreeToneCurve( i_curve[0] );
if(o_curve[0]) cmsFreeToneCurve( o_curve[0] );
if(o_curve[1]) cmsFreeToneCurve( o_curve[1] );
*my_abstract_file_name = kelvin_name;
if(h_profile)
*h_profile = profile;
else if(profile && *my_abstract_file_name)
{
char * fn = lcm2WriteProfileToFile( profile, *my_abstract_file_name, 0,0 );
lcm2msg_p( 302, NULL, "wrote to: %s", fn?fn:"----");
lcm2Free_m(fn);
cmsCloseProfile( profile );
}
return error;
}
/** Function lcm2CreateProfileFragment
* @brief Create a color profile starter
*
* In case both the in_space_profile and out_space_profile arguments are set
* to "*lab", the profile will be set to class abstract. With in_space_profile
* being a device color space and out_space_profile a PCS ("*lab" or XYZ),
* a color profile of class input will be generated. With in_space_profile
* being a PCS and out_space_profile a device color space, a color profile of
* class output will be generated. If neither side is a PCS, a device link
* class results. Note such profiles have initially no backward LUT and cannot
* be used for inverse color transforms, which might be a problem for general
* purpose ICC profiles. But you can add more tables if needed by passing in a
* previously created profile.
*
* All profiles generated by this function are meant to be filled with
* colorimetric data by e.g. lcm2CreateProfileLutByFunc() or
* lcm2CreateICCMatrixProfile2().
*
* Here a code example:
* @code
cmsHPROFILE profile = lcm2CreateProfileFragment (
"*srgb", // sRGB
"*lab", // CIE*Lab
2.3,
"MySpace (MyProject)",
"My Project 2016",
"My Name",
ICC_2011_LICENSE,
"My Box", "www.mydomain.net", NULL
);
@endcode
*
* @param[in] in_space_profile input color space; for wildcards see
* lcm2OpenProfileFile()
* @param[in] out_space_profile output color space; for wildcards see
* lcm2OpenProfileFile()
* @param[in] icc_profile_version 2.3 or 4.3
* @param[in] my_abstract_description internal profile name
* @param[in] provider e.g. "My Project 2016"
* @param[in] vendor e.g. "My Name"
* @param[in] my_license e.g. "This profile is made available by %s, with permission of %s, and may be copied, distributed, embedded, made, used, and sold without restriction. Altered versions of this profile shall have the original identification and copyright information removed and shall not be misrepresented as the original profile."
* - first %%s is replaced by the provider string arg and
* - second %%s is replaced by the vendor string arg
* @param[in] device_model e.g. "My Set"
* @param[in] device_manufacturer e.g. "www.mydomain.net"; hint:
* lcms <= 2.08 writes a malformed desc tag
* @param[in,out] h_profile use existing profile; optional
*
* @version Oyranos: 0.9.6
* @date 2016/03/06
* @since 2009/11/04 (Oyranos: 0.1.10)
*/
cmsHPROFILE lcm2CreateProfileFragment(
const char * in_space_profile,
const char * out_space_profile,
double icc_profile_version,
const char * my_abstract_description,
const char * provider,
const char * vendor,
const char * my_license,
const char * device_model,
const char * device_manufacturer,
cmsHPROFILE h_profile
)
{
cmsHPROFILE h_in_space = 0,
h_out_space = 0;
cmsColorSpaceSignature csp_in, csp_out;
cmsProfileClassSignature profile_class = cmsSigAbstractClass;
cmsMLU * mlu[4] = {0,0,0,0};
int i;
char * license = NULL;
if(!h_profile)
{ h_profile = cmsCreateProfilePlaceholder( 0 ); }
if(!h_profile) goto lcm2CreateProfileFragmentClean;
if(in_space_profile) h_in_space = lcm2OpenProfileFile( in_space_profile, NULL );
if(out_space_profile)h_out_space = lcm2OpenProfileFile( out_space_profile, NULL );
csp_in = cmsGetColorSpace( h_in_space );
csp_out = cmsGetColorSpace( h_out_space );
cmsSetProfileVersion( h_profile, icc_profile_version );
#define CSP_IS_PCS(csp) (csp == cmsSigLabData || csp == cmsSigXYZData)
if( CSP_IS_PCS(csp_in) && CSP_IS_PCS(csp_out) )
profile_class = cmsSigAbstractClass;
else if( CSP_IS_PCS(csp_out) )
profile_class = cmsSigInputClass;
else if( CSP_IS_PCS(csp_in) )
profile_class = cmsSigOutputClass;
else
profile_class = cmsSigLinkClass;
cmsSetDeviceClass( h_profile, profile_class );
cmsSetColorSpace( h_profile, csp_in );
cmsSetPCS( h_profile, csp_out );
for(i = 0; i < 4; ++i)
mlu[i] = cmsMLUalloc(0,1);
if(!(mlu[0] && mlu[1] && mlu[2] && mlu[3]))
return h_profile;
cmsMLUsetASCII(mlu[0], "EN", "us", my_abstract_description);
cmsWriteTag( h_profile, cmsSigProfileDescriptionTag, mlu[0] );
if(device_model)
{
cmsMLUsetASCII(mlu[1], "EN", "us", device_model);
cmsWriteTag( h_profile, cmsSigDeviceModelDescTag, mlu[1]);
}
if(device_manufacturer)
{
cmsMLUsetASCII(mlu[2], "EN", "us", device_manufacturer);
cmsWriteTag( h_profile, cmsSigDeviceMfgDescTag, mlu[2]);
}
license = (char *) malloc( strlen(my_license) + strlen(provider) + strlen(vendor) + 1 );
if(!license) goto lcm2CreateProfileFragmentClean;
sprintf( license, my_license, provider, vendor );
cmsMLUsetASCII(mlu[3], "EN", "us", license);
cmsWriteTag( h_profile, cmsSigCopyrightTag, mlu[3]);
cmsWriteTag( h_profile, cmsSigMediaWhitePointTag, cmsD50_XYZ() );
lcm2CreateProfileFragmentClean:
if(h_in_space) { cmsCloseProfile( h_in_space ); } h_in_space = 0;
if(h_out_space) { cmsCloseProfile( h_out_space ); } h_out_space = 0;
for(i = 0; i < 4; ++i)
cmsMLUfree( mlu[i] );
lcm2Free_m(license);
return h_profile;
}
int isBigEndian ()
{ union { unsigned short u16; unsigned char c; } test = { .u16 = 1 }; return !test.c; }
/* UTF-8 to WCHAR_T conversion */
typedef uint32_t UTF32; /* at least 32 bits */
typedef uint16_t UTF16; /* at least 16 bits */
typedef uint8_t UTF8; /* typically 8 bits */
typedef unsigned char Boolean; /* 0 or 1 */
/* Some fundamental constants */
#define UNI_REPLACEMENT_CHAR (UTF32)0x0000FFFD
#define UNI_MAX_BMP (UTF32)0x0000FFFF
#define UNI_MAX_UTF16 (UTF32)0x0010FFFF
#define UNI_MAX_UTF32 (UTF32)0x7FFFFFFF
#define UNI_MAX_LEGAL_UTF32 (UTF32)0x0010FFFF
typedef enum {
conversionOK, /* conversion successful */
sourceExhausted, /* partial character in source, but hit end */
targetExhausted, /* insuff. room in target for conversion */
sourceIllegal /* source sequence is illegal/malformed */
} lcm2UtfConversionResult;
typedef enum {
strictConversion = 0,
lenientConversion
} lcm2UtfConversionFlags;
static const int halfShift = 10; /* used for shifting by 10 bits */
static const UTF32 halfBase = 0x0010000UL;
static const UTF32 halfMask = 0x3FFUL;
#define UNI_SUR_HIGH_START (UTF32)0xD800
#define UNI_SUR_HIGH_END (UTF32)0xDBFF
#define UNI_SUR_LOW_START (UTF32)0xDC00
#define UNI_SUR_LOW_END (UTF32)0xDFFF
#define false 0
#define true 1
/*
* Index into the table below with the first byte of a UTF-8 sequence to
* get the number of trailing bytes that are supposed to follow it.
* Note that *legal* UTF-8 values can't have 4 or 5-bytes. The table is
* left as-is for anyone who may want to do such conversion, which was
* allowed in earlier algorithms.
*/
static const char trailingBytesForUTF8[256] = {
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0, 0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1, 1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,
2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2, 3,3,3,3,3,3,3,3,4,4,4,4,5,5,5,5
};
/*
* Magic values subtracted from a buffer value during UTF8 conversion.
* This table contains as many values as there might be trailing bytes
* in a UTF-8 sequence.
*/
static const UTF32 offsetsFromUTF8[6] = { 0x00000000UL, 0x00003080UL, 0x000E2080UL,
0x03C82080UL, 0xFA082080UL, 0x82082080UL };
/*
* Utility routine to tell whether a sequence of bytes is legal UTF-8.
* This must be called with the length pre-determined by the first byte.
* If not calling this from ConvertUTF8to*, then the length can be set by:
* length = trailingBytesForUTF8[*source]+1;
* and the sequence is illegal right away if there aren't that many bytes
* available.
* If presented with a length > 4, this returns false. The Unicode
* definition of UTF-8 goes up to 4-byte sequences.
*/
static Boolean isLegalUTF8(const UTF8 *source, int length)
{
UTF8 a;
const UTF8 *srcptr = source+length;
switch (length) {
default: return false;
/* Everything else falls through when "true"... */
case 4: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH
case 3: if ((a = (*--srcptr)) < 0x80 || a > 0xBF) return false; OY_FALLTHROUGH
case 2: if ((a = (*--srcptr)) > 0xBF) return false;
switch (*source) {
/* no fall-through in this inner switch */
case 0xE0: if (a < 0xA0) return false; break;
case 0xED: if (a > 0x9F) return false; break;
case 0xF0: if (a < 0x90) return false; break;
case 0xF4: if (a > 0x8F) return false; break;
default: if (a < 0x80) return false; OY_FALLTHROUGH
} OY_FALLTHROUGH
case 1: if (*source >= 0x80 && *source < 0xC2) return false;
}
if (*source > 0xF4) return false;
return true;
}
lcm2UtfConversionResult lcm2ConvertUTF8toUTF16 (const UTF8** sourceStart, const UTF8* sourceEnd,
UTF16** targetStart, UTF16* targetEnd, lcm2UtfConversionFlags flags)
{
lcm2UtfConversionResult result = conversionOK;
const UTF8* source = *sourceStart;
UTF16* target = *targetStart;
while (source < sourceEnd) {
UTF32 ch = 0;
unsigned short extraBytesToRead = trailingBytesForUTF8[*source];
if (source + extraBytesToRead >= sourceEnd) {
result = sourceExhausted; break;
}
/* Do this check whether lenient or strict */
if (! isLegalUTF8(source, extraBytesToRead+1)) {
result = sourceIllegal;
break;
}
/*
* The cases all fall through. See "Note A" below.
*/
switch (extraBytesToRead) {
case 5: ch += *source++; ch <<= 6; OY_FALLTHROUGH /* remember, illegal UTF-8 */
case 4: ch += *source++; ch <<= 6; OY_FALLTHROUGH /* remember, illegal UTF-8 */
case 3: ch += *source++; ch <<= 6; OY_FALLTHROUGH
case 2: ch += *source++; ch <<= 6; OY_FALLTHROUGH
case 1: ch += *source++; ch <<= 6; OY_FALLTHROUGH
case 0: ch += *source++; OY_FALLTHROUGH
}
ch -= offsetsFromUTF8[extraBytesToRead];
if (target >= targetEnd) {
source -= (extraBytesToRead+1); /* Back up source pointer! */
result = targetExhausted; break;
}
if (ch <= UNI_MAX_BMP) { /* Target is a character <= 0xFFFF */
/* UTF-16 surrogate values are illegal in UTF-32 */
if (ch >= UNI_SUR_HIGH_START && ch <= UNI_SUR_LOW_END) {
if (flags == strictConversion) {
source -= (extraBytesToRead+1); /* return to the illegal value itself */
result = sourceIllegal;
break;
} else {
*target++ = UNI_REPLACEMENT_CHAR;
}
} else {
*target++ = (UTF16)ch; /* normal case */
}
} else if (ch > UNI_MAX_UTF16) {
if (flags == strictConversion) {
result = sourceIllegal;
source -= (extraBytesToRead+1); /* return to the start */
break; /* Bail out; shouldn't continue */
} else {
*target++ = UNI_REPLACEMENT_CHAR;
}
} else {
/* target is a character in range 0xFFFF - 0x10FFFF. */
if (target + 1 >= targetEnd) {
source -= (extraBytesToRead+1); /* Back up source pointer! */
result = targetExhausted; break;
}
ch -= halfBase;
*target++ = (UTF16)((ch >> halfShift) + UNI_SUR_HIGH_START);
*target++ = (UTF16)((ch & halfMask) + UNI_SUR_LOW_START);
}
}
*sourceStart = source;
*targetStart = target;
return result;
}
wchar_t * lcm2Utf8ToWchar ( const char * text )
{
wchar_t * wchar_out, * tmp_out;
char * in, * tmp_in;
size_t in_len = strlen(text),
out_len = in_len*sizeof(wchar_t)+sizeof(wchar_t);
lcm2UtfConversionResult error;
if(!in_len) return 0;
else ++in_len;
tmp_out = wchar_out = calloc( in_len+1, sizeof(wchar_t) );
in = tmp_in = strdup( text );
error = lcm2ConvertUTF8toUTF16( (const UTF8**)&in, (const UTF8*)in+in_len, (UTF16**)&tmp_out, (UTF16*)(tmp_out+out_len), lenientConversion );
if(error == conversionOK)
{
/* store UTF16BE in wchar_t for lcms2 */
uint16_t * icc_utf16 = (uint16_t*) wchar_out;
int i;
for(i = in_len; i >= 0; --i) wchar_out[i] = icc_utf16[i];
}
else
{
lcm2msg_p( 300, NULL, "error[%d] %lu %lu %s", error, in_len, out_len, text );
lcm2Free_m(wchar_out);
}
lcm2Free_m( tmp_in );
return wchar_out;
}
/** Function lcm2AddMluDescription
* @brief Add translated texts to a profile
*
* Iterates over the provided string list, converts from "UTF-8" input
* to "WCHAR_T" for lcms and
* does byte swapping on little endian machines.
*
* Here a code example:
* @code
const char * texts[] = {
"de", "DE", "Mein Text",
"en", "US", "My Text"
};
lcm2AddMluDescription ( profile, texts,
cmsSigProfileDescriptionMLTag
);
@endcode
*
* @param[in,out] profile color profile
* @param[in] texts language + country + text list
* @param[in] tag_sig signature
*
* @version Oyranos: 0.9.6
* @date 2016/03/13
* @since 2016/03/13 (Oyranos: 0.9.6)
*/
void lcm2AddMluDescription ( cmsHPROFILE profile,
const char * texts[],
cmsTagSignature tag_sig )
{
int n = 0, i;
cmsMLU * mlu = NULL;
if(texts)
while( texts[n] ) ++n;
if(!n) return;
mlu = cmsMLUalloc( 0, n/3 + 1 );
if(!mlu) return;
for( i = 0; i < n; i += 3 )
{
char lang[4] = {0,0,0,0}, country[4] = {0,0,0,0};
const char * text = texts[i+2];
wchar_t * wchar_out;
wchar_out = lcm2Utf8ToWchar( text );
if(!wchar_out) continue;
/* the language code is stored as readable 4 byte string */
lang[0] = texts[i+0][0]; lang[1] = texts[i+0][1];
country[0] = texts[i+1][0]; country[1] = texts[i+1][1];
cmsMLUsetWide( mlu, lang, country, wchar_out );
lcm2Free_m( wchar_out );
}
cmsWriteTag( profile, tag_sig, mlu );
cmsMLUfree( mlu );
}
/** Function lcm2AddMetaTexts
* @brief Add meta data to a profile
*
* Iterates over the provided string list, converts from "UTF-8" input
* to "WCHAR_T" for lcms and
* does byte swapping on little endian machines.
*
* Here a code example:
* @code
const char * texts[] = {
"GROUP_key1", "value1",
"DOMAIN_key2", "value2"
};
lcm2AddMetaTexts ( profile, "GROUP_,DOMAIN_", texts,
cmsSigMetaTag
);
@endcode
*
* A prefix allows for grouping of keys like "EDID_" or "EXIF_".
* The prefix part might be cut off in some cases to access an other level
* of keys. Think of "EDID_model" for monitors and "EXIF_model" for cameras,
* which both represent the key "model" concept.
*
* @param[in,out] profile color profile
* @param[in] prefixes The used uppercase prefix list.
* @param[in] key_value key + value list
* @param[in] tag_sig signature
*
* @version Oyranos: 0.9.7
* @date 2017/02/11
* @since 2017/02/11 (Oyranos: 0.9.7)
*/
void lcm2AddMetaTexts ( cmsHPROFILE profile,
const char * prefixes,
const char * key_value[],
cmsTagSignature tag_sig )
{
int n = 0, i;
cmsHANDLE dict = NULL;
cmsContext contextID = cmsCreateContext( NULL,NULL );
wchar_t * wchar_key = NULL, * wchar_val = NULL;
if(key_value)
while( key_value[n] ) ++n;
if(n)
dict = cmsDictAlloc( contextID );
else
lcm2msg_p( 300, NULL, "nothing to write %s", __func__ );
if(!dict)
return;
if(prefixes)
{
wchar_key = lcm2Utf8ToWchar( "prefix" );
wchar_val = lcm2Utf8ToWchar( prefixes );
}
if(wchar_key && wchar_val)
cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
lcm2Free_m( wchar_key );
lcm2Free_m( wchar_val );
for( i = 0; i < n; i += 2 )
{
const char * key = key_value[i+0],
* val = key_value[i+1];
wchar_key = lcm2Utf8ToWchar(key);
wchar_val = lcm2Utf8ToWchar(val);
if(!wchar_key || !wchar_val)
{
lcm2Free_m( wchar_key );
lcm2Free_m( wchar_val );
continue;
}
cmsDictAddEntry( dict, wchar_key, wchar_val, NULL,NULL );
lcm2Free_m( wchar_key );
lcm2Free_m( wchar_val );
}
cmsWriteTag( profile, tag_sig, dict );
cmsDictFree( dict );
}
/** Function lcm2CreateICCMatrixProfile2
* @brief Create a profile from primaries, white point and one gamma value
*
* Used for ICC from EDID, Camera RAW etc. Marti calls these matrix/shaper.
* @code
// create linear space with REC.709/sRGB primaries and D65 white point
cmsHPROFILE h_my_space = lcm2CreateICCMatrixProfile2( 1.0, 0.64,0.33, 0.30,0.60, 0.15,0.06, 0.3127,0.329 );
@endcode
*
* @version Oyranos: 0.9.6
* @date 2016/03/04
* @since 2009/10/24 (Oyranos: 0.1.10)
*/
cmsHPROFILE lcm2CreateICCMatrixProfile2 (
float gamma,
float rx, float ry,
float gx, float gy,
float bx, float by,
float wx, float wy )
{
cmsCIExyYTRIPLE p;
cmsToneCurve * g[3] = {0,0,0};
/* 0.31271, 0.32902 D65 */
cmsCIExyY wtpt_xyY;
cmsHPROFILE lp = 0;
p.Red.x = rx;
p.Red.y = ry;
p.Red.Y = 1.0;
p.Green.x = gx;
p.Green.y = gy;
p.Green.Y = 1.0;
p.Blue.x = bx;
p.Blue.y = by;
p.Blue.Y = 1.0;
wtpt_xyY.x = wx;
wtpt_xyY.y = wy;
wtpt_xyY.Y = 1.0;
g[0] = g[1] = g[2] = cmsBuildGamma(0, (double)gamma);
if(!g[0]) return NULL;
lp = cmsCreateRGBProfile( &wtpt_xyY, &p, g);
cmsFreeToneCurve( g[0] );
return lp;
}
/** Function lcm2MessageFunc
* @brief default message function to console
*
* The default message function is used as a message printer to the console
* from library start.
*
* @param code a message code understood by your message
* handler or openiccMSG_e
* @param context_object a openicc object is expected
* @param format the text format string for following args
* @param ... the variable args fitting to format
* @return 0 - success; 1 - error
*
* @version OpenICC: 0.1.0
* @date 2009/07/20
* @since 2008/04/03 (OpenICC: 0.1.0)
*/
int lcm2MessageFunc ( int/*openiccMSG_e*/ code OY_UNUSED,
const void * context_object OY_UNUSED,
const char * format,
... )
{
char * text = 0;
int error = 0;
va_list list;
size_t sz = 0;
int len = 0;
va_start( list, format);
len = vsnprintf( text, sz, format, list);
va_end ( list );
{
text = calloc( sizeof(char), len+2 );
if(!text)
{
fprintf(stderr, "Could not allocate 256 byte of memory.\n");
return 1;
}
va_start( list, format);
len = vsnprintf( text, len+1, format, list);
va_end ( list );
}
if(text)
fprintf( stderr, "%s\n", text );
lcm2Free_m( text );
return error;
}
lcm2Message_f lcm2msg_p = lcm2MessageFunc;
/** @brief set a custom message function
*
* Use to connect to user message system.
*/
int lcm2MessageFuncSet ( lcm2Message_f message_func )
{
if(message_func)
lcm2msg_p = message_func;
else
lcm2msg_p = lcm2MessageFunc;
return 1;
}
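/* Sketch (illustrative only) of a custom handler matching lcm2Message_f:
 * prefix each message with its code and send it to stderr. Install it
 * with lcm2MessageFuncSet( myMessageFunc ); */
static int myMessageFunc ( int code,
                           const void * context_object OY_UNUSED,
                           const char * format,
                           ... )
{
  va_list list;
  fprintf( stderr, "demo[%d]: ", code );
  va_start( list, format );
  vfprintf( stderr, format, list );
  va_end( list );
  fputs( "\n", stderr );
  return 0;
}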
/** @brief run time API version
*/
int lcm2Version ( )
{
return LCM2PROFILER_API;
}
/** @} */ /* profiler */
/** \addtogroup profiler
*
* Oyranos ICC Profiler API provides a platform-independent C interface to
* generate ICC profiles. Its main purpose is to generate ICC profiles in a
* programmatic way. The only dependency is littleCMS 2
* <a href="http://www.littlecms.com">www.littlecms.com</a>.
* It reduces the need for much of the lcms2
* boilerplate for format independent sampling, multi-localised strings from
* UTF-8 and more. The sampler collection contains effects and color space
* converters.
* The code consists of one source file and a header. So it can easily
* be placed inside your project.
*
*
* @section api API Documentation
* The Oyranos ICC Profiler API is contained in the lcm2_profiler.h header file.
*
* The high level API takes few arguments and generates a profile in
* one go.
* Effect profiles can be created in one call
* by lcm2CreateAbstractProfile(). It needs a @ref samplers function, which
* fills the Look Up Table (LUT). Two APIs exist to generate white point
* effects, lcm2CreateAbstractTemperatureProfile() and
* lcm2CreateAbstractWhitePointProfileLab() or
* lcm2CreateAbstractWhitePointProfileBradford(). These high level APIs allow
* writing the profile to disk in one go.
*
* The lower level APIs can be used to customise the profile generation.
* Basic matrix/shaper profiles can be created with
* lcm2CreateICCMatrixProfile2() and filled with custom texts in
* lcm2CreateProfileFragment().
*
* The following low level code sample comes from @ref lcm2_profiler.c.
* The code sets up a basic profile description and color spaces:
* @dontinclude lcm2_profiler.c
* @code
* // prepare some variables
* double icc_profile_version = 2.3;
* double icc_ab[2] = {0.0, 0.0};
* cmsHPROFILE profile;
* const char * kelvin_name = "5000 K";
* int error;
* int grid_size = 17;
* int i;
* cmsToneCurve * i_curve[3] = {NULL,NULL,NULL}, * o_curve[3] = {NULL,NULL,NULL};
* i_curve[0] = o_curve[0] = cmsBuildGamma(0, 1.0);
* for(i = 1; i < 3; ++i) { i_curve[i] = o_curve[i] = i_curve[0]; }
* @endcode
* @skip fragment
@until cmsSigAToB0Tag
*
* Profile i/o happens with lcm2OpenProfileFile(), which takes file names and
* a few wildcards as arguments. lcm2WriteProfileToFile() helps writing of
* canonical profile names. lcm2WriteProfileToMem() writes a profile to a
* custom memory allocator.
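*
* A minimal i/o sketch (the argument lists here are an editor's assumption;
* see lcm2_profiler.h for the authoritative signatures):
* @code
* // hypothetical arguments, for orientation only
* cmsHPROFILE p = lcm2OpenProfileFile( "sRGB.icc", NULL );
* if(p)
*   lcm2WriteProfileToFile( p, "sRGB-copy", NULL, NULL );
* @endcode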
*
* Most of the functions come with examples.
*
*/
|
initialize.c | //-------------------------------------------------------------------------//
// //
// This benchmark is a serial C version of the NPB SP code. This C //
// version is developed by the Center for Manycore Programming at Seoul //
// National University and derived from the serial Fortran versions in //
// "NPB3.3-SER" developed by NAS. //
// //
// Permission to use, copy, distribute and modify this software for any //
// purpose with or without fee is hereby granted. This software is //
// provided "as is" without express or implied warranty. //
// //
// Information on NPB 3.3, including the technical report, the original //
// specifications, source code, results and information on how to submit //
// new results, is available at: //
// //
// http://www.nas.nasa.gov/Software/NPB/ //
// //
// Send comments or suggestions for this C version to cmp@aces.snu.ac.kr //
// //
// Center for Manycore Programming //
// School of Computer Science and Engineering //
// Seoul National University //
// Seoul 151-744, Korea //
// //
// E-mail: cmp@aces.snu.ac.kr //
// //
//-------------------------------------------------------------------------//
//-------------------------------------------------------------------------//
// Authors: Sangmin Seo, Jungwon Kim, Jun Lee, Jeongho Nah, Gangwon Jo, //
// and Jaejin Lee //
//-------------------------------------------------------------------------//
#include "header.h"
//---------------------------------------------------------------------
// This subroutine initializes the field variable u using
// tri-linear transfinite interpolation of the boundary values
//---------------------------------------------------------------------
void initialize()
{
int i, j, k, m, ix, iy, iz;
double xi, eta, zeta, Pface[2][3][5], Pxi, Peta, Pzeta, temp[5];
//---------------------------------------------------------------------
// Later (in compute_rhs) we compute 1/u for every element. A few of
// the corner elements are not used, but it is convenient (and faster)
// to compute the whole thing with a simple loop. Make sure those
// values are nonzero by initializing the whole thing here.
//---------------------------------------------------------------------
for (k = 0; k <= grid_points[2]-1; k++) {
for (j = 0; j <= grid_points[1]-1; j++) {
for (i = 0; i <= grid_points[0]-1; i++) {
u[0][k][j][i] = 1.0;
u[1][k][j][i] = 0.0;
u[2][k][j][i] = 0.0;
u[3][k][j][i] = 0.0;
u[4][k][j][i] = 1.0;
}
}
}
//---------------------------------------------------------------------
// first store the "interpolated" values everywhere on the grid
//---------------------------------------------------------------------
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
for (ix = 0; ix < 2; ix++) {
Pxi = (double)ix;
exact_solution(Pxi, eta, zeta, &Pface[ix][0][0]);
}
for (iy = 0; iy < 2; iy++) {
Peta = (double)iy;
exact_solution(xi, Peta, zeta, &Pface[iy][1][0]);
}
for (iz = 0; iz < 2; iz++) {
Pzeta = (double)iz;
exact_solution(xi, eta, Pzeta, &Pface[iz][2][0]);
}
for (m = 0; m < 5; m++) {
Pxi = xi * Pface[1][0][m] + (1.0-xi) * Pface[0][0][m];
Peta = eta * Pface[1][1][m] + (1.0-eta) * Pface[0][1][m];
Pzeta = zeta * Pface[1][2][m] + (1.0-zeta) * Pface[0][2][m];
u[m][k][j][i] = Pxi + Peta + Pzeta -
Pxi*Peta - Pxi*Pzeta - Peta*Pzeta +
Pxi*Peta*Pzeta;
}
}
}
}
//---------------------------------------------------------------------
// now store the exact values on the boundaries
//---------------------------------------------------------------------
//---------------------------------------------------------------------
// west face
//---------------------------------------------------------------------
xi = 0.0;
i = 0;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][k][j][i] = temp[m];
}
}
}
//---------------------------------------------------------------------
// east face
//---------------------------------------------------------------------
xi = 1.0;
i = grid_points[0]-1;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)j * dnym1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][k][j][i] = temp[m];
}
}
}
//---------------------------------------------------------------------
// south face
//---------------------------------------------------------------------
eta = 0.0;
j = 0;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][k][j][i] = temp[m];
}
}
}
//---------------------------------------------------------------------
// north face
//---------------------------------------------------------------------
eta = 1.0;
j = grid_points[1]-1;
for (k = 0; k <= grid_points[2]-1; k++) {
zeta = (double)k * dnzm1;
for (i = 0; i <= grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][k][j][i] = temp[m];
}
}
}
//---------------------------------------------------------------------
// bottom face
//---------------------------------------------------------------------
zeta = 0.0;
k = 0;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (i =0; i <= grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][k][j][i] = temp[m];
}
}
}
//---------------------------------------------------------------------
// top face
//---------------------------------------------------------------------
zeta = 1.0;
k = grid_points[2]-1;
for (j = 0; j <= grid_points[1]-1; j++) {
eta = (double)j * dnym1;
for (i =0; i <= grid_points[0]-1; i++) {
xi = (double)i * dnxm1;
exact_solution(xi, eta, zeta, temp);
for (m = 0; m < 5; m++) {
u[m][k][j][i] = temp[m];
}
}
}
#pragma omp target update to(u)
}
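//---------------------------------------------------------------------
// Aside (editor's sketch, not part of the benchmark): the boundary blend
// above is classic transfinite interpolation. Each of Pxi, Peta, Pzeta
// linearly interpolates a pair of opposing face values, and the
// inclusion-exclusion sum keeps edges and corners from being counted twice:
//---------------------------------------------------------------------
/*
static double transfinite_blend(double Pxi, double Peta, double Pzeta)
{
  return Pxi + Peta + Pzeta
       - Pxi*Peta - Pxi*Pzeta - Peta*Pzeta
       + Pxi*Peta*Pzeta;
}
*/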
/*
void lhsinit(int ni, int nj)
{
int j, m;
//---------------------------------------------------------------------
// zap the whole left hand side for starters
// set all diagonal values to 1. This is overkill, but convenient
//---------------------------------------------------------------------
for (j = 1; j <= nj; j++) {
for (m = 0; m < 5; m++) {
lhs [j][0][m] = 0.0;
lhsp[j][0][m] = 0.0;
lhsm[j][0][m] = 0.0;
lhs [j][ni][m] = 0.0;
lhsp[j][ni][m] = 0.0;
lhsm[j][ni][m] = 0.0;
}
lhs [j][0][2] = 1.0;
lhsp[j][0][2] = 1.0;
lhsm[j][0][2] = 1.0;
lhs [j][ni][2] = 1.0;
lhsp[j][ni][2] = 1.0;
lhsm[j][ni][2] = 1.0;
}
}
void lhsinitj(int nj, int ni)
{
int i, m;
//---------------------------------------------------------------------
// zap the whole left hand side for starters
// set all diagonal values to 1. This is overkill, but convenient
//---------------------------------------------------------------------
for (i = 1; i <= ni; i++) {
for (m = 0; m < 5; m++) {
lhs [0][i][m] = 0.0;
lhsp[0][i][m] = 0.0;
lhsm[0][i][m] = 0.0;
lhs [nj][i][m] = 0.0;
lhsp[nj][i][m] = 0.0;
lhsm[nj][i][m] = 0.0;
}
lhs [0][i][2] = 1.0;
lhsp[0][i][2] = 1.0;
lhsm[0][i][2] = 1.0;
lhs [nj][i][2] = 1.0;
lhsp[nj][i][2] = 1.0;
lhsm[nj][i][2] = 1.0;
}
}*/
|
gemm.c | #include "gemm.h"
#include "utils.h"
#include "im2col.h"
#include "dark_cuda.h"
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <float.h>
#include <string.h>
#include <stdint.h>
#ifdef _WIN32
#include <intrin.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#define TILE_M 4 // 4 ops
#define TILE_N 16 // AVX2 = 2 ops * 8 floats
#define TILE_K 16 // loop
#ifdef __cplusplus
#define PUT_IN_REGISTER
#else
#define PUT_IN_REGISTER register
#endif
// GEMM with binary (+1/-1) weights: a nonzero entry of A adds the
// corresponding row of B to C, a zero entry subtracts it.
void gemm_bin(int M, int N, int K, float ALPHA,
char *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
char A_PART = A[i*lda+k];
if(A_PART){
for(j = 0; j < N; ++j){
C[i*ldc+j] += B[k*ldb+j];
}
} else {
for(j = 0; j < N; ++j){
C[i*ldc+j] -= B[k*ldb+j];
}
}
}
}
}
float *random_matrix(int rows, int cols)
{
int i;
float* m = (float*)xcalloc(rows * cols, sizeof(float));
for(i = 0; i < rows*cols; ++i){
m[i] = (float)rand()/RAND_MAX;
}
return m;
}
void time_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<10; ++i){
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf ms\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void gemm(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
gemm_cpu( TA, TB, M, N, K, ALPHA,A,lda, B, ldb,BETA,C,ldc);
}
//--------------------------------------------
// XNOR bitwise GEMM for binary neural network
//--------------------------------------------
static inline unsigned char xnor(unsigned char a, unsigned char b) {
//return a == b;
return !(a^b);
}
// INT-32
static inline uint32_t get_bit_int32(uint32_t const*const src, size_t index) {
size_t src_i = index / 32;
int src_shift = index % 32;
unsigned char val = (src[src_i] & (1 << src_shift)) > 0;
return val;
}
static inline uint32_t xnor_int32(uint32_t a, uint32_t b) {
return ~(a^b);
}
static inline uint64_t xnor_int64(uint64_t a, uint64_t b) {
return ~(a^b);
}
static inline uint32_t fill_bit_int32(char src) {
if (src == 0) return 0x00000000;
else return 0xFFFFFFFF;
}
static inline uint64_t fill_bit_int64(char src) {
if (src == 0) return 0x0000000000000000;
else return 0xFFFFFFFFFFFFFFFF;
}
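// Editor's sketch of the idea used throughout this section: with values
// encoded as sign bits (1 = +1, 0 = -1), a 64-element dot product reduces to
// 2*popcount(xnor(a,b)) - 64 -- the same "(2 * count - K)" scaling the GEMM
// kernels below apply before multiplying by mean_arr[].
static inline int binary_dot_64(uint64_t a, uint64_t b) {
#ifdef _WIN32
    int matches = (int)__popcnt64(xnor_int64(a, b)); // MSVC intrinsic from <intrin.h>
#else
    int matches = __builtin_popcountll(xnor_int64(a, b)); // GCC/Clang builtin
#endif
    return 2 * matches - 64; // matches minus mismatches
}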
void binary_int32_printf(uint32_t src) {
int i;
for (i = 0; i < 32; ++i) {
if (src & 1) printf("1");
else printf("0");
src = src >> 1;
}
printf("\n");
}
void binary_int64_printf(uint64_t src) {
int i;
for (i = 0; i < 64; ++i) {
if (src & 1) printf("1");
else printf("0");
src = src >> 1;
}
printf("\n");
}
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k*ldb + j);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i, j, k;
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
char a_bit = get_bit(A, i*lda + k);
char b_bit = get_bit(B, j*ldb + k);
count_arr[i*ldc + j] += xnor(a_bit, b_bit);
}
}
}
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
}
}
free(count_arr);
}
*/
/*
void gemm_nn_custom_bin_mean(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int *count_arr = xcalloc(M*N, sizeof(int));
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
for (k = 0; k < K; ++k) { // l.size*l.size*l.c - one filter size [27 - 9216]
const char a_bit = get_bit(A, i*lda + k);
uint64_t a_bit64 = fill_bit_int64(a_bit);
int k_ldb = k*ldb;
for (j = 0; j < N; j += 64) { // out_h*out_w - one channel output size [169 - 173056]
if ((N - j > 64) && (k_ldb % 8 == 0)) {
uint64_t b_bit64 = *((uint64_t *)(B + (k_ldb + j) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
//printf("\n %d \n",__builtin_popcountll(c_bit64)); // gcc
printf("\n %d \n", __popcnt64(c_bit64)); // msvs
int h;
for (h = 0; h < 64; ++h)
if ((c_bit64 >> h) & 1) count_arr[i*ldc + j + h] += 1;
//binary_int64_printf(a_bit64);
//binary_int64_printf(b_bit64);
//binary_int64_printf(c_bit64);
}
else {
for (; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
char b_bit = get_bit(B, k_ldb + j);
if (xnor(a_bit, b_bit)) count_arr[i*ldc + j] += 1;
}
}
}
}
}
if (mean_arr) {
//int K_2 = K / 2;
for (i = 0; i < M; ++i) {
float mean_val = mean_arr[i];
//float mean_val2 = 2 * mean_val;
for (j = 0; j < N; ++j) {
C[i*ldc + j] = (2 * count_arr[i*ldc + j] - K) * mean_val;
//C[i*ldc + j] = (count_arr[i*ldc + j] - K_2) *mean_val2;
}
}
}
else {
for (i = 0; i < M; ++i) {
for (j = 0; j < N; ++j) {
C[i*ldc + j] = count_arr[i*ldc + j] - K / 2;
}
}
}
free(count_arr);
//getchar();
}
*/
/*
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k, h;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
#ifdef WIN32
int tmp_count = __popcnt64(c_bit64);
#else
int tmp_count = __builtin_popcountll(c_bit64);
#endif
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
*/
//----------------------------
// is not used
/*
void transpose_32x32_bits_my(uint32_t *A, uint32_t *B, int lda, int ldb)
{
unsigned int x, y;
for (y = 0; y < 32; ++y) {
for (x = 0; x < 32; ++x) {
if (A[y * lda] & ((uint32_t)1 << x)) B[x * ldb] |= (uint32_t)1 << y;
}
}
}
*/
#ifndef GPU
uint8_t reverse_8_bit(uint8_t a) {
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
uint32_t reverse_32_bit(uint32_t a)
{
// unsigned int __rbit(unsigned int val) // for ARM //__asm__("rbit %0, %1\n" : "=r"(output) : "r"(input));
return (reverse_8_bit(a >> 24) << 0) |
(reverse_8_bit(a >> 16) << 8) |
(reverse_8_bit(a >> 8) << 16) |
(reverse_8_bit(a >> 0) << 24);
}
#define swap(a0, a1, j, m) t = (a0 ^ (a1 >>j)) & m; a0 = a0 ^ t; a1 = a1 ^ (t << j);
// 32x32 bit-matrix transpose via recursive block swaps (cf. Hacker's Delight)
void transpose32_optimized(uint32_t A[32]) {
int j, k;
unsigned m, t;
//m = 0x0000FFFF;
//for (j = 16; j != 0; j = j >> 1, m = m ^ (m << j)) {
// for (k = 0; k < 32; k = (k + j + 1) & ~j) {
// t = (A[k] ^ (A[k + j] >> j)) & m;
// A[k] = A[k] ^ t;
// A[k + j] = A[k + j] ^ (t << j);
// }
//}
j = 16;
m = 0x0000FFFF;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 8;
m = 0x00ff00ff;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 4;
m = 0x0f0f0f0f;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 2;
m = 0x33333333;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
j = 1;
m = 0x55555555;
for (k = 0; k < 32; k = (k + j + 1) & ~j) { swap(A[k], A[k + j], j, m); }
// reverse Y
for (j = 0; j < 16; ++j) {
uint32_t tmp = A[j];
A[j] = reverse_32_bit(A[31 - j]);
A[31 - j] = reverse_32_bit(tmp);
}
}
void transpose_32x32_bits_reversed_diagonale(uint32_t *A, uint32_t *B, int m, int n)
{
unsigned A_tmp[32];
int i;
#pragma unroll
for (i = 0; i < 32; ++i) A_tmp[i] = A[i * m];
transpose32_optimized(A_tmp);
#pragma unroll
for (i = 0; i < 32; ++i) B[i*n] = A_tmp[i];
}
void transpose_8x8_bits_my(unsigned char *A, unsigned char *B, int lda, int ldb)
{
unsigned x, y;
for (y = 0; y < 8; ++y) {
for (x = 0; x < 8; ++x) {
if (A[y * lda] & (1 << x)) B[x * ldb] |= 1 << y;
}
}
}
unsigned char reverse_byte_1(char a)
{
return ((a & 0x1) << 7) | ((a & 0x2) << 5) |
((a & 0x4) << 3) | ((a & 0x8) << 1) |
((a & 0x10) >> 1) | ((a & 0x20) >> 3) |
((a & 0x40) >> 5) | ((a & 0x80) >> 7);
}
unsigned char reverse_byte(unsigned char a)
{
return ((a * 0x0802LU & 0x22110LU) | (a * 0x8020LU & 0x88440LU)) * 0x10101LU >> 16;
}
static unsigned char lookup[16] = {
0x0, 0x8, 0x4, 0xc, 0x2, 0xa, 0x6, 0xe,
0x1, 0x9, 0x5, 0xd, 0x3, 0xb, 0x7, 0xf, };
unsigned char reverse_byte_3(unsigned char n) {
// Reverse the top and bottom nibble then swap them.
return (lookup[n & 0b1111] << 4) | lookup[n >> 4];
}
void transpose8rS32_reversed_diagonale(unsigned char* A, unsigned char* B, int m, int n)
{
unsigned x, y, t;
// Load the array and pack it into x and y.
x = (A[0] << 24) | (A[m] << 16) | (A[2 * m] << 8) | A[3 * m];
y = (A[4 * m] << 24) | (A[5 * m] << 16) | (A[6 * m] << 8) | A[7 * m];
t = (x ^ (x >> 7)) & 0x00AA00AA; x = x ^ t ^ (t << 7);
t = (y ^ (y >> 7)) & 0x00AA00AA; y = y ^ t ^ (t << 7);
t = (x ^ (x >> 14)) & 0x0000CCCC; x = x ^ t ^ (t << 14);
t = (y ^ (y >> 14)) & 0x0000CCCC; y = y ^ t ^ (t << 14);
t = (x & 0xF0F0F0F0) | ((y >> 4) & 0x0F0F0F0F);
y = ((x << 4) & 0xF0F0F0F0) | (y & 0x0F0F0F0F);
x = t;
B[7 * n] = reverse_byte(x >> 24); B[6 * n] = reverse_byte(x >> 16); B[5 * n] = reverse_byte(x >> 8); B[4 * n] = reverse_byte(x);
B[3 * n] = reverse_byte(y >> 24); B[2 * n] = reverse_byte(y >> 16); B[1 * n] = reverse_byte(y >> 8); B[0 * n] = reverse_byte(y);
}
/*
// transpose by 8-bit
void transpose_bin(char *A, char *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d, ldb = %d \t\t m = %d, lda = %d \n", n, ldb, m, lda);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 8) {
int j;
for (j = 0; j < m; j += 8) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
//transpose_8x8_bits_my(&A[a_index/8], &B[b_index/8], lda/8, ldb/8);
transpose8rS32_reversed_diagonale(&A[a_index / 8], &B[b_index / 8], lda / 8, ldb / 8);
}
for (; j < m; ++j) {
if (get_bit(A, i*lda + j)) set_bit(B, j*ldb + i);
}
}
}
*/
#endif
// transpose by 32-bit
void transpose_bin(uint32_t *A, uint32_t *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
//printf("\n n = %d (n mod 32 = %d), m = %d (m mod 32 = %d) \n", n, n % 32, m, m % 32);
//printf("\n lda = %d (lda mod 32 = %d), ldb = %d (ldb mod 32 = %d) \n", lda, lda % 32, ldb, ldb % 32);
int i;
#pragma omp parallel for
for (i = 0; i < n; i += 32) {
int j;
for (j = 0; j < m; j += 32) {
int a_index = i*lda + j;
int b_index = j*ldb + i;
transpose_32x32_bits_reversed_diagonale(&A[a_index / 32], &B[b_index / 32], lda / 32, ldb / 32);
//transpose_32x32_bits_my(&A[a_index/32], &B[b_index/32], lda/32, ldb/32);
}
for (; j < m; ++j) {
if (get_bit((const unsigned char* const)A, i * lda + j)) set_bit((unsigned char* const)B, j * ldb + i);
}
}
}
static inline int popcnt_32(uint32_t val32) {
#ifdef WIN32 // Windows MSVS
int tmp_count = __popcnt(val32);
#else // Linux GCC
int tmp_count = __builtin_popcount(val32);
#endif
return tmp_count;
}
//----------------------------
#if (defined(__AVX__) && defined(__x86_64__)) || (defined(_WIN64) && !defined(__MINGW32__))
#if (defined(_WIN64) && !defined(__MINGW64__))
#include <intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#if defined(_MSC_VER) && _MSC_VER <= 1900
static inline __int64 _mm256_extract_epi64(__m256i a, const int index) {
return a.m256i_i64[index];
}
static inline __int32 _mm256_extract_epi32(__m256i a, const int index) {
return a.m256i_i32[index];
}
#endif
static inline float _castu32_f32(uint32_t a) {
return *((float *)&a);
}
static inline float _mm256_extract_float32(__m256 a, const int index) {
return a.m256_f32[index];
}
#else // Linux GCC/Clang
#include <x86intrin.h>
#include <ammintrin.h>
#include <immintrin.h>
#include <smmintrin.h>
#include <cpuid.h>
static inline float _castu32_f32(uint32_t a) {
return *((float *)&a);
}
static inline float _mm256_extract_float32(__m256 a, const int index) {
switch(index) {
case 0:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
case 1:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 1));
case 2:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 2));
case 3:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 3));
case 4:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 4));
case 5:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 5));
case 6:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 6));
case 7:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 7));
default:
return _castu32_f32(_mm256_extract_epi32(_mm256_castps_si256(a), 0));
}
}
void asm_cpuid(uint32_t* abcd, uint32_t eax)
{
uint32_t ebx = 0, edx = 0, ecx = 0;
// EBX is saved to EDI and later restored
__asm__("movl %%ebx, %%edi;"
"cpuid;"
"xchgl %%ebx, %%edi;"
: "=D"(ebx),
"+a"(eax), "+c"(ecx), "=d"(edx));
abcd[0] = eax;
abcd[1] = ebx;
abcd[2] = ecx;
abcd[3] = edx;
}
#endif
#ifdef _WIN32
// Windows
#define cpuid(info, x) __cpuidex(info, x, 0)
#else
// GCC Intrinsics
void cpuid(int info[4], int InfoType) {
__cpuid_count(InfoType, 0, info[0], info[1], info[2], info[3]);
}
#endif
// Misc.
static int HW_MMX, HW_x64, HW_RDRAND, HW_BMI1, HW_BMI2, HW_ADX, HW_PREFETCHWT1;
static int HW_ABM; // Advanced Bit Manipulation
// SIMD: 128-bit
static int HW_SSE, HW_SSE2, HW_SSE3, HW_SSSE3, HW_SSE41, HW_SSE42, HW_SSE4a, HW_AES, HW_SHA;
// SIMD: 256-bit
static int HW_AVX, HW_XOP, HW_FMA3, HW_FMA4, HW_AVX2;
// SIMD: 512-bit
static int HW_AVX512F; // AVX512 Foundation
static int HW_AVX512CD; // AVX512 Conflict Detection
static int HW_AVX512PF; // AVX512 Prefetch
static int HW_AVX512ER; // AVX512 Exponential + Reciprocal
static int HW_AVX512VL; // AVX512 Vector Length Extensions
static int HW_AVX512BW; // AVX512 Byte + Word
static int HW_AVX512DQ; // AVX512 Doubleword + Quadword
static int HW_AVX512IFMA; // AVX512 Integer 52-bit Fused Multiply-Add
static int HW_AVX512VBMI; // AVX512 Vector Byte Manipulation Instructions
// https://stackoverflow.com/questions/6121792/how-to-check-if-a-cpu-supports-the-sse3-instruction-set
void check_cpu_features(void) {
int info[4];
cpuid(info, 0);
int nIds = info[0];
cpuid(info, 0x80000000);
unsigned nExIds = info[0];
// Detect Features
if (nIds >= 0x00000001) {
cpuid(info, 0x00000001);
HW_MMX = (info[3] & ((uint32_t)1 << 23)) != 0;
HW_SSE = (info[3] & ((uint32_t)1 << 25)) != 0;
HW_SSE2 = (info[3] & ((uint32_t)1 << 26)) != 0;
HW_SSE3 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_SSSE3 = (info[2] & ((uint32_t)1 << 9)) != 0;
HW_SSE41 = (info[2] & ((uint32_t)1 << 19)) != 0;
HW_SSE42 = (info[2] & ((uint32_t)1 << 20)) != 0;
HW_AES = (info[2] & ((uint32_t)1 << 25)) != 0;
HW_AVX = (info[2] & ((uint32_t)1 << 28)) != 0;
HW_FMA3 = (info[2] & ((uint32_t)1 << 12)) != 0;
HW_RDRAND = (info[2] & ((uint32_t)1 << 30)) != 0;
}
if (nIds >= 0x00000007) {
cpuid(info, 0x00000007);
HW_AVX2 = (info[1] & ((uint32_t)1 << 5)) != 0;
HW_BMI1 = (info[1] & ((uint32_t)1 << 3)) != 0;
HW_BMI2 = (info[1] & ((uint32_t)1 << 8)) != 0;
HW_ADX = (info[1] & ((uint32_t)1 << 19)) != 0;
HW_SHA = (info[1] & ((uint32_t)1 << 29)) != 0;
HW_PREFETCHWT1 = (info[2] & ((uint32_t)1 << 0)) != 0;
HW_AVX512F = (info[1] & ((uint32_t)1 << 16)) != 0;
HW_AVX512CD = (info[1] & ((uint32_t)1 << 28)) != 0;
HW_AVX512PF = (info[1] & ((uint32_t)1 << 26)) != 0;
HW_AVX512ER = (info[1] & ((uint32_t)1 << 27)) != 0;
HW_AVX512VL = (info[1] & ((uint32_t)1 << 31)) != 0;
HW_AVX512BW = (info[1] & ((uint32_t)1 << 30)) != 0;
HW_AVX512DQ = (info[1] & ((uint32_t)1 << 17)) != 0;
HW_AVX512IFMA = (info[1] & ((uint32_t)1 << 21)) != 0;
HW_AVX512VBMI = (info[2] & ((uint32_t)1 << 1)) != 0;
}
if (nExIds >= 0x80000001) {
cpuid(info, 0x80000001);
HW_x64 = (info[3] & ((uint32_t)1 << 29)) != 0;
HW_ABM = (info[2] & ((uint32_t)1 << 5)) != 0;
HW_SSE4a = (info[2] & ((uint32_t)1 << 6)) != 0;
HW_FMA4 = (info[2] & ((uint32_t)1 << 16)) != 0;
HW_XOP = (info[2] & ((uint32_t)1 << 11)) != 0;
}
}
int is_avx() {
static int result = -1;
if (result == -1) {
check_cpu_features();
result = HW_AVX;
if (result == 1) printf(" Used AVX \n");
else printf(" Not used AVX \n");
}
return result;
}
int is_fma_avx2() {
static int result = -1;
if (result == -1) {
check_cpu_features();
result = HW_FMA3 && HW_AVX2;
if (result == 1) printf(" Used FMA & AVX2 \n");
else printf(" Not used FMA & AVX2 \n");
}
return result;
}
// https://software.intel.com/sites/landingpage/IntrinsicsGuide
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
if (is_avx() == 1) { // AVX
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
float A_PART = ALPHA*A[i*lda + k];
__m256 a256, b256, c256, result256; // AVX
a256 = _mm256_set1_ps(A_PART);
for (j = 0; j < N - 8; j += 8) {
b256 = _mm256_loadu_ps(&B[k*ldb + j]);
c256 = _mm256_loadu_ps(&C[i*ldc + j]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//result256 = _mm256_fmadd_ps(a256, b256, c256);
result256 = _mm256_mul_ps(a256, b256);
result256 = _mm256_add_ps(result256, c256);
_mm256_storeu_ps(&C[i*ldc + j], result256);
}
int prev_end = (N % 8 == 0) ? (N - 8) : (N / 8) * 8;
for (j = prev_end; j < N; ++j)
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
else {
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
/* // SSE
__m128 a128, b128, c128, result128; // SSE
a128 = _mm_set1_ps(A_PART);
for (j = 0; j < N - 4; j += 4) {
b128 = _mm_loadu_ps(&B[k*ldb + j]);
c128 = _mm_loadu_ps(&C[i*ldc + j]);
//result128 = _mm_fmadd_ps(a128, b128, c128);
result128 = _mm_mul_ps(a128, b128);
result128 = _mm_add_ps(result128, c128);
_mm_storeu_ps(&C[i*ldc + j], result128);
}
int prev_end = (N % 4 == 0) ? (N - 4) : (N / 4) * 4;
for (j = prev_end; j < N; ++j){
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
*/
}
}
}
}
void gemm_nn_fast(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i;
#pragma omp parallel for
for (i = 0; i < (M / TILE_M)*TILE_M; i += TILE_M)
{
int j, k;
int i_d, k_d;
for (k = 0; k < (K / TILE_K)*TILE_K; k += TILE_K)
{
for (j = 0; j < (N / TILE_N)*TILE_N; j += TILE_N)
{
// L1 - 6 bits tag [11:6] - cache size 32 KB, conflict for each 4 KB
// L2 - 9 bits tag [14:6] - cache size 256 KB, conflict for each 32 KB
// L3 - 13 bits tag [18:6] - cache size 8 MB, conflict for each 512 KB
__m256 result256;
__m256 a256_0, b256_0; // AVX
__m256 a256_1, b256_1; // AVX
__m256 a256_2;// , b256_2; // AVX
__m256 a256_3;// , b256_3; // AVX
__m256 c256_0, c256_1, c256_2, c256_3;
__m256 c256_4, c256_5, c256_6, c256_7;
c256_0 = _mm256_loadu_ps(&C[(0 + i)*ldc + (0 + j)]);
c256_1 = _mm256_loadu_ps(&C[(1 + i)*ldc + (0 + j)]);
c256_2 = _mm256_loadu_ps(&C[(0 + i)*ldc + (8 + j)]);
c256_3 = _mm256_loadu_ps(&C[(1 + i)*ldc + (8 + j)]);
c256_4 = _mm256_loadu_ps(&C[(2 + i)*ldc + (0 + j)]);
c256_5 = _mm256_loadu_ps(&C[(3 + i)*ldc + (0 + j)]);
c256_6 = _mm256_loadu_ps(&C[(2 + i)*ldc + (8 + j)]);
c256_7 = _mm256_loadu_ps(&C[(3 + i)*ldc + (8 + j)]);
for (k_d = 0; k_d < (TILE_K); ++k_d)
{
a256_0 = _mm256_set1_ps(ALPHA*A[(0 + i)*lda + (k_d + k)]);
a256_1 = _mm256_set1_ps(ALPHA*A[(1 + i)*lda + (k_d + k)]);
a256_2 = _mm256_set1_ps(ALPHA*A[(2 + i)*lda + (k_d + k)]);
a256_3 = _mm256_set1_ps(ALPHA*A[(3 + i)*lda + (k_d + k)]);
b256_0 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (0 + j)]);
b256_1 = _mm256_loadu_ps(&B[(k_d + k)*ldb + (8 + j)]);
// FMA - Intel Haswell (2013), AMD Piledriver (2012)
//c256_0 = _mm256_fmadd_ps(a256_0, b256_0, c256_0);
//c256_1 = _mm256_fmadd_ps(a256_1, b256_0, c256_1);
//c256_2 = _mm256_fmadd_ps(a256_0, b256_1, c256_2);
//c256_3 = _mm256_fmadd_ps(a256_1, b256_1, c256_3);
//c256_4 = _mm256_fmadd_ps(a256_2, b256_0, c256_4);
//c256_5 = _mm256_fmadd_ps(a256_3, b256_0, c256_5);
//c256_6 = _mm256_fmadd_ps(a256_2, b256_1, c256_6);
//c256_7 = _mm256_fmadd_ps(a256_3, b256_1, c256_7);
result256 = _mm256_mul_ps(a256_0, b256_0);
c256_0 = _mm256_add_ps(result256, c256_0);
result256 = _mm256_mul_ps(a256_1, b256_0);
c256_1 = _mm256_add_ps(result256, c256_1);
result256 = _mm256_mul_ps(a256_0, b256_1);
c256_2 = _mm256_add_ps(result256, c256_2);
result256 = _mm256_mul_ps(a256_1, b256_1);
c256_3 = _mm256_add_ps(result256, c256_3);
result256 = _mm256_mul_ps(a256_2, b256_0);
c256_4 = _mm256_add_ps(result256, c256_4);
result256 = _mm256_mul_ps(a256_3, b256_0);
c256_5 = _mm256_add_ps(result256, c256_5);
result256 = _mm256_mul_ps(a256_2, b256_1);
c256_6 = _mm256_add_ps(result256, c256_6);
result256 = _mm256_mul_ps(a256_3, b256_1);
c256_7 = _mm256_add_ps(result256, c256_7);
}
_mm256_storeu_ps(&C[(0 + i)*ldc + (0 + j)], c256_0);
_mm256_storeu_ps(&C[(1 + i)*ldc + (0 + j)], c256_1);
_mm256_storeu_ps(&C[(0 + i)*ldc + (8 + j)], c256_2);
_mm256_storeu_ps(&C[(1 + i)*ldc + (8 + j)], c256_3);
_mm256_storeu_ps(&C[(2 + i)*ldc + (0 + j)], c256_4);
_mm256_storeu_ps(&C[(3 + i)*ldc + (0 + j)], c256_5);
_mm256_storeu_ps(&C[(2 + i)*ldc + (8 + j)], c256_6);
_mm256_storeu_ps(&C[(3 + i)*ldc + (8 + j)], c256_7);
}
for (j = (N / TILE_N)*TILE_N; j < N; ++j) {
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
for (k_d = k; k_d < (k + TILE_K); ++k_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k_d];
C[i_d*ldc + j] += A_PART*B[k_d*ldb + j];
}
}
}
}
for (k = (K / TILE_K)*TILE_K; k < K; ++k)
{
for (i_d = i; i_d < (i + TILE_M); ++i_d)
{
PUT_IN_REGISTER float A_PART = ALPHA*A[i_d*lda + k];
for (j = 0; j < N; ++j) {
C[i_d*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
for (i = (M / TILE_M)*TILE_M; i < M; ++i) {
int j, k;
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
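// Editor's sketch of the register-blocking idea in gemm_nn_fast() above: a
// TILE_M x TILE_N block of C is kept in eight YMM accumulators across the
// whole k-loop, so each C element is loaded and stored once per tile instead
// of once per k step. A scalar model of the same traversal (assuming M, N, K
// are multiples of the tile sizes):
/*
for (i = 0; i < M; i += TILE_M)
    for (k = 0; k < K; k += TILE_K)
        for (j = 0; j < N; j += TILE_N)
            for (k_d = k; k_d < k + TILE_K; ++k_d)
                for (i_d = i; i_d < i + TILE_M; ++i_d)
                    for (j_d = j; j_d < j + TILE_N; ++j_d)
                        C[i_d*ldc + j_d] += ALPHA * A[i_d*lda + k_d] * B[k_d*ldb + j_d];
*/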
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
PUT_IN_REGISTER uint32_t A_PART = A[i*lda + s];
__m256i a256 = _mm256_set1_epi32(A_PART);
for (j = 0; j < N - 8; j += 8)
{
__m256i b256 = *((__m256i*)&B[s*ldb + j]);
__m256i xor256 = _mm256_xor_si256(a256, b256); // xor(a,b)
__m256i all_1 = _mm256_set1_epi8((char)255);
__m256i xnor256 = _mm256_andnot_si256(xor256, all_1); // xnor = not(xor(a,b))
// waiting for - CPUID Flags: AVX512VPOPCNTDQ: __m512i _mm512_popcnt_epi32(__m512i a)
__m256 count = _mm256_setr_ps(
popcnt_32(_mm256_extract_epi32(xnor256, 0)),
popcnt_32(_mm256_extract_epi32(xnor256, 1)),
popcnt_32(_mm256_extract_epi32(xnor256, 2)),
popcnt_32(_mm256_extract_epi32(xnor256, 3)),
popcnt_32(_mm256_extract_epi32(xnor256, 4)),
popcnt_32(_mm256_extract_epi32(xnor256, 5)),
popcnt_32(_mm256_extract_epi32(xnor256, 6)),
popcnt_32(_mm256_extract_epi32(xnor256, 7)));
__m256 val2 = _mm256_set1_ps(2);
count = _mm256_mul_ps(count, val2); // count * 2
__m256 val32 = _mm256_set1_ps(32);
count = _mm256_sub_ps(count, val32); // count - 32
__m256 mean256 = _mm256_set1_ps(mean_val);
count = _mm256_mul_ps(count, mean256); // count * mean_val
__m256 c256 = *((__m256*)&C[i*ldc + j]);
count = _mm256_add_ps(count, c256); // c = c + count
*((__m256*)&C[i*ldc + j]) = count;
}
for (; j < N; ++j) // out_h*out_w;
{
PUT_IN_REGISTER uint32_t B_PART = B[s*ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
int32_t count = popcnt_32(xnor_result); // must be Signed int
C[i*ldc + j] += (2 * count - 32) * mean_val;
}
}
}
}
void convolution_2d_old(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output)
{
//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
//int i, f, j;
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
float sum = 0;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output, float *mean)
{
//const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
//const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
int i;
#if defined(_OPENMP)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads( max_num_threads / 2);
}
#endif
//convolution_2d_old(w, h, ksize, n, c, pad, stride, weights, input, output);
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
for (i = 0; i < ksize*ksize*n*c; i+=8) {
*((__m256*)&weights[i]) = _mm256_and_ps(*((__m256*)&weights[i]), _mm256_castsi256_ps(all256_sing1));
}
//for (i = 0; i < w*h*c; i += 8) {
//*((__m256*)&input[i]) = _mm256_and_ps(*((__m256*)&input[i]), _mm256_castsi256_ps(all256_sing1));
//}
//__m256i all256_last_zero = _mm256_set1_epi32(0xFFFFFFFF);
//all256_last_zero.m256i_i32[7] = 0;
__m256i all256_last_zero =
_mm256_set_epi32(0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x0);
__m256i idx256 = _mm256_set_epi32(0, 7, 6, 5, 4, 3, 2, 1);
//__m256 all256_sing1 = _mm256_set1_ps(0x80000000);
__m256 all256_one = _mm256_set1_ps(1);
__m256i all256i_one = _mm256_set1_epi32(1);
///__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
///__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
int chan, y, x, f_y, f_x;
float cur_mean = fabs(mean[fil]);
__m256 mean256 = _mm256_set1_ps(cur_mean);
// channel index
//for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w-8; x+=8)
{
int const output_index = fil*w*h + y*w + x;
float sum = 0;
__m256 sum256 = _mm256_set1_ps(0);
for (chan = 0; chan < c; ++chan) {
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
//__m256 in = *((__m256*)&input[input_pre_index + input_y*w]);
if (input_y < 0 || input_y >= h) continue;
//__m256 in = _mm256_loadu_ps(&input[input_pre_index + input_y*w + x - pad]);
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
//if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
//if (input_y < 0 || input_y >= h) continue;
//sum += input[input_index] * weights[weights_index];
__m256 in = *((__m256*)&input[input_index]);
__m256 w256 = _mm256_set1_ps(weights[weights_index]); // renamed from "w", which shadowed the width parameter
//__m256 w_sign = _mm256_and_ps(w256, _mm256_castsi256_ps(all256_sing1)); // check sign in 8 x 32-bit floats
__m256 xor256 = _mm256_xor_ps(w256, in);
//printf("\n xor256_1 = %f, xor256_2 = %f \n", xor256.m256_f32[0], xor256.m256_f32[1]);
//printf("\n in = %f, w = %f, xor256 = %f \n", in.m256_f32[0], w_sign.m256_f32[0], xor256.m256_f32[0]);
//__m256 pn1 = _mm256_and_ps(_mm256_castsi256_ps(all256i_one), xor256);
//sum256 = xor256;
sum256 = _mm256_add_ps(xor256, sum256);
//printf("\n --- \n");
//printf("\n 0 = %f, 1 = %f, 2 = %f, 3 = %f, 4 = %f, 5 = %f, 6 = %f, 7 = %f \n", in.m256_f32[0], in.m256_f32[1], in.m256_f32[2], in.m256_f32[3], in.m256_f32[4], in.m256_f32[5], in.m256_f32[6], in.m256_f32[7]);
if (f_x < ksize-1) {
//in = _mm256_permutevar8x32_ps(in, idx256);
//in = _mm256_and_ps(in, _mm256_castsi256_ps(all256_last_zero));
}
}
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
//output[output_index] += sum;
sum256 = _mm256_mul_ps(sum256, mean256);
//printf("\n cur_mean = %f, sum256 = %f, sum256 = %f, in = %f \n",
// cur_mean, sum256.m256_f32[0], sum256.m256_f32[1], input[input_pre_index]);
//__m256 out = *((__m256*)&output[output_index]);
//out = _mm256_add_ps(out, sum256);
//*((__m256*)&output[output_index]) = out;
*((__m256*)&output[output_index]) = sum256;
//_mm256_storeu_ps(&C[i*ldc + j], result256);
}
}
}
// http://graphics.stanford.edu/~seander/bithacks.html
// https://stackoverflow.com/questions/17354971/fast-counting-the-number-of-set-bits-in-m128i-register
// https://arxiv.org/pdf/1611.07612.pdf
static inline int popcnt128(__m128i n) {
const __m128i n_hi = _mm_unpackhi_epi64(n, n);
#if defined(_MSC_VER)
return __popcnt64(_mm_cvtsi128_si64(n)) + __popcnt64(_mm_cvtsi128_si64(n_hi));
#elif defined(__APPLE__) && defined(__clang__)
return _mm_popcnt_u64(_mm_cvtsi128_si64(n)) + _mm_popcnt_u64(_mm_cvtsi128_si64(n_hi));
#else
return __popcntq(_mm_cvtsi128_si64(n)) + __popcntq(_mm_cvtsi128_si64(n_hi));
#endif
}
static inline int popcnt256(__m256i n) {
return popcnt128(_mm256_extractf128_si256(n, 0)) + popcnt128(_mm256_extractf128_si256(n, 1));
}
// Mula's AVX2 popcount: per-nibble table lookup, summed per 64-bit lane
static inline __m256i count256(__m256i v) {
// popcount of every 4-bit value, duplicated for both 128-bit lanes
__m256i lookup =
_mm256_setr_epi8(0, 1, 1, 2, 1, 2, 2, 3, 1, 2,
2, 3, 2, 3, 3, 4, 0, 1, 1, 2, 1, 2, 2, 3,
1, 2, 2, 3, 2, 3, 3, 4);
__m256i low_mask = _mm256_set1_epi8(0x0f);
__m256i lo = _mm256_and_si256(v, low_mask); // low nibbles
__m256i hi = _mm256_and_si256(_mm256_srli_epi32(v, 4), low_mask); // high nibbles
__m256i popcnt1 = _mm256_shuffle_epi8(lookup, lo);
__m256i popcnt2 = _mm256_shuffle_epi8(lookup, hi);
__m256i total = _mm256_add_epi8(popcnt1, popcnt2); // per-byte popcounts
// horizontal sums of the byte counts into four 64-bit lanes (SAD vs. zero)
return _mm256_sad_epu8(total, _mm256_setzero_si256());
}
static inline int popcnt256_custom(__m256i n) {
__m256i val = count256(n);
//return val.m256i_i64[0] +
//val.m256i_i64[1] +
//val.m256i_i64[2] +
//val.m256i_i64[3];
return _mm256_extract_epi64(val, 0)
+ _mm256_extract_epi64(val, 1)
+ _mm256_extract_epi64(val, 2)
+ _mm256_extract_epi64(val, 3);
}
static inline void xnor_avx2_popcnt(__m256i a_bit256, __m256i b_bit256, __m256i *count_sum) {
__m256i c_bit256 = _mm256_set1_epi8((char)255);
__m256i xor256 = _mm256_xor_si256(a_bit256, b_bit256); // xor(a,b)
c_bit256 = _mm256_andnot_si256(xor256, c_bit256); // xnor = not(xor(a,b)); can be optimized - NOT the weights once up front and skip this NOT
*count_sum = _mm256_add_epi64(count256(c_bit256), *count_sum); // 1st part of Mula's popcnt algorithm
}
// 2nd part - popcnt Mula's algorithm
static inline int get_count_mula(__m256i count_sum) {
return _mm256_extract_epi64(count_sum, 0)
+ _mm256_extract_epi64(count_sum, 1)
+ _mm256_extract_epi64(count_sum, 2)
+ _mm256_extract_epi64(count_sum, 3);
}
// ~5x faster than the float32 gemm()
// further optimization: do the mean multiplication only for the last layer
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#if defined(_OPENMP)
static int max_num_threads = 0;
if (max_num_threads == 0) {
max_num_threads = omp_get_max_threads();
//omp_set_num_threads(max_num_threads / 2);
}
#endif
//#pragma omp parallel for
//for (i = 0; i < M; ++i)
#pragma omp parallel for
for (i = 0; i < (M/2)*2; i += 2)
{ // l.n - filters [16 - 55 - 1024]
float mean_val_0 = mean_arr[i + 0];
float mean_val_1 = mean_arr[i + 1];
int j, k;
//__m256i all_1 = _mm256_set1_epi8(255);
//for (j = 0; j < N; ++j)
for (j = 0; j < (N/2)*2; j += 2)
{ // out_h*out_w - one channel output size [169 - 173056]
//int count = 0;
const int bit_step = 256;
__m256i count_sum_0 = _mm256_set1_epi8(0);
__m256i count_sum_1 = _mm256_set1_epi8(0);
__m256i count_sum_2 = _mm256_set1_epi8(0);
__m256i count_sum_3 = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
__m256i a_bit256_1 = _mm256_loadu_si256((__m256i *)(A + ((i + 1)*lda + k) / 8));
__m256i b_bit256_1 = _mm256_loadu_si256((__m256i *)(B + ((j + 1)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum_0);
xnor_avx2_popcnt(a_bit256_0, b_bit256_1, &count_sum_1);
xnor_avx2_popcnt(a_bit256_1, b_bit256_0, &count_sum_2);
xnor_avx2_popcnt(a_bit256_1, b_bit256_1, &count_sum_3);
//count += popcnt256(c_bit256);
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
int count_0 = get_count_mula(count_sum_0);
int count_1 = get_count_mula(count_sum_1);
int count_2 = get_count_mula(count_sum_2);
int count_3 = get_count_mula(count_sum_3);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count_0 = count_0 - f1; // remove extra bits (from empty space for align only)
count_1 = count_1 - f1;
count_2 = count_2 - f1;
count_3 = count_3 - f1;
C[i*ldc + (j + 0)] = (2 * count_0 - K) * mean_val_0;
C[i*ldc + (j + 1)] = (2 * count_1 - K) * mean_val_0;
C[(i + 1)*ldc + (j + 0)] = (2 * count_2 - K) * mean_val_1;
C[(i + 1)*ldc + (j + 1)] = (2 * count_3 - K) * mean_val_1;
}
int i_d;
for (i_d = 0; i_d < 2; ++i_d)
{
float mean_val = mean_arr[i + i_d];
for (j = (N / 2) * 2; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + i_d + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[(i + i_d)*ldc + j] = (2 * count - K) * mean_val;
}
}
}
for (i = (M / 2) * 2; i < M; i += 1)
{
float mean_val = mean_arr[i];
int j, k;
for (j = 0; j < N; j += 1)
{ // out_h*out_w - one channel output size [169 - 173056]
const int bit_step = 256;
__m256i count_sum = _mm256_set1_epi8(0);
for (k = 0; k < K; k += bit_step) { // l.size*l.size*l.c - one filter size [27 - 9216]
__m256i a_bit256_0 = _mm256_loadu_si256((__m256i *)(A + ((i + 0)*lda + k) / 8));
__m256i b_bit256_0 = _mm256_loadu_si256((__m256i *)(B + ((j + 0)*ldb + k) / 8));
xnor_avx2_popcnt(a_bit256_0, b_bit256_0, &count_sum);
}
int count = get_count_mula(count_sum);
const int f1 = (K % bit_step == 0) ? 0 : (bit_step - (K % bit_step));
count = count - f1; // remove extra bits (from empty space for align only)
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_transpose(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int ldb_align)
{
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
int c;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
data_col[col_index + ldb_align * 0] = _mm256_extract_float32(src256, 0);// src256.m256_f32[0];
data_col[col_index + ldb_align * 1] = _mm256_extract_float32(src256, 1);// src256.m256_f32[1];
data_col[col_index + ldb_align * 2] = _mm256_extract_float32(src256, 2);// src256.m256_f32[2];
data_col[col_index + ldb_align * 3] = _mm256_extract_float32(src256, 3);// src256.m256_f32[3];
data_col[col_index + ldb_align * 4] = _mm256_extract_float32(src256, 4);// src256.m256_f32[4];
data_col[col_index + ldb_align * 5] = _mm256_extract_float32(src256, 5);// src256.m256_f32[5];
data_col[col_index + ldb_align * 6] = _mm256_extract_float32(src256, 6);// src256.m256_f32[6];
data_col[col_index + ldb_align * 7] = _mm256_extract_float32(src256, 7);// src256.m256_f32[7];
//_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = 0; h < height_col; ++h) {
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h * stride;
int im_col = w_offset + w * stride;
int col_index = (h * width_col + w)*ldb_align + c; // transposed & aligned
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col-pad; ++h) {
for (w = pad; w < width_col-pad-8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col-1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col-1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
//printf("\n Error: is no non-optimized version \n");
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_align(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
_mm256_storeu_ps(&data_col[col_index], src256);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1 && is_fma_avx2())
{
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.00);
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 8) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&data_im[im_col + width*(im_row + height*c_im)]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint16_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
//mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&data_im[im_col + width*(im_row + height*c_im)]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint16_t mask = _mm256_movemask_ps(result256); // (val > 0) ? 1 : 0
uint16_t* dst_ptr = (uint16_t*)&((uint8_t*)data_col)[col_index / 8];
*dst_ptr |= (mask << (col_index % 8));
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char* const)data_col, col_index);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
int i = 0;
if (a == LINEAR)
{}
else if (a == LEAKY)
{
if (is_fma_avx2()) {
__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 all256_01 = _mm256_set1_ps(0.1F);
for (i = 0; i < n - 8; i += 8) {
//x[i] = (x[i]>0) ? x[i] : .1*x[i];
__m256 src256 = _mm256_loadu_ps(&x[i]);
__m256 mult256 = _mm256_mul_ps((src256), all256_01); // mult * 0.1
__m256i sign256 = _mm256_and_si256(_mm256_castps_si256(src256), all256_sing1); // check sign in 8 x 32-bit floats
__m256 result256 = _mm256_blendv_ps(src256, mult256, _mm256_castsi256_ps(sign256)); // (x < 0) ? 0.1f*x : x
_mm256_storeu_ps(&x[i], result256);
}
}
for (; i < n; ++i) {
x[i] = (x[i]>0) ? x[i] : .1*x[i];
}
}
else {
for (i = 0; i < n; ++i) {
x[i] = activate(x[i], a);
}
}
}
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
memset(dst, 0, dst_size);
size_t i;
//__m256i all256_sing1 = _mm256_set_epi32(0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000, 0x80000000);
__m256 float_zero256 = _mm256_set1_ps(0.0);
for (i = 0; i < size; i+=8)
{
//__m256i src256 = _mm256_loadu_si256((__m256i *)(&src[i]));
//__m256i result256 = _mm256_and_si256(src256, all256_sing1); // check sign in 8 x 32-bit floats
//uint32_t mask = _mm256_movemask_ps(_mm256_castsi256_ps(result256)); // (val >= 0) ? 0 : 1
////mask = ~mask; // inverse mask, (val >= 0) ? 1 : 0
__m256 src256 = _mm256_loadu_ps((float *)(&src[i]));
__m256 result256 = _mm256_cmp_ps(src256, float_zero256, _CMP_GT_OS);
uint32_t mask = _mm256_movemask_ps(result256); // bit i = (val > 0) ? 1 : 0
dst[i / 8] = mask;
}
}
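/*
A minimal scalar sketch, assuming size is a multiple of 8, of what
float_to_bit() above computes: each group of 8 floats collapses into one
byte whose bit k is 1 iff the k-th float is strictly positive -- the same
bit order _mm256_movemask_ps produces. The helper name is hypothetical
and not part of this file's API.
*/
static inline unsigned char pack8_sign_bits(const float *src)
{
unsigned char byte_val = 0;
int k;
for (k = 0; k < 8; ++k) {
if (src[k] > 0) byte_val |= (unsigned char)(1u << k); // bit k <- sign test of src[k]
}
return byte_val;
}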
static inline void transpose4x4_SSE(float *A, float *B, const int lda, const int ldb)
{
__m128 row1 = _mm_loadu_ps(&A[0 * lda]);
__m128 row2 = _mm_loadu_ps(&A[1 * lda]);
__m128 row3 = _mm_loadu_ps(&A[2 * lda]);
__m128 row4 = _mm_loadu_ps(&A[3 * lda]);
_MM_TRANSPOSE4_PS(row1, row2, row3, row4);
_mm_storeu_ps(&B[0 * ldb], row1);
_mm_storeu_ps(&B[1 * ldb], row2);
_mm_storeu_ps(&B[2 * ldb], row3);
_mm_storeu_ps(&B[3 * ldb], row4);
}
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
#pragma omp parallel for
for (i = 0; i < n; i += block_size) {
int j, i2, j2;
//int max_i2 = (i + block_size < n) ? (i + block_size) : n;
if (i + block_size < n) {
int max_i2 = i + block_size;
for (j = 0; j < m; j += block_size) {
//int max_j2 = (j + block_size < m) ? (j + block_size) : m;
if (j + block_size < m) {
int max_j2 = j + block_size;
for (i2 = i; i2 < max_i2; i2 += 4) {
for (j2 = j; j2 < max_j2; j2 += 4) {
transpose4x4_SSE(&A[i2*lda + j2], &B[j2*ldb + i2], lda, ldb);
}
}
}
else {
for (i2 = i; i2 < max_i2; ++i2) {
for (j2 = j; j2 < m; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
else {
for (i2 = i; i2 < n; ++i2) {
for (j2 = 0; j2 < m; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
int pad, int stride, int batch)
{
const int w_offset = -pad / 2;
const int h_offset = -pad / 2;
int b, k;
for (b = 0; b < batch; ++b) {
#pragma omp parallel for
for (k = 0; k < c; ++k) {
int i, j, m, n;
for (i = 0; i < out_h; ++i) {
//for (j = 0; j < out_w; ++j) {
j = 0;
if(stride == 1 && is_avx() == 1) {
for (j = 0; j < out_w - 8 - (size - 1); j += 8) {
int out_index = j + out_w*(i + out_h*(k + c*b));
__m256 max256 = _mm256_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
max256 = _mm256_max_ps(src256, max256);
}
}
_mm256_storeu_ps(&dst[out_index], max256);
}
}
else if (size == 2 && stride == 2 && is_avx() == 1) {
for (j = 0; j < out_w - 4; j += 4) {
int out_index = j + out_w*(i + out_h*(k + c*b));
//float max = -FLT_MAX;
//int max_i = -1;
__m128 max128 = _mm_set1_ps(-FLT_MAX);
for (n = 0; n < size; ++n) {
//for (m = 0; m < size; ++m)
m = 0;
{
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
if (!valid) continue;
__m256 src256 = _mm256_loadu_ps(&src[index]);
__m256 src256_2 = _mm256_permute_ps(src256, (1 << 0) | (3 << 4));
__m256 max256 = _mm256_max_ps(src256, src256_2);
__m128 src128_0 = _mm256_extractf128_ps(max256, 0);
__m128 src128_1 = _mm256_extractf128_ps(max256, 1);
__m128 src128 = _mm_shuffle_ps(src128_0, src128_1, (2 << 2) | (2 << 6));
max128 = _mm_max_ps(src128, max128);
}
}
_mm_storeu_ps(&dst[out_index], max128);
}
}
for (; j < out_w; ++j) {
int out_index = j + out_w*(i + out_h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
float val = (valid != 0) ? src[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
dst[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
}
}
}
}
#else // AVX
int is_avx() {
return 0;
}
int is_fma_avx2() {
return 0;
}
void gemm_nn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA * A[i * lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
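/*
Usage sketch for gemm_nn(), with hypothetical values: row-major
C += ALPHA * A*B, where lda/ldb/ldc are row strides (equal to the column
counts here, but they may be larger when operating on a submatrix).
C must be initialized by the caller because the function accumulates.
*/
static inline void gemm_nn_usage_sketch(void)
{
float A[6] = { 1, 2, 3, 4, 5, 6 };    /* 2x3, lda = 3 */
float B[6] = { 7, 8, 9, 10, 11, 12 }; /* 3x2, ldb = 2 */
float C[4] = { 0, 0, 0, 0 };          /* 2x2, ldc = 2 */
gemm_nn(2, 2, 3, 1.0f, A, 3, B, 2, C, 2);
/* C is now { 58, 64, 139, 154 } */
}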
void gemm_nn_fast(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i, j, k;
#pragma omp parallel for
for (i = 0; i < M; ++i) {
for (k = 0; k < K; ++k) {
PUT_IN_REGISTER float A_PART = ALPHA*A[i*lda + k];
for (j = 0; j < N; ++j) {
C[i*ldc + j] += A_PART*B[k*ldb + j];
}
}
}
}
void gemm_nn_bin_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
//printf(" l.mean_arr[i] = %d \n ", l.mean_arr[i]);
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
//PUT_IN_REGISTER float A_PART = 1*a[i*k + s];
PUT_IN_REGISTER uint32_t A_PART = A[i * lda + s];
for (j = 0; j < N; ++j) // out_h*out_w;
{
//c[i*n + j] += A_PART*b[s*n + j];
PUT_IN_REGISTER uint32_t B_PART = B[s * ldb + j];
uint32_t xnor_result = ~(A_PART ^ B_PART);
//printf(" xnor_result = %d, ", xnor_result);
int32_t count = popcnt_32(xnor_result); // must be signed: 2*count - 32 below can be negative
C[i*ldc + j] += (2 * count - 32) * mean_val;
//c[i*n + j] += count*mean;
}
}
}
}
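/*
Why "2 * count - 32" above: each uint32_t packs 32 values from {-1,+1}
encoded as the bits {0,1}. popcnt_32(~(a ^ b)) counts the positions where
the two vectors agree; with count matches and 32 - count mismatches, the
+/-1 dot product over those 32 lanes is count - (32 - count) = 2*count - 32,
which is then scaled by the per-filter mean. A minimal per-word sketch
(hypothetical helper name):
*/
static inline int xnor_dot32(uint32_t a, uint32_t b)
{
/* dot product of two 32-element {-1,+1} vectors stored as sign bits */
return 2 * popcnt_32(~(a ^ b)) - 32;
}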
void convolution_2d(int w, int h, int ksize, int n, int c, int pad, int stride,
float *weights, float *input, float *output, float *mean)
{
const int out_h = (h + 2 * pad - ksize) / stride + 1; // output_height=input_height for stride=1 and pad=1
const int out_w = (w + 2 * pad - ksize) / stride + 1; // output_width=input_width for stride=1 and pad=1
//int i, f, j;
int fil;
// filter index
#pragma omp parallel for // "omp parallel for" - automatic parallelization of loop by using OpenMP
for (fil = 0; fil < n; ++fil) {
int chan, y, x, f_y, f_x;
// channel index
for (chan = 0; chan < c; ++chan)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
int const weights_pre_index = fil*c*ksize*ksize + chan*ksize*ksize;
int const input_pre_index = chan*w*h;
float sum = 0;
// filter - y
for (f_y = 0; f_y < ksize; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < ksize; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
int input_index = input_pre_index + input_y*w + input_x;
int weights_index = weights_pre_index + f_y*ksize + f_x;
sum += input[input_index] * weights[weights_index];
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
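/*
Usage sketch for convolution_2d(), with hypothetical sizes: one 3x3
identity kernel over a single 5x5 channel with pad = 1. The function
accumulates into output with "+=", so the buffer must be zeroed first;
note also that the body indexes as if stride == 1 (stride only enters the
out_h/out_w computation) and that the mean argument is not read by this
implementation.
*/
static inline void convolution_2d_sketch(void)
{
float weights[9] = { 0, 0, 0, 0, 1, 0, 0, 0, 0 }; /* 3x3 identity kernel */
float input[25];
float output[25] = { 0 }; /* must be zeroed: the function accumulates */
int i;
for (i = 0; i < 25; ++i) input[i] = (float)i;
/* w=5, h=5, ksize=3, n=1 filter, c=1 channel, pad=1, stride=1 */
convolution_2d(5, 5, 3, 1, 1, 1, 1, weights, input, output, NULL);
/* with the identity kernel the output reproduces the input */
}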
static inline int popcnt_64(uint64_t val64) {
#ifdef WIN32 // Windows
#ifdef _WIN64 // Windows 64-bit
int tmp_count = __popcnt64(val64);
#else // Windows 32-bit
int tmp_count = __popcnt(val64);
tmp_count += __popcnt(val64 >> 32);
#endif
#else // Linux
#if defined(__x86_64__) || defined(__aarch64__) // Linux 64-bit
int tmp_count = __builtin_popcountll(val64);
#else // Linux 32-bit
int tmp_count = __builtin_popcount(val64);
tmp_count += __builtin_popcount(val64 >> 32);
#endif
#endif
return tmp_count;
}
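/*
Bit layout assumed by gemm_nn_custom_bin_mean_transposed() below: A packs
M rows of K sign bits with a row stride of lda bits, and B packs N rows of
K sign bits with a row stride of ldb bits (i.e. B is already transposed).
Rows are expected to be zero-padded up to the stride; the "remove extra
bits" correction in the inner loop subtracts the padding positions of the
last partial 64-bit word, which XNOR of two zero bits would otherwise
count as matches.
*/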
void gemm_nn_custom_bin_mean_transposed(int M, int N, int K, float ALPHA_UNUSED,
unsigned char *A, int lda,
unsigned char *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n - filters [16 - 55 - 1024]
int j, k;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) { // out_h*out_w - one channel output size [169 - 173056]
int count = 0;
for (k = 0; k < K; k += 64) { // l.size*l.size*l.c - one filter size [27 - 9216]
uint64_t a_bit64 = *((uint64_t *)(A + (i*lda + k) / 8));
uint64_t b_bit64 = *((uint64_t *)(B + (j*ldb + k) / 8));
uint64_t c_bit64 = xnor_int64(a_bit64, b_bit64);
int tmp_count = popcnt_64(c_bit64);
if (K - k < 64) tmp_count = tmp_count - (64 - (K - k)); // remove extra bits
count += tmp_count;
//binary_int64_printf(c_bit64);
//printf(", count = %d \n\n", tmp_count);
}
C[i*ldc + j] = (2 * count - K) * mean_val;
}
}
}
void im2col_cpu_custom_transpose(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int ldb_align)
{
printf("\n im2col_cpu_custom_transpose() isn't implemented without AVX \n");
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col)
{
// without AVX we fall back to the generic im2col and return here; the
// code below is unreachable and only mirrors the structure of the AVX build
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
return;
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
int col_index = (c * height_col + h) * width_col + w;
data_col[col_index] = im2col_get_pixel(data_im, height, width, channels,
im_row, im_col, c_im, pad);
}
}
}
}
else {
//printf("\n Error: is no non-optimized version \n");
im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col);
}
}
//From Berkeley Vision's Caffe!
//https://github.com/BVLC/caffe/blob/master/LICENSE
void im2col_cpu_custom_bin(float* data_im,
int channels, int height, int width,
int ksize, int stride, int pad, float* data_col, int bit_align)
{
int c;
const int height_col = (height + 2 * pad - ksize) / stride + 1;
const int width_col = (width + 2 * pad - ksize) / stride + 1;
const int channels_col = channels * ksize * ksize;
// optimized version
if (height_col == height && width_col == width && stride == 1 && pad == 1)
{
int new_ldb = bit_align;
#pragma omp parallel for
for (c = 0; c < channels_col; ++c) {
int h, w;
int w_offset = c % ksize;
int h_offset = (c / ksize) % ksize;
int c_im = c / ksize / ksize;
for (h = pad; h < height_col - pad; ++h) {
for (w = pad; w < width_col - pad - 8; w += 1) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
for (; w < width_col - pad; ++w) {
int im_row = h_offset + h - pad;
int im_col = w_offset + w - pad;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = data_im[im_col + width*(im_row + height*c_im)];
float val = data_im[im_col + width*(im_row + height*c_im)];
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
w = 0;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
w = width_col - 1;
for (h = 0; h < height_col; ++h) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
h = 0;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
{
h = height_col - 1;
for (w = 0; w < width_col; ++w) {
int im_row = h_offset + h;
int im_col = w_offset + w;
//int col_index = (c * height_col + h) * width_col + w;
int col_index = c * new_ldb + h * width_col + w;
//data_col[col_index] = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
float val = im2col_get_pixel(data_im, height, width, channels, im_row, im_col, c_im, pad);
if (val > 0) set_bit((unsigned char*)data_col, col_index);
}
}
}
}
else {
printf("\n Error: is no non-optimized version \n");
//im2col_cpu(data_im, channels, height, width, ksize, stride, pad, data_col); // must be aligned for transpose after float_to_bin
// float_to_bit(b, t_input, src_size);
// transpose_bin(t_input, *t_bit_input, k, n, bit_align, new_ldb, 8);
}
}
void activate_array_cpu_custom(float *x, const int n, const ACTIVATION a)
{
int i;
if (a == LINEAR)
{
}
else if (a == LEAKY)
{
for (i = 0; i < n; ++i) {
x[i] = (x[i]>0) ? x[i] : .1*x[i];
}
}
else {
for (i = 0; i < n; ++i) {
x[i] = activate(x[i], a);
}
}
}
void float_to_bit(float *src, unsigned char *dst, size_t size)
{
size_t dst_size = size / 8 + 1;
memset(dst, 0, dst_size);
size_t i;
// +8 bytes of slack so the unrolled packing loop below cannot read past
// the end when size is not a multiple of 8
char* byte_arr = (char*)xcalloc(size + 8, sizeof(char));
for (i = 0; i < size; ++i) {
if (src[i] > 0) byte_arr[i] = 1;
}
//for (i = 0; i < size; ++i) {
// dst[i / 8] |= byte_arr[i] << (i % 8);
//}
for (i = 0; i < size; i += 8) {
char dst_tmp = 0;
dst_tmp |= byte_arr[i + 0] << 0;
dst_tmp |= byte_arr[i + 1] << 1;
dst_tmp |= byte_arr[i + 2] << 2;
dst_tmp |= byte_arr[i + 3] << 3;
dst_tmp |= byte_arr[i + 4] << 4;
dst_tmp |= byte_arr[i + 5] << 5;
dst_tmp |= byte_arr[i + 6] << 6;
dst_tmp |= byte_arr[i + 7] << 7;
dst[i / 8] = dst_tmp;
}
free(byte_arr);
}
static inline void transpose_scalar_block(float *A, float *B, const int lda, const int ldb, const int block_size)
{
int i;
//#pragma omp parallel for
for (i = 0; i<block_size; i++) {
int j;
for (j = 0; j<block_size; j++) {
B[j*ldb + i] = A[i*lda + j];
}
}
}
void transpose_block_SSE4x4(float *A, float *B, const int n, const int m,
const int lda, const int ldb, const int block_size)
{
int i;
#pragma omp parallel for
for (i = 0; i < n; i += block_size) {
int j, i2, j2;
for (j = 0; j < m; j += block_size) {
int max_i2 = i + block_size < n ? i + block_size : n;
int max_j2 = j + block_size < m ? j + block_size : m;
for (i2 = i; i2 < max_i2; ++i2) {
for (j2 = j; j2 < max_j2; ++j2) {
B[j2*ldb + i2] = A[i2*lda + j2];
}
}
}
}
}
void forward_maxpool_layer_avx(float *src, float *dst, int *indexes, int size, int w, int h, int out_w, int out_h, int c,
int pad, int stride, int batch)
{
int b, k;
const int w_offset = -pad / 2;
const int h_offset = -pad / 2;
for (b = 0; b < batch; ++b) {
#pragma omp parallel for
for (k = 0; k < c; ++k) {
int i, j, m, n;
for (i = 0; i < out_h; ++i) {
for (j = 0; j < out_w; ++j) {
int out_index = j + out_w*(i + out_h*(k + c*b));
float max = -FLT_MAX;
int max_i = -1;
for (n = 0; n < size; ++n) {
for (m = 0; m < size; ++m) {
int cur_h = h_offset + i*stride + n;
int cur_w = w_offset + j*stride + m;
int index = cur_w + w*(cur_h + h*(k + b*c));
int valid = (cur_h >= 0 && cur_h < h &&
cur_w >= 0 && cur_w < w);
float val = (valid != 0) ? src[index] : -FLT_MAX;
max_i = (val > max) ? index : max_i;
max = (val > max) ? val : max;
}
}
dst[out_index] = max;
if (indexes) indexes[out_index] = max_i;
}
}
}
}
}
#endif // AVX
// 32 channels -> 1 channel (with 32 floats)
// 256 channels -> 8 channels (with 32 floats)
void repack_input(float *input, float *re_packed_input, int w, int h, int c)
{
const int items_per_channel = w * h;
int chan, i;
for (chan = 0; chan < c; chan += 32)
{
for (i = 0; i < items_per_channel; ++i)
{
int c_pack;
for (c_pack = 0; c_pack < 32; ++c_pack) {
float src = input[(chan + c_pack)*items_per_channel + i];
re_packed_input[chan*items_per_channel + i * 32 + c_pack] = src;
}
}
}
}
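/*
Index sketch for repack_input(), with hypothetical sizes w = h = 2, c = 64
(items_per_channel = 4): the element with chan + c_pack = 35 at pixel
i = 1, i.e. input[35*4 + 1] = input[141], lands at
re_packed_input[32*4 + 1*32 + 3] = re_packed_input[163]. In general the
32 channel values of one pixel become contiguous, ready to be packed into
a single uint32_t of sign bits by the binary GEMM path.
*/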
void transpose_uint32(uint32_t *src, uint32_t *dst, int src_h, int src_w, int src_align, int dst_align)
{
//l.bit_align - aligned (n) by 32
//new_ldb - aligned (k) by 256
int i;
//#pragma omp parallel for
for (i = 0; i < src_h; i += 1) // l.size*l.size*l.c;
{
int j;
for (j = 0; j < src_w; j += 1) // out_h*out_w;
{
((uint32_t *)dst)[j*dst_align / 32 + i] = ((uint32_t *)src)[i*src_align + j];
}
}
}
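/*
transpose_uint32() above treats src as src_h rows of src_w uint32_t words
(each word holding 32 packed bits) and writes the transpose into dst, so
dst row j, column i receives src row i, column j. Note the asymmetric
units, matching the callers' bookkeeping: the src row stride is src_align
words, while the dst row stride is dst_align/32 words.
*/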
void gemm_nn_bin_transposed_32bit_packed(int M, int N, int K, float ALPHA,
uint32_t *A, int lda,
uint32_t *B, int ldb,
float *C, int ldc, float *mean_arr)
{
int i;
#pragma omp parallel for
for (i = 0; i < M; ++i) { // l.n
int j, s;
float mean_val = mean_arr[i];
for (j = 0; j < N; ++j) // out_h*out_w;
{
float val = 0;
for (s = 0; s < K; ++s) // l.size*l.size*l.c/32 or (l.size*l.size*l.c)
{
PUT_IN_REGISTER uint32_t A_PART = ((uint32_t*)A)[i*lda + s];
PUT_IN_REGISTER uint32_t B_PART = ((uint32_t*)B)[j * ldb + s];
uint32_t xnor_result = ~(A_PART ^ B_PART);
int32_t count = popcnt_32(xnor_result); // must be signed: 2*count - 32 below can be negative
val += (2 * count - 32) * mean_val;
}
C[i*ldc + j] += val;
}
}
}
void convolution_repacked(uint32_t *packed_input, uint32_t *packed_weights, float *output,
int w, int h, int c, int n, int size, int pad, int new_lda, float *mean_arr)
{
int fil;
// filter index
#pragma omp parallel for
for (fil = 0; fil < n; ++fil) {
float mean_val = mean_arr[fil];
int chan, y, x, f_y, f_x; // c_pack
// channel index
for (chan = 0; chan < c / 32; ++chan)
//for (chan = 0; chan < l.c; chan += 32)
//for (c_pack = 0; c_pack < 32; ++c_pack)
// input - y
for (y = 0; y < h; ++y)
// input - x
for (x = 0; x < w; ++x)
{
int const output_index = fil*w*h + y*w + x;
float sum = 0;
// filter - y
for (f_y = 0; f_y < size; ++f_y)
{
int input_y = y + f_y - pad;
// filter - x
for (f_x = 0; f_x < size; ++f_x)
{
int input_x = x + f_x - pad;
if (input_y < 0 || input_x < 0 || input_y >= h || input_x >= w) continue;
// normal
//float input = state.input[(chan + c_pack)*l.w*l.h + input_y*l.w + input_x];
//float weight = l.weights[fil*l.c*l.size*l.size + (chan + c_pack)*l.size*l.size + f_y*l.size + f_x];
// packed
//float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
//float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
//sum += input * weight;
//float input = re_packed_input[chan*l.w*l.h + (input_y*l.w + input_x) * 32 + c_pack];
//float weight = l.weights[fil*l.c*l.size*l.size + chan*l.size*l.size + (f_y*l.size + f_x) * 32 + c_pack];
//uint32_t bit1 = input > 0;
//uint32_t bit2 = weight > 0;
//uint32_t count = (~(bit1 ^ bit2)) & 1;
//float result = (2 * (float)count - 1) * mean_val;
//printf("\n mul = %f, bit1 = %d, bit2 = %d, count = %d, mean = %f, result = %f ", input*weight, bit1, bit2, count, mean_val, result);
//sum += result;
uint32_t input = ((uint32_t *)packed_input)[chan*w*h + input_y*w + input_x];
//uint32_t weight = ((uint32_t *)l.align_bit_weights)[fil*l.c*l.size*l.size/32 + chan*l.size*l.size + f_y*l.size + f_x];
uint32_t weight = ((uint32_t *)packed_weights)[fil*new_lda / 32 + chan*size*size + f_y*size + f_x];
uint32_t xnor_result = ~(input ^ weight);
int32_t count = popcnt_32(xnor_result); // must be signed: 2*count - 32 below can be negative
sum += (2 * count - 32) * mean_val;
}
}
// l.output[filters][width][height] +=
// state.input[channels][width][height] *
// l.weights[filters][channels][filter_width][filter_height];
output[output_index] += sum;
}
}
}
void gemm_nt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
PUT_IN_REGISTER float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i*lda+k]*B[j*ldb + k];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_tn(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(k = 0; k < K; ++k){
PUT_IN_REGISTER float A_PART = ALPHA * A[k * lda + i];
for(j = 0; j < N; ++j){
C[i*ldc+j] += A_PART*B[k*ldb+j];
}
}
}
}
void gemm_tt(int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float *C, int ldc)
{
int i,j,k;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
PUT_IN_REGISTER float sum = 0;
for(k = 0; k < K; ++k){
sum += ALPHA*A[i+k*lda]*B[k+j*ldb];
}
C[i*ldc+j] += sum;
}
}
}
void gemm_cpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
//printf("cpu: %d %d %d %d %d %f %d %d %f %d\n",TA, TB, M, N, K, ALPHA, lda, ldb, BETA, ldc);
if (BETA != 1){
int i, j;
for(i = 0; i < M; ++i){
for(j = 0; j < N; ++j){
C[i*ldc + j] *= BETA;
}
}
}
is_avx(); // initialize static variable
if (is_fma_avx2() && !TA && !TB) {
gemm_nn_fast(M, N, K, ALPHA, A, lda, B, ldb, C, ldc);
}
else {
int t;
#pragma omp parallel for
for (t = 0; t < M; ++t) {
if (!TA && !TB)
gemm_nn(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else if (TA && !TB)
gemm_tn(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
else if (!TA && TB)
gemm_nt(1, N, K, ALPHA, A + t*lda, lda, B, ldb, C + t*ldc, ldc);
else
gemm_tt(1, N, K, ALPHA, A + t, lda, B, ldb, C + t*ldc, ldc);
}
}
}
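/*
Usage sketch for gemm_cpu(), with hypothetical values: TA/TB select
transposition of A/B, and BETA pre-scales C (BETA = 0 is still a
multiply, not a memset, so C must hold finite values on entry). With
TA = TB = 0 this is plain row-major C = ALPHA*A*B:
*/
static inline void gemm_cpu_usage_sketch(void)
{
float A[6] = { 1, 2, 3, 4, 5, 6 }; /* 2x3 */
float B[6] = { 1, 0, 0, 1, 1, 1 }; /* 3x2 */
float C[4] = { 0, 0, 0, 0 };       /* 2x2 */
gemm_cpu(0, 0, 2, 2, 3, 1.0f, A, 3, B, 2, 0.0f, C, 2);
/* C is now { 4, 5, 10, 11 } */
}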
#ifdef GPU
#include <math.h>
void gemm_ongpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A_gpu, int lda,
float *B_gpu, int ldb,
float BETA,
float *C_gpu, int ldc)
{
cublasHandle_t handle = blas_handle();
cudaError_t stream_status = (cudaError_t)cublasSetStream(handle, get_cuda_stream());
CHECK_CUDA(stream_status);
cudaError_t status = (cudaError_t)cublasSgemm(handle, (TB ? CUBLAS_OP_T : CUBLAS_OP_N),
(TA ? CUBLAS_OP_T : CUBLAS_OP_N), N, M, K, &ALPHA, B_gpu, ldb, A_gpu, lda, &BETA, C_gpu, ldc);
CHECK_CUDA(status);
}
void gemm_gpu(int TA, int TB, int M, int N, int K, float ALPHA,
float *A, int lda,
float *B, int ldb,
float BETA,
float *C, int ldc)
{
float *A_gpu = cuda_make_array(A, (TA ? lda*K:lda*M));
float *B_gpu = cuda_make_array(B, (TB ? ldb*N : ldb*K));
float *C_gpu = cuda_make_array(C, ldc*M);
gemm_ongpu(TA, TB, M, N, K, ALPHA, A_gpu, lda, B_gpu, ldb, BETA, C_gpu, ldc);
cuda_pull_array(C_gpu, C, ldc*M);
cuda_free(A_gpu);
cuda_free(B_gpu);
cuda_free(C_gpu);
}
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
void time_gpu_random_matrix(int TA, int TB, int m, int k, int n)
{
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
int i;
clock_t start = clock(), end;
for(i = 0; i<32; ++i){
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
}
end = clock();
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s\n",m,k,k,n, TA, TB, (float)(end-start)/CLOCKS_PER_SEC);
free(a);
free(b);
free(c);
}
void time_ongpu(int TA, int TB, int m, int k, int n)
{
int iter = 10;
float *a = random_matrix(m,k);
float *b = random_matrix(k,n);
int lda = (!TA)?k:m;
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *a_cl = cuda_make_array(a, m*k);
float *b_cl = cuda_make_array(b, k*n);
float *c_cl = cuda_make_array(c, m*n);
int i;
clock_t start = clock(), end;
for(i = 0; i<iter; ++i){
gemm_ongpu(TA,TB,m,n,k,1,a_cl,lda,b_cl,ldb,1,c_cl,n);
cudaDeviceSynchronize();
}
double flop = ((double)m)*n*(2.*k + 2.)*iter;
double gflop = flop/pow(10., 9);
end = clock();
double seconds = sec(end-start);
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %lf s, %lf GFLOPS\n",m,k,k,n, TA, TB, seconds, gflop/seconds);
cuda_free(a_cl);
cuda_free(b_cl);
cuda_free(c_cl);
free(a);
free(b);
free(c);
}
void test_gpu_accuracy(int TA, int TB, int m, int k, int n)
{
srand(0);
float *a;
if(!TA) a = random_matrix(m,k);
else a = random_matrix(k,m);
int lda = (!TA)?k:m;
float *b;
if(!TB) b = random_matrix(k,n);
else b = random_matrix(n,k);
int ldb = (!TB)?n:k;
float *c = random_matrix(m,n);
float *c_gpu = random_matrix(m,n);
memset(c, 0, m*n*sizeof(float));
memset(c_gpu, 0, m*n*sizeof(float));
int i;
//pm(m,k,b);
gemm_gpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c_gpu,n);
//printf("GPU\n");
//pm(m, n, c_gpu);
gemm_cpu(TA,TB,m,n,k,1,a,lda,b,ldb,1,c,n);
//printf("\n\nCPU\n");
//pm(m, n, c);
double sse = 0;
for(i = 0; i < m*n; ++i) {
//printf("%f %f\n", c[i], c_gpu[i]);
sse += pow(c[i]-c_gpu[i], 2);
}
printf("Matrix Multiplication %dx%d * %dx%d, TA=%d, TB=%d: %g SSE\n",m,k,k,n, TA, TB, sse/(m*n));
free(a);
free(b);
free(c);
free(c_gpu);
}
int test_gpu_blas()
{
/*
test_gpu_accuracy(0,0,10,576,75);
test_gpu_accuracy(0,0,17,10,10);
test_gpu_accuracy(1,0,17,10,10);
test_gpu_accuracy(0,1,17,10,10);
test_gpu_accuracy(1,1,17,10,10);
test_gpu_accuracy(0,0,1000,10,100);
test_gpu_accuracy(1,0,1000,10,100);
test_gpu_accuracy(0,1,1000,10,100);
test_gpu_accuracy(1,1,1000,10,100);
test_gpu_accuracy(0,0,10,10,10);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,64,2916,363);
time_ongpu(0,0,192,729,1600);
time_ongpu(0,0,384,196,1728);
time_ongpu(0,0,256,196,3456);
time_ongpu(0,0,256,196,2304);
time_ongpu(0,0,128,4096,12544);
time_ongpu(0,0,128,4096,4096);
*/
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,75,12544);
time_ongpu(0,0,64,576,12544);
time_ongpu(0,0,256,2304,784);
time_ongpu(1,1,2304,256,784);
time_ongpu(0,0,512,4608,196);
time_ongpu(1,1,4608,512,196);
return 0;
}
#endif
void init_cpu() {
is_avx();
is_fma_avx2();
}
|
SpatialConvolutionMap.c | #ifndef TH_GENERIC_FILE
#define TH_GENERIC_FILE "generic/SpatialConvolutionMap.c"
#else
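/*
connTable, as read back below, is an nkernel x 2 table of
(inputPlane, outputPlane) pairs stored TH_INDEX_BASE-based: row k routes
input plane connTable[k][0] through the k-th kH x kW slice of weight into
output plane connTable[k][1].
*/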
void THNN_(SpatialConvolutionMap_updateOutput)(
THNNState *state, THTensor *input, THTensor *output, THTensor *weight, THTensor *bias,
THTensor *connTable, int nInputPlane, int nOutputPlane,
int dW, int dH)
{
THArgCheck(
weight != NULL && weight->nDimension == 3
&& connTable != NULL && connTable->size[0] == weight->size[0], 4,
"3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
int dimw = 2;
int dimh = 1;
int dimc = 0;
long nbatch = 1;
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D(batch mode) tensor expected");
if (input->nDimension == 4)
{
nbatch = input->size[0];
dimc++;
dimw++;
dimh++;
}
const long kH = weight->size[1];
const long kW = weight->size[2];
THArgCheck(input->size[dimc] >= nInputPlane, 2, "invalid number of input planes");
THArgCheck(input->size[dimw] >= kW && input->size[dimh] >= kH, 2, "input image smaller than kernel size");
const long input_w = input->size[dimw];
const long input_h = input->size[dimh];
const long output_w = (input_w - kW) / dW + 1;
const long output_h = (input_h - kH) / dH + 1;
if (input->nDimension == 3)
THTensor_(resize3d)(output, nOutputPlane, output_h, output_w);
else
THTensor_(resize4d)(output, input->size[0], nOutputPlane, output_h, output_w);
/* contiguous */
input = THTensor_(newContiguous)(input);
output = THTensor_(newContiguous)(output);
weight = THTensor_(newContiguous)(weight);
bias = bias ? THTensor_(newContiguous)(bias) : bias;
connTable = THTensor_(newContiguous)(connTable);
/* get raw pointers */
real *input_data = THTensor_(data)(input);
real *output_data = THTensor_(data)(output);
real *weight_data = THTensor_(data)(weight);
real *bias_data = THTensor_(data)(bias);
real *connTable_data = THTensor_(data)(connTable);
long p;
#pragma omp parallel for private(p)
for (p = 0; p < nOutputPlane; p++)
{
long m;
for (m = 0; m < nbatch; m++)
{
/* add bias */
real *ptr_output = output_data + p*output_w*output_h + m*nOutputPlane*output_w*output_h;
long j, k;
real z= bias_data[p];
for (j = 0; j < output_h*output_w; j++)
ptr_output[j] = z;
/* convolve all maps */
int nweight = connTable->size[0];
for (k = 0; k < nweight; k++)
{
/* get offsets for input/output */
int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;
if (o == p)
{
THTensor_(validXCorr2Dptr)(
output_data + o*output_w*output_h + m*nOutputPlane*output_w*output_h,
1.0,
input_data + i*input_w*input_h + m*nInputPlane*input_w*input_h, input_h, input_w,
weight_data + k*kW*kH,
kH, kW,
dH, dW
);
}
}
}
}
/* clean up */
THTensor_(free)(input);
THTensor_(free)(output);
THTensor_(free)(weight);
if (bias) THTensor_(free)(bias);
THTensor_(free)(connTable);
}
void THNN_(SpatialConvolutionMap_updateGradInput)(
THNNState *state, THTensor *input, THTensor *gradOutput, THTensor *gradInput, THTensor *weight, THTensor *bias,
THTensor *connTable, int nInputPlane, int nOutputPlane,
int dW, int dH)
{
THArgCheck(
weight != NULL && weight->nDimension == 3
&& connTable != NULL && connTable->size[0] == weight->size[0], 5,
"3D weight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
/* and dims */
int dimw = 2;
int dimh = 1;
long nbatch = 1;
if (input->nDimension == 4)
{
nbatch = input->size[0];
dimw++;
dimh++;
}
const long input_h = input->size[dimh];
const long input_w = input->size[dimw];
const long output_h = gradOutput->size[dimh];
const long output_w = gradOutput->size[dimw];
const long kH = weight->size[1];
const long kW = weight->size[2];
/* contiguous */
gradInput = THTensor_(newContiguous)(gradInput);
gradOutput = THTensor_(newContiguous)(gradOutput);
weight = THTensor_(newContiguous)(weight);
connTable = THTensor_(newContiguous)(connTable);
/* Resize/Zero */
THTensor_(resizeAs)(gradInput, input);
THTensor_(zero)(gradInput);
/* get raw pointers */
real *gradInput_data = THTensor_(data)(gradInput);
real *gradOutput_data = THTensor_(data)(gradOutput);
real *weight_data = THTensor_(data)(weight);
real *connTable_data = THTensor_(data)(connTable);
long p;
#pragma omp parallel for private(p)
for (p = 0; p < nInputPlane; p++)
{
long m;
for (m = 0; m < nbatch; m++)
{
long k;
/* backward all */
int nkernel = connTable->size[0];
for (k = 0; k < nkernel; k++)
{
int o = (int)connTable_data[k*2+1] - TH_INDEX_BASE;
int i = (int)connTable_data[k*2+0] - TH_INDEX_BASE;
if (i == p)
{
/* gradient to input */
THTensor_(fullConv2Dptr)(
gradInput_data + i*input_w*input_h + m*nInputPlane*input_w*input_h, 1.0,
gradOutput_data + o*output_w*output_h + m*nOutputPlane*output_w*output_h, output_h, output_w,
weight_data + k*kW*kH, kH, kW, dH, dW
);
}
}
}
}
/* clean up */
THTensor_(free)(gradInput);
THTensor_(free)(gradOutput);
THTensor_(free)(weight);
THTensor_(free)(connTable);
}
void THNN_(SpatialConvolutionMap_accGradParameters)(
THNNState *state,
THTensor *input,
THTensor *gradOutput,
THTensor *gradWeight,
THTensor *gradBias,
THTensor *connTable,
int nInputPlane,
int nOutputPlane,
int dW, int dH,
accreal scale_)
{
real scale = TH_CONVERT_ACCREAL_TO_REAL(scale_);
THArgCheck(
gradWeight != NULL && gradWeight->nDimension == 3
&& connTable != NULL && connTable->size[0] == gradWeight->size[0], 5,
"3D gradWeight tensor expected (connTable:size(%d) x kH x kW)", TH_INDEX_BASE
);
/* and dims */
int dimw = 2;
int dimh = 1;
long nbatch = 1;
if (input->nDimension == 4)
{
nbatch = input->size[0];
dimw++;
dimh++;
}
const long input_h = input->size[dimh];
const long input_w = input->size[dimw];
const long output_h = gradOutput->size[dimh];
const long output_w = gradOutput->size[dimw];
const long kH = gradWeight->size[1];
const long kW = gradWeight->size[2];
/* contiguous */
input = THTensor_(newContiguous)(input);
gradOutput = THTensor_(newContiguous)(gradOutput);
THArgCheck(THTensor_(isContiguous)(gradWeight), 4, "gradWeight needs to be contiguous");
THArgCheck(THTensor_(isContiguous)(gradBias), 5, "gradBias needs to be contiguous");
/* get raw pointers */
real *input_data = THTensor_(data)(input);
real *gradOutput_data = THTensor_(data)(gradOutput);
real *gradWeight_data = THTensor_(data)(gradWeight);
real *gradBias_data = THTensor_(data)(gradBias);
long k;
/* gradients wrt bias */
#pragma omp parallel for private(k)
for (k = 0; k < nOutputPlane; k++)
{
long m;
for (m = 0; m < nbatch; m++)
{
real *ptr_gradOutput = gradOutput_data + k*output_w*output_h + m*nOutputPlane*output_w*output_h;
long l;
for (l = 0; l < output_h*output_w; l++)
gradBias_data[k] += scale*ptr_gradOutput[l];
}
}
/* gradients wrt weight */
const int nkernel = connTable->size[0];
#pragma omp parallel for private(k)
for (k = 0; k < nkernel; k++)
{
long m;
for (m = 0; m < nbatch; m++)
{
int o = (int)THTensor_(get2d)(connTable,k,1) - TH_INDEX_BASE;
int i = (int)THTensor_(get2d)(connTable,k,0) - TH_INDEX_BASE;
/* gradient to kernel */
THTensor_(validXCorr2DRevptr)(
gradWeight_data + k*kW*kH,
scale,
input_data + i*input_w*input_h + m*nInputPlane*input_w*input_h, input_h, input_w,
gradOutput_data + o*output_w*output_h + m*nOutputPlane*output_w*output_h , output_h, output_w,
dH, dW
);
}
}
/* clean up */
THTensor_(free)(input);
THTensor_(free)(gradOutput);
}
#endif
|
heat_1d-a.pluto.c | #include <omp.h>
#include <math.h>
#define ceild(n,d) ceil(((double)(n))/((double)(d)))
#define floord(n,d) floor(((double)(n))/((double)(d)))
#define max(x,y) ((x) > (y)? (x) : (y))
#define min(x,y) ((x) < (y)? (x) : (y))
/*
* Discretized 1D heat equation stencil with non periodic boundary conditions
* Adapted from Pochoir test bench
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include <math.h>
/*
* N is the number of points
* T is the number of timesteps
*/
#ifdef HAS_DECLS
#include "decls.h"
#else
#define N 10000L
#define T 10000L
#endif
#define NUM_FP_OPS 4
/* Define our arrays */
double A[2][N];
double total=0; double sum_err_sqr=0;
long int chtotal=0;
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y) {
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
return x->tv_sec < y->tv_sec;
}
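/*
Usage sketch (hypothetical helper, mirroring the TIME block below) for
timeval_subtract(): it turns two gettimeofday() samples into seconds;
the function's return value is 1 if the difference came out negative.
*/
static inline double elapsed_seconds_sketch(struct timeval *t_start, struct timeval *t_end)
{
struct timeval diff;
timeval_subtract(&diff, t_end, t_start); /* diff = *t_end - *t_start */
return diff.tv_sec + diff.tv_usec * 1.0e-6;
}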
int main(int argc, char * argv[]) {
long int t, i, j, k;
const int BASE = 1024;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0;
long count=0;
printf("Number of points = %ld\t|Number of timesteps = %ld\t", N, T);
/* Initialization */
srand(42); // seed with a constant value to verify results
for (i = 0; i < N; i++) {
A[0][i] = 1.0 * (rand() % BASE);
}
#ifdef TIME
gettimeofday(&start, 0);
#endif
#undef T
#define T 5000
int t1, t2, t3, t4, t5;
int lb, ub, lbp, ubp, lb2, ub2;
register int lbv, ubv;
/* Start of CLooG code */
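/*
The nest below is the Pluto/CLooG-generated time-tiled schedule of the
stencil: t1 walks tile wavefronts, t2 the independent tiles inside a
wavefront (parallelized with OpenMP), and t4 the points inside a tile;
each t4 iteration fuses two time steps (the A[1][...] update followed by
the A[0][...] update), ping-ponging between the two planes of A.
*/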
if ((N >= 1) && (T >= 1)) {
for (t1=0;t1<=floord(258*T+N-258,256);t1++) {
lbp=max(ceild(t1-127,129),t1-T+1);
ubp=min(floord(2*t1+N,258),t1);
#pragma omp parallel for private(lbv,ubv,t3,t4,t5)
for (t2=lbp;t2<=ubp;t2++) {
if ((N >= 2) && (t1 <= floord(258*t2-N+255,2)) && (t1 >= 129*t2)) {
/* non-periodic boundaries: an out-of-range neighbour contributes 0 */
A[1][0] = 0.125 * (((0 == N-1) ? 0 : A[0][0 +1]) - 2.0*A[0][0] + ((0 == 0) ? 0 : A[0][0 -1]));
for (t4=2*t1-2*t2+1;t4<=2*t1-2*t2+N-1;t4++) {
A[1][(-2*t1+2*t2+t4)] = 0.125 * ((((-2*t1+2*t2+t4) == N-1) ? 0 : A[0][(-2*t1+2*t2+t4)+1]) - 2.0*A[0][(-2*t1+2*t2+t4)] + (((-2*t1+2*t2+t4) == 0) ? 0 : A[0][(-2*t1+2*t2+t4)-1]));
A[0][(-2*t1+2*t2+t4-1)] = 0.125 * ((((-2*t1+2*t2+t4-1) == N-1) ? 0 : A[1][(-2*t1+2*t2+t4-1)+1]) - 2.0*A[1][(-2*t1+2*t2+t4-1)] + (((-2*t1+2*t2+t4-1) == 0) ? 0 : A[1][(-2*t1+2*t2+t4-1)-1]));
}
A[0][(N-1)] = 0.125 * ((((N-1) == N-1) ? 0 : A[1][(N-1)+1]) - 2.0*A[1][(N-1)] + (((N-1) == 0) ? 0 : A[1][(N-1)-1]));
}
if (t1 >= max(ceild(258*t2-N+256,2),129*t2)) {
A[1][0] = 0.125 * (((0 == N-1) ? 0 : A[0][0 +1]) - 2.0*A[0][0] + ((0 == 0) ? 0 : A[0][0 -1]));
for (t4=2*t1-2*t2+1;t4<=256*t2+255;t4++) {
A[1][(-2*t1+2*t2+t4)] = 0.125 * ((((-2*t1+2*t2+t4) == N-1) ? 0 : A[0][(-2*t1+2*t2+t4)+1]) - 2.0*A[0][(-2*t1+2*t2+t4)] + (((-2*t1+2*t2+t4) == 0) ? 0 : A[0][(-2*t1+2*t2+t4)-1]));
A[0][(-2*t1+2*t2+t4-1)] = 0.125 * ((((-2*t1+2*t2+t4-1) == N-1) ? 0 : A[1][(-2*t1+2*t2+t4-1)+1]) - 2.0*A[1][(-2*t1+2*t2+t4-1)] + (((-2*t1+2*t2+t4-1) == 0) ? 0 : A[1][(-2*t1+2*t2+t4-1)-1]));
}
}
if (N == 1) {
A[1][0] = 0.125 * (((0 == N-1) ? 0 : A[0][0 +1]) - 2.0*A[0][0] + ((0 == 0) ? 0 : A[0][0 -1]));
A[0][0] = 0.125 * (((0 == N-1) ? 0 : A[1][0 +1]) - 2.0*A[1][0] + ((0 == 0) ? 0 : A[1][0 -1]));
}
if ((t1 <= min(floord(258*t2-N+255,2),129*t2-1)) && (t1 >= ceild(258*t2-N+1,2))) {
for (t4=256*t2;t4<=2*t1-2*t2+N-1;t4++) {
A[1][(-2*t1+2*t2+t4)] = 0.125 * ((((-2*t1+2*t2+t4) == N-1) ? 0 : A[0][(-2*t1+2*t2+t4)+1]) - 2.0*A[0][(-2*t1+2*t2+t4)] + (((-2*t1+2*t2+t4) == 0) ? 0 : A[0][(-2*t1+2*t2+t4)-1]));
A[0][(-2*t1+2*t2+t4-1)] = 0.125 * ((((-2*t1+2*t2+t4-1) == N-1) ? 0 : A[1][(-2*t1+2*t2+t4-1)+1]) - 2.0*A[1][(-2*t1+2*t2+t4-1)] + (((-2*t1+2*t2+t4-1) == 0) ? 0 : A[1][(-2*t1+2*t2+t4-1)-1]));
}
A[0][(N-1)] = 0.125 * ((((N-1) == N-1) ? 0 : A[1][(N-1)+1]) - 2.0*A[1][(N-1)] + (((N-1) == 0) ? 0 : A[1][(N-1)-1]));
}
if ((t1 >= ceild(258*t2-N+256,2)) && (t1 <= 129*t2-1)) {
for (t4=256*t2;t4<=256*t2+255;t4++) {
A[1][(-2*t1+2*t2+t4)] = 0.125 * ((((-2*t1+2*t2+t4) == N-1) ? 0 : A[0][(-2*t1+2*t2+t4)+1]) - 2.0*A[0][(-2*t1+2*t2+t4)] + (((-2*t1+2*t2+t4) == 0) ? 0 : A[0][(-2*t1+2*t2+t4)-1]));
A[0][(-2*t1+2*t2+t4-1)] = 0.125 * ((((-2*t1+2*t2+t4-1) == N-1) ? 0 : A[1][(-2*t1+2*t2+t4-1)+1]) - 2.0*A[1][(-2*t1+2*t2+t4-1)] + (((-2*t1+2*t2+t4-1) == 0) ? 0 : A[1][(-2*t1+2*t2+t4-1)-1]));
}
}
if (2*t1 == 258*t2-N) {
if ((256*t1+257*N)%258 == 0) {
A[0][(N-1)] = 0.125 * ((((N-1) == N-1) ? 0 : A[1][(N-1)+1]) - 2.0*A[1][(N-1)] + (((N-1) == 0) ? 0 : A[1][(N-1)-1]));
}
}
}
}
}
/* End of CLooG code */
#undef T
#define T 10000
#ifdef TIME
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double)(result.tv_sec + result.tv_usec * 1.0e-6);
printf("|Time taken = %7.5lfs\t", tdiff);
printf("|MFLOPS = %f\n", ((((double)NUM_FP_OPS * N * T) / tdiff) / 1000000L));
#endif
#ifdef VERIFY
total=0;
for (i = 0; i < N; i++) {
total+= A[T%2][i] ;
}
printf("|sum: %e\t", total);
for (i = 0; i < N; i++) {
sum_err_sqr += (A[T%2][i] - (total/N))*(A[T%2][i] - (total/N));
}
printf("|rms(A) = %7.2f\t", sqrt(sum_err_sqr));
for (i = 0; i < N; i++) {
chtotal += ((char *)A[T%2])[i];
}
printf("|sum(rep(A)) = %ld\n", chtotal);
#endif
return 0;
}
// icc -O3 -fp-model precise heat_1d_np.c -o op-heat-1d-np -lm
// /* @ begin PrimeTile (num_tiling_levels=1; first_depth=1; last_depth=-1; boundary_tiling_level=-1;) @*/
// /* @ begin PrimeRegTile (scalar_replacement=0; T1t3=8; T1t4=8; ) @*/
// /* @ end @*/
|
Homography.h | #pragma once
#include "saiga/vision/VisionTypes.h"
#include "saiga/vision/util/Ransac.h"
#include <array>
// This code here is inspired (and partially copied) from Colmap.
// https://github.com/colmap/colmap
namespace Saiga
{
/**
* Calculates a 3x3 homography matrix H so that
* targetPoints[i] = H * sourcePoints[i]
* This mapping is in 2d projective space -> H is up to a scale
*/
SAIGA_VISION_API Mat3 homography(ArrayView<const Vec2> points1, ArrayView<const Vec2> points2);
/**
* The transformation error for a corresponding point pair.
*/
inline double homographyResidual(const Vec2& p1, const Vec2& p2, const Mat3& H)
{
Vec3 p = H * p1.homogeneous();
double invz = 1.0 / p(2);
Vec2 res(p2(0) - p(0) * invz, p2(1) - p(1) * invz);
return res.squaredNorm();
}
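// Usage sketch (hypothetical values): the residual is the squared pixel
// reprojection error, so the identity homography on a matching pair is 0:
//
//   Mat3 H = Mat3::Identity();
//   Vec2 p(1.0, 2.0);
//   double r = homographyResidual(p, p, H); // r == 0.0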
#if 0
// solves H = aK * [R|t] for [R|t]
CameraExtrinsics getExtrinsicsFromHomography(const CameraIntrinsics& camera, const mat3d_t& H);
// solves H = a[R|t] for [R|t]
CameraExtrinsics getExtrinsicsFromHomography(const mat3d_t& H);
mat3d_t homographyRANSAC(const std::vector<vec2d_t>& sourcePoints, const std::vector<vec2d_t>& targetPoints,
std::vector<int>& outInliers, int numIterations = 1000, double inlierThreshold = 5,
int numSamples = 4);
#endif
class SAIGA_VISION_API HomographyRansac : public RansacBase<HomographyRansac, Mat3, 4>
{
using Base = RansacBase<HomographyRansac, Mat3, 4>;
using Model = Mat3;
public:
HomographyRansac(const RansacParameters& params) : Base(params) {}
int solve(ArrayView<const Vec2> _points1, ArrayView<const Vec2> _points2, Mat3& bestH)
{
points1 = _points1;
points2 = _points2;
int idx;
#pragma omp parallel num_threads(params.threads)
{
idx = compute(points1.size());
}
bestH = models[idx];
return numInliers[idx];
}
bool computeModel(const Subset& set, Model& model)
{
std::array<Vec2, 4> p1;
std::array<Vec2, 4> p2;
for (auto i : Range(0, (int)set.size()))
{
p1[i] = points1[set[i]];
p2[i] = points2[set[i]];
}
model = homography(p1, p2);
return true;
}
double computeResidual(const Model& model, int i) { return homographyResidual(points1[i], points2[i], model); }
ArrayView<const Vec2> points1;
ArrayView<const Vec2> points2;
};
} // namespace Saiga
|
generator.h | // Copyright (c) 2015, The Regents of the University of California (Regents)
// See LICENSE.txt for license details
#ifndef GENERATOR_H_
#define GENERATOR_H_
#include <algorithm>
#include <cinttypes>
#include <random>
#include "graph.h"
#include "pvector.h"
#include "util.h"
/*
GAP Benchmark Suite
Class: Generator
Author: Scott Beamer
Given scale and degree, generates edgelist for synthetic graph
- Intended to be called from Builder
- GenerateEL(uniform) generates and returns the edgelist
- Can generate uniform random (uniform=true) or R-MAT graph according
to Graph500 parameters (uniform=false)
- Can also randomize weights within a weighted edgelist (InsertWeights)
- Blocking/reseeding is for parallelism with deterministic output edgelist
*/
template <typename NodeID_, typename DestID_ = NodeID_,
typename WeightT_ = NodeID_>
class Generator {
typedef EdgePair<NodeID_, DestID_> Edge;
typedef EdgePair<NodeID_, NodeWeight<NodeID_, WeightT_>> WEdge;
typedef pvector<Edge> EdgeList;
public:
Generator(int scale, int degree) {
scale_ = scale;
num_nodes_ = 1l << scale;
num_edges_ = num_nodes_ * degree;
if (num_nodes_ > (std::numeric_limits<NodeID_>::max)()) {
std::cout << "NodeID type (max: " << (std::numeric_limits<NodeID_>::max)();
std::cout << ") too small to hold " << num_nodes_ << std::endl;
std::cout << "Recommend changing NodeID (typedef'd in src/benchmark.h)";
std::cout << " to a wider type and recompiling" << std::endl;
std::exit(-31);
}
}
void PermuteIDs(EdgeList &el) {
pvector<NodeID_> permutation(num_nodes_);
std::mt19937 rng(kRandSeed);
#pragma omp parallel for
for (NodeID_ n=0; n < num_nodes_; n++)
permutation[n] = n;
shuffle(permutation.begin(), permutation.end(), rng);
#pragma omp parallel for
for (int64_t e=0; e < num_edges_; e++)
el[e] = Edge(permutation[el[e].u], permutation[el[e].v]);
}
EdgeList MakeUniformEL() {
EdgeList el(num_edges_);
#pragma omp parallel
{
std::mt19937 rng;
std::uniform_int_distribution<NodeID_> udist(0, num_nodes_-1);
#pragma omp for
for (int64_t block=0; block < num_edges_; block+=block_size) {
rng.seed(kRandSeed + block/block_size);
for (int64_t e=block; e < (std::min)(block+block_size, num_edges_); e++) {
el[e] = Edge(udist(rng), udist(rng));
}
}
}
return el;
}
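// Each block_size chunk above reseeds its RNG from kRandSeed plus the
// block index (the blocking/reseeding mentioned in the header comment),
// so the generated edge list is identical for any thread count or
// OpenMP schedule.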
EdgeList MakeRMatEL() {
const float A = 0.57f, B = 0.19f, C = 0.19f;
EdgeList el(num_edges_);
#pragma omp parallel
{
std::mt19937 rng;
std::uniform_real_distribution<float> udist(0, 1.0f);
#pragma omp for
for (int64_t block=0; block < num_edges_; block+=block_size) {
rng.seed(kRandSeed + block/block_size);
for (int64_t e=block; e < (std::min)(block+block_size, num_edges_); e++) {
NodeID_ src = 0, dst = 0;
for (int depth=0; depth < scale_; depth++) {
float rand_point = udist(rng);
src = src << 1;
dst = dst << 1;
if (rand_point < A+B) {
if (rand_point > A)
dst++;
} else {
src++;
if (rand_point > A+B+C)
dst++;
}
}
el[e] = Edge(src, dst);
}
}
}
PermuteIDs(el);
// TIME_PRINT("Shuffle", std::shuffle(el.begin(), el.end(),
// std::mt19937()));
return el;
}
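// R-MAT sketch: each of the scale_ levels above descends into one
// quadrant of the recursively halved adjacency matrix -- top-left with
// probability A, top-right B, bottom-left C, bottom-right 1-A-B-C --
// by appending one bit to src and dst per level, which is exactly the
// branch structure of the rand_point tests.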
EdgeList GenerateEL(bool uniform) {
EdgeList el;
Timer t;
t.Start();
if (uniform)
el = MakeUniformEL();
else
el = MakeRMatEL();
t.Stop();
PrintTime("Generate Time", t.Seconds());
return el;
}
static void InsertWeights(pvector<EdgePair<NodeID_, NodeID_>> &el) {}
// Overwrites existing weights with random from [1,255]
static void InsertWeights(pvector<WEdge> &el) {
#pragma omp parallel
{
std::mt19937 rng;
std::uniform_int_distribution<int> udist(1, 255);
int64_t el_size = el.size();
#pragma omp for
for (int64_t block=0; block < el_size; block+=block_size) {
rng.seed(kRandSeed + block/block_size);
for (int64_t e=block; e < std::min(block+block_size, el_size); e++) {
el[e].v.w = static_cast<WeightT_>(udist(rng));
}
}
}
}
private:
int scale_;
int64_t num_nodes_;
int64_t num_edges_;
static const int64_t block_size = 1<<18;
};
#endif // GENERATOR_H_
|
office_fmt_plug.c | /* Office 2007 cracker patch for JtR. Hacked together during March of 2012 by
* Dhiru Kholia <dhiru.kholia at gmail.com> */
#if FMT_EXTERNS_H
extern struct fmt_main fmt_office;
#elif FMT_REGISTERS_H
john_register_one(&fmt_office);
#else
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <errno.h>
#include "aes.h"
#ifdef _OPENMP
#include <omp.h>
#ifndef OMP_SCALE
#define OMP_SCALE 4
#endif
#endif
#include "arch.h"
#include "misc.h"
#include "common.h"
#include "formats.h"
#include "params.h"
#include "options.h"
#include "unicode.h"
#include "sha.h"
#include "sha2.h"
#include "johnswap.h"
#include "office_common.h"
#include "simd-intrinsics.h"
#include "memdbg.h"
//#undef SIMD_COEF_32
//#undef SIMD_COEF_64
#define FORMAT_LABEL "Office"
#define FORMAT_NAME "2007/2010/2013"
#define ALGORITHM_NAME "SHA1 " SHA1_ALGORITHM_NAME " / SHA512 " SHA512_ALGORITHM_NAME " AES"
#define BENCHMARK_COMMENT ""
#define BENCHMARK_LENGTH -1
#define PLAINTEXT_LENGTH 125
#define BINARY_SIZE 16
#define SALT_SIZE sizeof(*cur_salt)
#define BINARY_ALIGN 4
#define SALT_ALIGN sizeof(int)
#ifdef SIMD_COEF_32
#define GETPOS_1(i, index) ( (index&(SIMD_COEF_32-1))*4 + ((i)&(0xffffffff-3))*SIMD_COEF_32 + (3-((i)&3)) + (unsigned int)index/SIMD_COEF_32*SHA_BUF_SIZ*SIMD_COEF_32*4 )
#define GETPOS_512(i, index) ( (index&(SIMD_COEF_64-1))*8 + ((i)&(0xffffffff-7))*SIMD_COEF_64 + (7-((i)&7)) + (unsigned int)index/SIMD_COEF_64*SHA_BUF_SIZ*SIMD_COEF_64*8 )
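/*
GETPOS_1(i, index) maps byte i of password-lane `index` into the
interleaved SIMD SHA-1 input buffer: SIMD_COEF_32 lanes are packed side
by side, bytes sit big-endian within each 32-bit word (the 3-((i)&3)
flip), and whole SHA_BUF_SIZ-word blocks follow per lane group.
GETPOS_512 is the analogous 64-bit-word mapping for SHA-512.
*/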
#define SHA1_LOOP_CNT (SIMD_COEF_32*SIMD_PARA_SHA1)
#define SHA512_LOOP_CNT (SIMD_COEF_64 * SIMD_PARA_SHA512)
#define MIN_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_SHA1 * SIMD_PARA_SHA512)
#define MAX_KEYS_PER_CRYPT (SIMD_COEF_32 * SIMD_PARA_SHA1 * SIMD_PARA_SHA512)
#else
#define SHA1_LOOP_CNT 1
#define SHA512_LOOP_CNT 1
#define MIN_KEYS_PER_CRYPT 1
#define MAX_KEYS_PER_CRYPT 1
#endif
static struct fmt_tests office_tests[] = {
{"$office$*2007*20*128*16*8b2c9e8c878844fc842012273be4bea8*aa862168b80d8c45c852696a8bb499eb*a413507fabe2d87606595f987f679ff4b5b4c2cd", "Password"},
/* 2007-Default_myhovercraftisfullofeels_.docx */
{"$office$*2007*20*128*16*91f095a1fd02595359fe3938fa9236fd*e22668eb1347957987175079e980990f*659f50b9062d36999bf3d0911068c93268ae1d86", "myhovercraftisfullofeels"},
/* 2007-Default_myhovercraftisfullofeels_.dotx */
{"$office$*2007*20*128*16*56ea65016fbb4eac14a6770b2dbe7e99*8cf82ce1b62f01fd3b2c7666a2313302*21443fe938177e648c482da72212a8848c2e9c80", "myhovercraftisfullofeels"},
/* 2007-Default_myhovercraftisfullofeels_.xlsb */
{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*3a040a9cef3d3675009b22f99718e39c*48053b27e95fa53b3597d48ca4ad41eec382e0c8", "myhovercraftisfullofeels"},
/* 2007-Default_myhovercraftisfullofeels_.xlsm */
{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*92bb2ef34ca662ca8a26c8e2105b05c0*0261ba08cd36a324aa1a70b3908a24e7b5a89dd6", "myhovercraftisfullofeels"},
/* 2007-Default_myhovercraftisfullofeels_.xlsx */
{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*46bef371486919d4bffe7280110f913d*b51af42e6696baa097a7109cebc3d0ff7cc8b1d8", "myhovercraftisfullofeels"},
/* 2007-Default_myhovercraftisfullofeels_.xltx */
{"$office$*2007*20*128*16*fbd4cc5dab9b8e341778ddcde9eca740*1addb6823689aca9ce400be8f9e55fc9*e06bf10aaf3a4049ffa49dd91cf9e7bbf88a1b3b", "myhovercraftisfullofeels"},
/* 2010-Default_myhovercraftisfullofeels_.docx */
{"$office$*2010*100000*128*16*213aefcafd9f9188e78c1936cbb05a44*d5fc7691292ab6daf7903b9a8f8c8441*46bfac7fb87cd43bd0ab54ebc21c120df5fab7e6f11375e79ee044e663641d5e", "myhovercraftisfullofeels"},
/* 2010-Default_myhovercraftisfullofeels_.dotx */
{"$office$*2010*100000*128*16*0907ec6ecf82ede273b7ee87e44f4ce5*d156501661638cfa3abdb7fdae05555e*4e4b64e12b23f44d9a8e2e00196e582b2da70e5e1ab4784384ad631000a5097a", "myhovercraftisfullofeels"},
/* 2010-Default_myhovercraftisfullofeels_.xlsb */
{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*00780eeb9605c7e27227c5619e91dc21*90aaf0ea5ccc508e699de7d62c310f94b6798ae77632be0fc1a0dc71600dac38", "myhovercraftisfullofeels"},
/* 2010-Default_myhovercraftisfullofeels_.xlsx */
{"$office$*2010*100000*128*16*71093d08cf950f8e8397b8708de27c1f*ef51883a775075f30d2207e87987e6a3*a867f87ea955d15d8cb08dc8980c04bf564f8af060ab61bf7fa3543853e0d11a", "myhovercraftisfullofeels"},
/* 2013-openwall.pptx */
{"$office$*2013*100000*256*16*9b12805dd6d56f46d07315153f3ecb9c*c5a4a167b51faa6629f6a4caf0b4baa8*87397e0659b2a6fff90291f8e6d6d0018b750b792fefed77001edbafba7769cd", "openwall"},
/* 365-2013-openwall.docx */
{"$office$*2013*100000*256*16*774a174239a7495a59cac39a122d991c*b2f9197840f9e5d013f95a3797708e83*ecfc6d24808691aac0daeaeba72aba314d72c6bbd12f7ff0ea1a33770187caef", "openwall"},
/* 365-2013-password.docx */
{"$office$*2013*100000*256*16*d4fc9302eedabf9872b24ca700a5258b*7c9554d582520747ec3e872f109a7026*1af5b5024f00e35eaf5fd8148b410b57e7451a32898acaf14275a8c119c3a4fd", "password"},
/* 365-2013-password.xlsx */
{"$office$*2013*100000*256*16*59b49c64c0d29de733f0025837327d50*70acc7946646ea300fc13cfe3bd751e2*627c8bdb7d9846228aaea81eeed434d022bb93bb5f4da146cb3ad9d847de9ec9", "password"},
/* 365-2013-strict-password.docx */
{"$office$*2013*100000*256*16*f1c23049d85876e6b20e95ab86a477f1*13303dbd27a38ea86ef11f1b2bc56225*9a69596de0655a6c6a5b2dc4b24d6e713e307fb70af2d6b67b566173e89f941d", "password"},
/* Max password length data, 125 bytes. Made with pass_gen.pl */
{"$office$*2007*20*128*16*7268323350556e527671367031526263*54344b786a6967615052493837496735*96c9d7cc44e81971aadfe81cce88cb8b00000000", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
{"$office$*2010*100000*128*16*42624931633777446c67354e34686e64*73592fdc2ecb12cd8dcb3ca2cec852bd*82f7315701818a7150ed7a7977717d0b56dcd1bc27e40a23dee6287a6ed55f9b", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
{"$office$*2013*100000*256*16*36537a3373756b587632386d77665362*c5958bd6177be548ce33d99f8e4fd7a7*43baa9dfab09a7e54b9d719dbe5187f1f7b55d7b761361fe1f60c85b044aa125", "12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345"},
{NULL}
};
static ms_office_custom_salt *cur_salt;
#define MS_OFFICE_2007_ITERATIONS 50000
#if defined (_OPENMP)
static int omp_t = 1;
#endif
/* Password encoded in UCS-2 */
static UTF16 (*saved_key)[PLAINTEXT_LENGTH + 1];
/* UCS-2 password length, in octets */
static int *saved_len;
static ARCH_WORD_32 (*crypt_key)[4];
static int *cracked;
/* Office 2010/2013 */
static const unsigned char encryptedVerifierHashInputBlockKey[] = { 0xfe, 0xa7, 0xd2, 0x76, 0x3b, 0x4b, 0x9e, 0x79 };
static const unsigned char encryptedVerifierHashValueBlockKey[] = { 0xd7, 0xaa, 0x0f, 0x6d, 0x30, 0x61, 0x34, 0x4e };
static unsigned char *DeriveKey(unsigned char *hashValue, unsigned char *X1)
{
int i;
unsigned char derivedKey[64];
SHA_CTX ctx;
// This is step 4a in 2.3.4.7 of MS_OFFCRYPT version 1.0
// and is required even though the notes say it should be
// used only when the encryption algorithm key > hash length.
for (i = 0; i < 64; i++)
derivedKey[i] = (i < 20 ? 0x36 ^ hashValue[i] : 0x36);
SHA1_Init(&ctx);
SHA1_Update(&ctx, derivedKey, 64);
SHA1_Final(X1, &ctx);
if (cur_salt->verifierHashSize > cur_salt->keySize/8)
return X1;
/* TODO: finish up this function */
//for (i = 0; i < 64; i++)
// derivedKey[i] = (i < 30 ? 0x5C ^ hashValue[i] : 0x5C);
fprintf(stderr, "\n\n*** ERROR: DeriveKey() entered Limbo.\n");
fprintf(stderr, "Please report to john-dev mailing list.\n");
error();
return NULL;
}
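/*
 * Editor's sketch of the unfinished 0x5C branch noted in the TODO above,
 * following step 4b of MS-OFFCRYPT 2.3.4.7: X2 would be the SHA-1 of the
 * hash XOR-padded with 0x5C, and X1||X2 would serve key sizes larger than
 * one SHA-1 output. Hypothetical helper for illustration only (never
 * called), hence compiled out.
 */
#if 0
static void DeriveKeyX2(unsigned char *hashValue, unsigned char *X2)
{
int i;
unsigned char derivedKey[64];
SHA_CTX ctx;
for (i = 0; i < 64; i++)
derivedKey[i] = (i < 20 ? 0x5C ^ hashValue[i] : 0x5C);
SHA1_Init(&ctx);
SHA1_Update(&ctx, derivedKey, 64);
SHA1_Final(X2, &ctx);
}
#endif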
#ifdef SIMD_COEF_32
static void GeneratePasswordHashUsingSHA1(int idx, unsigned char final[SHA1_LOOP_CNT][20])
{
unsigned char hashBuf[20];
/* H(0) = H(salt, password)
* hashBuf = SHA1Hash(salt, password);
* create input buffer for SHA1 from salt and unicode version of password */
unsigned char X1[20];
SHA_CTX ctx;
unsigned char _IBuf[64*SHA1_LOOP_CNT+MEM_ALIGN_CACHE], *keys;
uint32_t *keys32;
unsigned i, j;
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys32 = (uint32_t*)keys;
memset(keys, 0, 64*SHA1_LOOP_CNT);
for (i = 0; i < SHA1_LOOP_CNT; ++i) {
SHA1_Init(&ctx);
SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
SHA1_Update(&ctx, saved_key[idx+i], saved_len[idx+i]);
SHA1_Final(hashBuf, &ctx);
/* Generate each hash in turn
* H(n) = H(i, H(n-1))
* hashBuf = SHA1Hash(i, hashBuf); */
// Create a byte array of the integer and put it at the front of the input buffer
// 1.3.6 says that little-endian byte ordering is expected
for (j = 4; j < 24; ++j)
keys[GETPOS_1(j, i)] = hashBuf[j-4];
keys[GETPOS_1(j, i)] = 0x80;
// 24 bytes of crypt data (192 bits).
keys[GETPOS_1(63, i)] = 192;
}
// we do one less than the actual number of iterations here.
for (i = 0; i < MS_OFFICE_2007_ITERATIONS-1; i++) {
for (j = 0; j < SHA1_LOOP_CNT; ++j) {
keys[GETPOS_1(0, j)] = i&0xff;
keys[GETPOS_1(1, j)] = i>>8;
}
// Here we output to 4 bytes past the start of the input buffer.
SIMDSHA1body(keys, &keys32[SIMD_COEF_32], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
}
// The last iteration is output to the start of the input buffer, then a 32-bit 0 is appended,
// which still ends up being 24 bytes of crypt data.
for (j = 0; j < SHA1_LOOP_CNT; ++j) {
keys[GETPOS_1(0, j)] = i&0xff;
keys[GETPOS_1(1, j)] = i>>8;
}
SIMDSHA1body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// Finally, append "block" (0) to H(n)
// hashBuf = SHA1Hash(hashBuf, 0);
for (i = 0; i < SIMD_PARA_SHA1; ++i)
memset(&keys[GETPOS_1(23,i*SIMD_COEF_32)], 0, 4*SIMD_COEF_32);
SIMDSHA1body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
// Now convert back into a 'flat' value, which is a flat array.
for (i = 0; i < SHA1_LOOP_CNT; ++i)
memcpy(final[i], DeriveKey(&keys[20*i], X1), cur_salt->keySize/8);
}
#else
// for non MMX, SHA1_LOOP_CNT is 1
static void GeneratePasswordHashUsingSHA1(int idx, unsigned char final[SHA1_LOOP_CNT][20])
{
unsigned char hashBuf[20], *key;
UTF16 *passwordBuf=saved_key[idx];
int passwordBufSize=saved_len[idx];
/* H(0) = H(salt, password)
* hashBuf = SHA1Hash(salt, password);
* create input buffer for SHA1 from salt and unicode version of password */
unsigned int inputBuf[(0x14 + 0x04 + 4) / sizeof(int)];
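/* Editor's note: inputBuf is 28 bytes laid out as [4-byte counter |
 * 20-byte hash | 4-byte block]; each round hashes the first 24 bytes, and
 * the final step hashes the 24 bytes starting at &inputBuf[1]. */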
unsigned char X1[20];
int i;
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
SHA1_Update(&ctx, passwordBuf, passwordBufSize);
SHA1_Final(hashBuf, &ctx);
/* Generate each hash in turn
* H(n) = H(i, H(n-1))
* hashBuf = SHA1Hash(i, hashBuf); */
// Create a byte array of the integer and put it at the front of the input buffer
// 1.3.6 says that little-endian byte ordering is expected
memcpy(&inputBuf[1], hashBuf, 20);
for (i = 0; i < MS_OFFICE_2007_ITERATIONS; i++) {
#if ARCH_LITTLE_ENDIAN
*inputBuf = i;
#else
*inputBuf = JOHNSWAP(i);
#endif
// 'append' the previously generated hash to the input buffer
SHA1_Init(&ctx);
SHA1_Update(&ctx, inputBuf, 0x14 + 0x04);
SHA1_Final((unsigned char*)&inputBuf[1], &ctx);
}
// Finally, append "block" (0) to H(n)
// hashBuf = SHA1Hash(hashBuf, 0);
memset(&inputBuf[6], 0, 4);
SHA1_Init(&ctx);
SHA1_Update(&ctx, &inputBuf[1], 0x14 + 0x04);
SHA1_Final(hashBuf, &ctx);
key = DeriveKey(hashBuf, X1);
// Should handle the case of longer key lengths as shown in 2.3.4.9
// Grab the key-length bytes of the final hash as the encryption key
memcpy(final[0], key, cur_salt->keySize/8);
}
#endif
#ifdef SIMD_COEF_32
static void GenerateAgileEncryptionKey(int idx, unsigned char hashBuf[SHA1_LOOP_CNT][64])
{
unsigned char tmpBuf[20];
int hashSize = cur_salt->keySize >> 3;
unsigned i, j;
SHA_CTX ctx;
unsigned char _IBuf[64*SHA1_LOOP_CNT+MEM_ALIGN_CACHE], *keys,
_OBuf[20*SHA1_LOOP_CNT+MEM_ALIGN_CACHE];
uint32_t *keys32, (*crypt)[20/4];
crypt = (void*)mem_align(_OBuf, MEM_ALIGN_CACHE);
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys32 = (uint32_t*)keys;
memset(keys, 0, 64*SHA1_LOOP_CNT);
for (i = 0; i < SHA1_LOOP_CNT; ++i) {
SHA1_Init(&ctx);
SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
SHA1_Update(&ctx, saved_key[idx+i], saved_len[idx+i]);
SHA1_Final(tmpBuf, &ctx);
for (j = 4; j < 24; ++j)
keys[GETPOS_1(j, i)] = tmpBuf[j-4];
keys[GETPOS_1(j, i)] = 0x80;
// 24 bytes of crypt data (192 bits).
keys[GETPOS_1(63, i)] = 192;
}
// we do one less than the actual number of iterations here.
for (i = 0; i < cur_salt->spinCount-1; i++) {
for (j = 0; j < SHA1_LOOP_CNT; ++j) {
keys[GETPOS_1(0, j)] = i&0xff;
keys[GETPOS_1(1, j)] = (i>>8)&0xff;
keys[GETPOS_1(2, j)] = i>>16;
}
// Here we output to 4 bytes past the start of the input buffer.
SIMDSHA1body(keys, &keys32[SIMD_COEF_32], NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
}
// The last iteration is output to the start of the input buffer, then a 32-bit 0 is appended,
// which still ends up being 24 bytes of crypt data.
for (j = 0; j < SHA1_LOOP_CNT; ++j) {
keys[GETPOS_1(0, j)] = i&0xff;
keys[GETPOS_1(1, j)] = (i>>8)&0xff;
keys[GETPOS_1(2, j)] = i>>16;
}
SIMDSHA1body(keys, keys32, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// Finally, append "block" (0) to H(n)
for (i = 0; i < SHA1_LOOP_CNT; ++i) {
for (j = 0; j < 8; ++j)
keys[GETPOS_1(20+j, i)] = encryptedVerifierHashInputBlockKey[j];
keys[GETPOS_1(20+j, i)] = 0x80;
// 28 bytes of crypt data (224 bits).
keys[GETPOS_1(63, i)] = 224;
}
SIMDSHA1body(keys, (ARCH_WORD_32*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
for (i = 0; i < SHA1_LOOP_CNT; ++i)
memcpy(hashBuf[i], crypt[i], 20);
// And second "block" (0) to H(n)
for (i = 0; i < SHA1_LOOP_CNT; ++i) {
for (j = 0; j < 8; ++j)
keys[GETPOS_1(20+j, i)] = encryptedVerifierHashValueBlockKey[j];
}
SIMDSHA1body(keys, (ARCH_WORD_32*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
for (i = 0; i < SHA1_LOOP_CNT; ++i)
memcpy(&hashBuf[i][32], crypt[i], 20);
// Fix up the size per the spec
if (20 < hashSize) { // FIXME: Is this ever true?
for (i = 0; i < SHA1_LOOP_CNT; ++i) {
for(j = 20; j < hashSize; j++) {
hashBuf[i][j] = 0x36;
hashBuf[i][32 + j] = 0x36;
}
}
}
}
#else
static void GenerateAgileEncryptionKey(int idx, unsigned char hashBuf[SHA1_LOOP_CNT][64])
{
/* H(0) = H(salt, password)
* hashBuf = SHA1Hash(salt, password);
* create input buffer for SHA1 from salt and unicode version of password */
UTF16 *passwordBuf=saved_key[idx];
int passwordBufSize=saved_len[idx];
int hashSize = cur_salt->keySize >> 3;
unsigned int inputBuf[(28 + 4) / sizeof(int)];
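/* Editor's note: inputBuf is 32 bytes laid out as [4-byte counter |
 * 20-byte hash | 8-byte block key]; the spin loop hashes the first 24
 * bytes, and the two final steps hash the 28 bytes starting at &inputBuf[1]. */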
unsigned int i;
SHA_CTX ctx;
SHA1_Init(&ctx);
SHA1_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
SHA1_Update(&ctx, passwordBuf, passwordBufSize);
SHA1_Final(hashBuf[0], &ctx);
/* Generate each hash in turn
* H(n) = H(i, H(n-1))
* hashBuf = SHA1Hash(i, hashBuf); */
// Create a byte array of the integer and put it at the front of the input buffer
// 1.3.6 says that little-endian byte ordering is expected
memcpy(&inputBuf[1], hashBuf[0], 20);
for (i = 0; i < cur_salt->spinCount; i++) {
#if ARCH_LITTLE_ENDIAN
*inputBuf = i;
#else
*inputBuf = JOHNSWAP(i);
#endif
// 'append' the previously generated hash to the input buffer
SHA1_Init(&ctx);
SHA1_Update(&ctx, inputBuf, 0x14 + 0x04);
SHA1_Final((unsigned char*)&inputBuf[1], &ctx);
}
// Finally, append "block" (0) to H(n)
memcpy(&inputBuf[6], encryptedVerifierHashInputBlockKey, 8);
SHA1_Init(&ctx);
SHA1_Update(&ctx, &inputBuf[1], 28);
SHA1_Final(hashBuf[0], &ctx);
// And second "block" (0) to H(n)
memcpy(&inputBuf[6], encryptedVerifierHashValueBlockKey, 8);
SHA1_Init(&ctx);
SHA1_Update(&ctx, &inputBuf[1], 28);
SHA1_Final(&hashBuf[0][32], &ctx);
// Fix up the size per the spec
if (20 < hashSize) { // FIXME: Is this ever true?
for(i = 20; i < hashSize; i++) {
hashBuf[0][i] = 0x36;
hashBuf[0][32 + i] = 0x36;
}
}
}
#endif
#ifdef SIMD_COEF_64
static void GenerateAgileEncryptionKey512(int idx, unsigned char hashBuf[SHA512_LOOP_CNT][128])
{
unsigned char tmpBuf[64];
unsigned int i, j, k;
SHA512_CTX ctx;
unsigned char _IBuf[128*SHA512_LOOP_CNT+MEM_ALIGN_CACHE], *keys,
_OBuf[64*SHA512_LOOP_CNT+MEM_ALIGN_CACHE];
ARCH_WORD_64 *keys64, (*crypt)[64/8];
uint32_t *keys32, *crypt32;
crypt = (void*)mem_align(_OBuf, MEM_ALIGN_CACHE);
keys = (unsigned char*)mem_align(_IBuf, MEM_ALIGN_CACHE);
keys64 = (ARCH_WORD_64*)keys;
keys32 = (uint32_t*)keys;
crypt32 = (uint32_t*)crypt;
memset(keys, 0, 128*SHA512_LOOP_CNT);
for (i = 0; i < SHA512_LOOP_CNT; ++i) {
SHA512_Init(&ctx);
SHA512_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
SHA512_Update(&ctx, saved_key[idx+i], saved_len[idx+i]);
SHA512_Final(tmpBuf, &ctx);
for (j = 4; j < 68; ++j)
keys[GETPOS_512(j, i)] = tmpBuf[j-4];
keys[GETPOS_512(j, i)] = 0x80;
// 68 bytes of crypt data (0x220 bits).
keys[GETPOS_512(127, i)] = 0x20;
keys[GETPOS_512(126, i)] = 0x02;
}
// we do one less than the actual number of iterations here.
for (i = 0; i < cur_salt->spinCount-1; i++) {
unsigned int i_be = JOHNSWAP(i);
// Iteration counter in first 4 bytes
for (j = 0; j < SHA512_LOOP_CNT; j++)
keys32[(j&(SIMD_COEF_64-1))*2 + j/SIMD_COEF_64*2*SHA_BUF_SIZ*SIMD_COEF_64 + 1] = i_be;
SIMDSHA512body(keys, (ARCH_WORD_64*)crypt, NULL, SSEi_MIXED_IN);
// Then we output to 4 bytes past the start of the input buffer.
for (j = 0; j < SHA512_LOOP_CNT; j++) {
uint32_t *o = keys32 + (j&(SIMD_COEF_64-1))*2 + j/SIMD_COEF_64*2*SHA_BUF_SIZ*SIMD_COEF_64;
uint32_t *in = crypt32 + (j&(SIMD_COEF_64-1))*2 + j/SIMD_COEF_64*2*8*SIMD_COEF_64;
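// Editor's note: this shuffle redistributes the 32-bit halves of each
// 64-bit lane word so that the previous digest lands at message offset 4,
// keeping bytes 0-3 of the buffer free for the iteration counter above.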
for (k = 0; k < 8; k++) {
o[0] = in[1];
o += SIMD_COEF_64*2;
o[1] = in[0];
in += SIMD_COEF_64*2;
}
}
}
// The last iteration is output to the start of the input buffer (in input
// format); its input is still the 68 bytes of counter plus previous digest.
for (j = 0; j < SHA512_LOOP_CNT; ++j) {
keys[GETPOS_512(0, j)] = i&0xff;
keys[GETPOS_512(1, j)] = (i>>8)&0xff;
keys[GETPOS_512(2, j)] = i>>16;
}
SIMDSHA512body(keys, keys64, NULL, SSEi_MIXED_IN|SSEi_OUTPUT_AS_INP_FMT);
// Finally, append "block" (0) to H(n)
for (i = 0; i < SHA512_LOOP_CNT; ++i) {
for (j = 0; j < 8; ++j)
keys[GETPOS_512(64+j, i)] = encryptedVerifierHashInputBlockKey[j];
keys[GETPOS_512(64+j, i)] = 0x80;
// 72 bytes of crypt data (0x240 we already have 0x220 here)
keys[GETPOS_512(127, i)] = 0x40;
}
SIMDSHA512body(keys, (ARCH_WORD_64*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
for (i = 0; i < SHA512_LOOP_CNT; ++i)
memcpy((ARCH_WORD_64*)(hashBuf[i]), crypt[i], 64);
// And second "block" (0) to H(n)
for (i = 0; i < SHA512_LOOP_CNT; ++i) {
for (j = 0; j < 8; ++j)
keys[GETPOS_512(64+j, i)] = encryptedVerifierHashValueBlockKey[j];
}
SIMDSHA512body(keys, (ARCH_WORD_64*)crypt, NULL, SSEi_MIXED_IN|SSEi_FLAT_OUT);
for (i = 0; i < SHA512_LOOP_CNT; ++i)
memcpy((ARCH_WORD_64*)(&hashBuf[i][64]), crypt[i], 64);
}
#else
static void GenerateAgileEncryptionKey512(int idx, unsigned char hashBuf[SHA512_LOOP_CNT][128])
{
UTF16 *passwordBuf=saved_key[idx];
int passwordBufSize=saved_len[idx];
unsigned int inputBuf[128 / sizeof(int)];
int i;
SHA512_CTX ctx;
SHA512_Init(&ctx);
SHA512_Update(&ctx, cur_salt->osalt, cur_salt->saltSize);
SHA512_Update(&ctx, passwordBuf, passwordBufSize);
SHA512_Final(hashBuf[0], &ctx);
// Create a byte array of the integer and put it at the front of the input buffer
// 1.3.6 says that little-endian byte ordering is expected
memcpy(&inputBuf[1], hashBuf[0], 64);
for (i = 0; i < cur_salt->spinCount; i++) {
#if ARCH_LITTLE_ENDIAN
*inputBuf = i;
#else
*inputBuf = JOHNSWAP(i);
#endif
// 'append' the previously generated hash to the input buffer
SHA512_Init(&ctx);
SHA512_Update(&ctx, inputBuf, 64 + 0x04);
SHA512_Final((unsigned char*)&inputBuf[1], &ctx);
}
// Finally, append "block" (0) to H(n)
memcpy(&inputBuf[68/4], encryptedVerifierHashInputBlockKey, 8);
SHA512_Init(&ctx);
SHA512_Update(&ctx, &inputBuf[1], 64 + 8);
SHA512_Final(hashBuf[0], &ctx);
// And second "block" (0) to H(n)
memcpy(&inputBuf[68/4], encryptedVerifierHashValueBlockKey, 8);
SHA512_Init(&ctx);
SHA512_Update(&ctx, &inputBuf[1], 64 + 8);
SHA512_Final(&hashBuf[0][64], &ctx);
}
#endif
static void init(struct fmt_main *self)
{
#if defined (_OPENMP)
omp_t = omp_get_max_threads();
self->params.min_keys_per_crypt *= omp_t;
omp_t *= OMP_SCALE;
self->params.max_keys_per_crypt *= omp_t;
#endif
saved_key = mem_calloc(sizeof(*saved_key), self->params.max_keys_per_crypt);
saved_len = mem_calloc(sizeof(*saved_len), self->params.max_keys_per_crypt);
crypt_key = mem_calloc(sizeof(*crypt_key), self->params.max_keys_per_crypt);
cracked = mem_calloc(sizeof(*cracked), self->params.max_keys_per_crypt);
if (options.target_enc == UTF_8)
self->params.plaintext_length = MIN(125, PLAINTEXT_LENGTH * 3);
}
static void done(void)
{
MEM_FREE(cracked);
MEM_FREE(crypt_key);
MEM_FREE(saved_len);
MEM_FREE(saved_key);
}
static void set_salt(void *salt)
{
cur_salt = (ms_office_custom_salt *)salt;
}
static int crypt_all(int *pcount, struct db_salt *salt)
{
const int count = *pcount;
int index = 0, inc = SHA1_LOOP_CNT;
if (cur_salt->version == 2013)
inc = SHA512_LOOP_CNT;
#ifdef _OPENMP
#pragma omp parallel for
#endif
for (index = 0; index < count; index+=inc)
{
int i;
if(cur_salt->version == 2007) {
unsigned char encryptionKey[SHA1_LOOP_CNT][20];
GeneratePasswordHashUsingSHA1(index, encryptionKey);
for (i = 0; i < SHA1_LOOP_CNT; ++i)
ms_office_common_PasswordVerifier(cur_salt, encryptionKey[i], crypt_key[index+i]);
}
else if (cur_salt->version == 2010) {
unsigned char verifierKeys[SHA1_LOOP_CNT][64], decryptedVerifierHashInputBytes[16], decryptedVerifierHashBytes[32];
unsigned char hash[20];
SHA_CTX ctx;
GenerateAgileEncryptionKey(index, verifierKeys);
for (i = 0; i < inc; ++i) {
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, verifierKeys[i], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &verifierKeys[i][32], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
SHA1_Init(&ctx);
SHA1_Update(&ctx, decryptedVerifierHashInputBytes, 16);
SHA1_Final(hash, &ctx);
cracked[index+i] = !memcmp(hash, decryptedVerifierHashBytes, 20);
}
}
else if (cur_salt->version == 2013) {
unsigned char verifierKeys[SHA512_LOOP_CNT][128], decryptedVerifierHashInputBytes[16], decryptedVerifierHashBytes[32];
unsigned char hash[64];
SHA512_CTX ctx;
GenerateAgileEncryptionKey512(index, verifierKeys);
for (i = 0; i < inc; ++i) {
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, verifierKeys[i], cur_salt->encryptedVerifier, decryptedVerifierHashInputBytes, 16);
ms_office_common_DecryptUsingSymmetricKeyAlgorithm(cur_salt, &verifierKeys[i][64], cur_salt->encryptedVerifierHash, decryptedVerifierHashBytes, 32);
SHA512_Init(&ctx);
SHA512_Update(&ctx, decryptedVerifierHashInputBytes, 16);
SHA512_Final(hash, &ctx);
cracked[index+i] = !memcmp(hash, decryptedVerifierHashBytes, 20);
}
}
}
return count;
}
static int cmp_all(void *binary, int count)
{
int index;
if (cur_salt->version == 2007) {
for (index = 0; index < count; index++) {
if ( ((ARCH_WORD_32*)binary)[0] == crypt_key[index][0] )
return 1;
}
return 0;
}
for (index = 0; index < count; index++)
if (cracked[index])
return 1;
return 0;
}
static int cmp_one(void *binary, int index)
{
if (cur_salt->version == 2007) {
return !memcmp(binary, crypt_key[index], BINARY_SIZE);
}
return cracked[index];
}
static int cmp_exact(char *source, int index)
{
return 1;
}
static int get_hash_0(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_0; }
static int get_hash_1(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_1; }
static int get_hash_2(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_2; }
static int get_hash_3(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_3; }
static int get_hash_4(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_4; }
static int get_hash_5(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_5; }
static int get_hash_6(int index) { if (cur_salt->version!=2007) return 0; return crypt_key[index][0] & PH_MASK_6; }
static void office_set_key(char *key, int index)
{
/* convert key to UTF-16LE */
saved_len[index] = enc_to_utf16(saved_key[index], PLAINTEXT_LENGTH, (UTF8*)key, strlen(key));
if (saved_len[index] < 0)
saved_len[index] = strlen16(saved_key[index]);
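/* the length is in UTF-16 code units up to here; store it in octets */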
saved_len[index] <<= 1;
}
static char *get_key(int index)
{
return (char*)utf16_to_enc(saved_key[index]);
}
/*
* MS Office version (2007, 2010, 2013) as first tunable cost
*/
static unsigned int ms_office_version(void *salt)
{
ms_office_custom_salt *my_salt;
my_salt = salt;
return (unsigned int) my_salt->version;
}
struct fmt_main fmt_office = {
{
FORMAT_LABEL,
FORMAT_NAME,
ALGORITHM_NAME,
BENCHMARK_COMMENT,
BENCHMARK_LENGTH,
0,
PLAINTEXT_LENGTH,
BINARY_SIZE,
BINARY_ALIGN,
SALT_SIZE,
SALT_ALIGN,
MIN_KEYS_PER_CRYPT,
MAX_KEYS_PER_CRYPT,
FMT_CASE | FMT_8_BIT | FMT_OMP | FMT_UNICODE | FMT_UTF8,
{
"MS Office version",
"iteration count",
},
office_tests
}, {
init,
done,
fmt_default_reset,
fmt_default_prepare,
ms_office_common_valid_all,
fmt_default_split,
ms_office_common_binary,
ms_office_common_get_salt,
{
ms_office_version,
ms_office_common_iteration_count,
},
fmt_default_source,
{
fmt_default_binary_hash_0,
fmt_default_binary_hash_1,
fmt_default_binary_hash_2,
fmt_default_binary_hash_3,
fmt_default_binary_hash_4,
fmt_default_binary_hash_5,
fmt_default_binary_hash_6
},
fmt_default_salt_hash,
NULL,
set_salt,
office_set_key,
get_key,
fmt_default_clear_keys,
crypt_all,
{
get_hash_0,
get_hash_1,
get_hash_2,
get_hash_3,
get_hash_4,
get_hash_5,
get_hash_6
},
cmp_all,
cmp_one,
cmp_exact
}
};
#endif /* plugin stanza */
|
vednnLinearBackwardWeight.c |
#include <stdint.h>
#include "vednnLinearBackwardWeight.h"
#ifdef VEDNN_USE_OPENMP
#include <omp.h>
extern int __vednn_omp_num_threads ;
#endif
static inline vednnError_t
vednnLinearBackwardWeight_wrapper(
vednnLinearBackwardWeight_t pFunc,
const uint64_t inDim,
const uint64_t outDim,
const uint64_t nBatch,
const void * pDataIn,
const void * pDataGradOut,
void * pDataGradWeight
) {
vednnError_t rc = VEDNN_SUCCESS ;
#pragma omp parallel reduction(|:rc)
{
int64_t nthreads = omp_get_num_threads() ;
int64_t threadid = omp_get_thread_num() ;
int64_t nInDim = inDim / nthreads ;
int64_t remain = inDim % nthreads ;
int64_t inDimBegin = nInDim * threadid + ( threadid < remain ? threadid : remain ) ;
int64_t myInDim = nInDim + ( threadid < remain ? 1 : 0 ) ;
if( myInDim == 0 ) { /* this thread was assigned no input dimensions */
rc |= VEDNN_SUCCESS;
} else {
rc |= pFunc(inDim, outDim, nBatch, pDataIn, pDataGradOut, pDataGradWeight,
inDimBegin, inDimBegin+myInDim) ;
}
}
return rc;
}
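/* Editor's note: a worked example of the partition above. With inDim = 10
 * and 4 threads, nInDim = 2 and remain = 2, so the threads cover [0,3),
 * [3,6), [6,8) and [8,10): the first `remain` threads take one extra input
 * dimension and the ranges tile [0, inDim) with no gaps or overlap. */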
/* ----------------------------------------------------------------------- */
vednnError_t vednnLinearBackwardWeight(
const uint64_t inDim,
const uint64_t outDim,
const uint64_t nBatch,
const void * pDataIn,
const void * pDataGradOut,
void * pDataGradWeight
)
{
// [todo] add variations
{
return vednnLinearBackwardWeight_wrapper(
vednnLinearBackwardWeight_default,
inDim, outDim, nBatch,
pDataIn, pDataGradOut, pDataGradWeight ) ;
}
}
|
GB_unop__tanh_fc32_fc32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB (_unop_apply__tanh_fc32_fc32)
// op(A') function: GB (_unop_tran__tanh_fc32_fc32)
// C type: GxB_FC32_t
// A type: GxB_FC32_t
// cast: GxB_FC32_t cij = aij
// unaryop: cij = ctanhf (aij)
#define GB_ATYPE \
GxB_FC32_t
#define GB_CTYPE \
GxB_FC32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
GxB_FC32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = ctanhf (x) ;
// casting
#define GB_CAST(z, aij) \
GxB_FC32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
GxB_FC32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
GxB_FC32_t z = aij ; \
Cx [pC] = ctanhf (z) ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TANH || GxB_NO_FC32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_apply__tanh_fc32_fc32)
(
GxB_FC32_t *Cx, // Cx and Ax may be aliased
const GxB_FC32_t *Ax,
const int8_t *restrict Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ctanhf (z) ;
}
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
GxB_FC32_t aij = Ax [p] ;
GxB_FC32_t z = aij ;
Cx [p] = ctanhf (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB (_unop_tran__tanh_fc32_fc32)
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
fill.c | #ifdef __cplusplus
extern "C" {
#endif
extern void CXX_Fill_alpha(const unsigned int row, const unsigned int col, const double a, const double v[], void *ptr);
extern void CXX_Fill(const unsigned int row, const unsigned int col, const double v[], void *ptr);
extern void CXX_Fill_boundary(const unsigned int row, const unsigned int col, const unsigned int n, const double v[], void *ptr);
extern void CXX_Fill_diagonal(const unsigned int row, const unsigned int col, const double v, void *ptr);
extern void CXX_Fill_Reset(void *ptr);
#ifdef __cplusplus
}
#endif
#include <stddef.h>
#include <string.h>
#include <stdint.h>
#include <omp.h>
#include <math.h>
#include "geometry.h"
#include "bench.h"
#include "phy.h"
#include "core_kernel.h"
//static void fill_alpha(
// const unsigned int row,
// const unsigned int col,
// const double a,
// const double v[],
// const unsigned int bsz2,
// const unsigned int *ia,
// const unsigned int *ja,
// double *aa)
//{
// uint32_t i;
// for(i = ia[row]; i < ia[row+1]; i++)
// {
// if(ja[i] == col)
// {
// uint32_t j;
// for(j = 0; j < bsz2; j++) aa[bsz2 * i + j] += a * v[j];
//
// break;
// }
// }
//}
//
//static void fill(
// const unsigned int row,
// const unsigned int col,
// const double v[],
// const unsigned int bsz2,
// const unsigned int *ia,
// const unsigned int *ja,
// double *aa)
//{
// uint32_t i;
// for(i = ia[row]; i < ia[row+1]; i++)
// {
// if(ja[i] == col)
// {
// uint32_t j;
// for(j = 0; j < bsz2; j++) aa[bsz2 * i + j] += v[j];
//
// break;
// }
// }
//}
//
//static void fill_boundary(
// const unsigned int row,
// const unsigned int col,
// const double v[],
// const unsigned int n,
// const unsigned int bsz2,
// const unsigned int *ia,
// const unsigned int *ja,
// double *aa)
//{
// uint32_t i;
// for(i = ia[row]; i < ia[row+1]; i++)
// {
// if(ja[i] == col)
// {
// uint32_t j;
// for(j = 1; j <= n; j++) aa[bsz2 * i + (j * n + j)] += v[j-1];
//
// break;
// }
// }
//}
//
//static void fill_diagonal(
// const unsigned int row,
// const unsigned int col,
// const double v,
// const unsigned int bsz2,
// const unsigned int *ia,
// const unsigned int *ja,
// double *aa)
//{
// uint32_t i;
// for(i = ia[row]; i < ia[row+1]; i++)
// {
// if(ja[i] == col)
// {
// aa[bsz2 * i + 0] += v;
// aa[bsz2 * i + 5] += v;
// aa[bsz2 * i + 10] += v;
// aa[bsz2 * i + 15] += v;
//
// break;
// }
// }
//}
static void
_KRN_ComputeA(
const size_t nnodes,
const size_t nsnodes,
const size_t nfnodes,
const uint32_t bsz,
const uint32_t *nsptr,
const uint32_t *nfptr,
const double *s_xyz0,
const double *s_xyz1,
const double *s_xyz2,
const double *f_xyz0,
const double *f_xyz1,
const double *f_xyz2,
const uint32_t *ie,
const uint32_t *part,
const uint32_t *n0,
const uint32_t *n1,
const double *x0,
const double *x1,
const double *x2,
const double *x3,
const double *q,
const double *cdt,
void *matrix)//,
//const size_t nnz,
//const uint32_t bsz2,
//const uint32_t *ia,
//const uint32_t *ja,
//double *aa)
{
//memset(aa, 0, nnz * sizeof(double));
CXX_Fill_Reset(matrix);
#pragma omp parallel
{
uint32_t i;
#pragma omp for
for(i = 0; i < nnodes; i++)
{
// Store in the diagonal of the block
//fill_diagonal(i, i, cdt[i], bsz2, ia, ja, aa);
CXX_Fill_diagonal(i, i, cdt[i], matrix);
}
#pragma omp barrier
uint32_t t = (uint32_t) omp_get_thread_num();
uint32_t ie0 = ie[t];
uint32_t ie1 = ie[t+1];
for(i = ie0; i < ie1; i++)
{
const uint32_t node0 = n0[i];
const uint32_t node1 = n1[i];
const double xn = x0[i];
const double yn = x1[i];
const double zn = x2[i];
const double ln = x3[i];
/*
Now let's get our other two vectors.
For the first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} with the normal is close to unity, use {0,1,0} instead.
*/
double dot = xn;
double X1, Y1, Z1;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = - dot * yn;
Z1 = - dot * zn;
}
else
{
dot = yn;
X1 = - dot * xn;
Y1 = 1.f - dot * yn;
Z1 = - dot * zn;
}
/* Normalize the first vector */
double size = X1 * X1;
size += Y1 * Y1;
size += Z1 * Z1;
size = sqrt(size);
X1 /= size;
Y1 /= size;
Z1 /= size;
/* Take cross-product of normal and V1 to get V2 */
double X2 = yn * Z1;
X2 -= zn * Y1;
double Y2 = zn * X1;
Y2 -= xn * Z1;
double Z2 = xn * Y1;
Z2 -= yn * X1;
/* Variables on left */
// Velocity u
double uL = q[bsz * node0 + 1];
// Velocity v
double vL = q[bsz * node0 + 2];
// Velocity w
double wL = q[bsz * node0 + 3];
double ubarL = xn * uL;
ubarL += yn * vL;
ubarL += zn * wL;
/* Variables on right */
// Velocity u
double uR = q[bsz * node1 + 1];
// Velocity v
double vR = q[bsz * node1 + 2];
// Velocity w
double wR = q[bsz * node1 + 3];
double ubarR = xn * uR;
ubarR += yn * vR;
ubarR += zn * wR;
/*
Now compute eigenvalues and |A| from averaged variables
Average the variables
*/
double u = 0.5f * (uL + uR);
double v = 0.5f * (vL + vR);
double w = 0.5f * (wL + wR);
double ubar = xn * u;
ubar += yn * v;
ubar += zn * w;
double c2 = ubar * ubar + B;
double c = sqrt(c2);
/* Put in the eigenvalue smoothing stuff */
double eig1 = ln * fabs(ubar);
double eig2 = ln * fabs(ubar);
double eig3 = ln * fabs(ubar + c);
double eig4 = ln * fabs(ubar - c);
double phi1 = xn * B;
phi1 += u * ubar;
double phi2 = yn * B;
phi2 += v * ubar;
double phi3 = zn * B;
phi3 += w * ubar;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
/* Components of T(inverse) (call this y) */
double c2inv = 1.f / c2;
double y11 = u * phi4;
y11 += v * phi5;
y11 += w * phi6;
y11 = -c2inv * y11 / B;
double y21 = u * phi7;
y21 += v * phi8;
y21 += w * phi9;
y21 = -c2inv * y21 / B;
double y31 = c2inv * (c - ubar);
y31 = 0.5f * y31 / B;
double y41 = c2inv * (c + ubar);
y41 = -0.5f * y41 / B;
double y12 = c2inv * phi4;
double y22 = c2inv * phi7;
double y32 = c2inv * 0.5f * xn;
double y42 = c2inv * 0.5f * xn;
double y13 = c2inv * phi5;
double y23 = c2inv * phi8;
double y33 = c2inv * 0.5f * yn;
double y43 = c2inv * 0.5f * yn;
double y14 = c2inv * phi6;
double y24 = c2inv * phi9;
double y34 = c2inv * 0.5f * zn;
double y44 = c2inv * 0.5f * zn;
/* Now get elements of T */
double t13 = c * B;
double t23 = u * (ubar + c);
t23 += xn * B;
double t33 = v * (ubar + c);
t33 += yn * B;
double t43 = w * (ubar + c);
t43 += zn * B;
double t14 = -c * B;
double t24 = u * (ubar - c);
t24 += xn * B;
double t34 = v * (ubar - c);
t34 += yn * B;
double t44 = w * (ubar - c);
t44 += zn * B;
/* Compute T * |lambda| * T(inv) */
double a11 = eig3 * t13 * y31;
a11 += eig4 * t14 * y41;
double a12 = eig3 * t13 * y32;
a12 += eig4 * t14 * y42;
double a13 = eig3 * t13 * y33;
a13 += eig4 * t14 * y43;
double a14 = eig3 * t13 * y34;
a14 += eig4 * t14 * y44;
double a21 = eig1 * X1 * y11;
a21 += eig2 * X2 * y21;
a21 += eig3 * t23 * y31;
a21 += eig4 * t24 * y41;
double a22 = eig1 * X1 * y12;
a22 += eig2 * X2 * y22;
a22 += eig3 * t23 * y32;
a22 += eig4 * t24 * y42;
double a23 = eig1 * X1 * y13;
a23 += eig2 * X2 * y23;
a23 += eig3 * t23 * y33;
a23 += eig4 * t24 * y43;
double a24 = eig1 * X1 * y14;
a24 += eig2 * X2 * y24;
a24 += eig3 * t23 * y34;
a24 += eig4 * t24 * y44;
double a31 = eig1 * Y1 * y11;
a31 += eig2 * Y2 * y21;
a31 += eig3 * t33 * y31;
a31 += eig4 * t34 * y41;
double a32 = eig1 * Y1 * y12;
a32 += eig2 * Y2 * y22;
a32 += eig3 * t33 * y32;
a32 += eig4 * t34 * y42;
double a33 = eig1 * Y1 * y13;
a33 += eig2 * Y2 * y23;
a33 += eig3 * t33 * y33;
a33 += eig4 * t34 * y43;
double a34 = eig1 * Y1 * y14;
a34 += eig2 * Y2 * y24;
a34 += eig3 * t33 * y34;
a34 += eig4 * t34 * y44;
double a41 = eig1 * Z1 * y11;
a41 += eig2 * Z2 * y21;
a41 += eig3 * t43 * y31;
a41 += eig4 * t44 * y41;
double a42 = eig1 * Z1 * y12;
a42 += eig2 * Z2 * y22;
a42 += eig3 * t43 * y32;
a42 += eig4 * t44 * y42;
double a43 = eig1 * Z1 * y13;
a43 += eig2 * Z2 * y23;
a43 += eig3 * t43 * y33;
a43 += eig4 * t44 * y43;
double a44 = eig1 * Z1 * y14;
a44 += eig2 * Z2 * y24;
a44 += eig3 * t43 * y34;
a44 += eig4 * t44 * y44;
/* Regular Jacobians on left: Form 0.5 * (A + |A|) */
double lb = ln * B;
double lx = ln * xn;
double ly = ln * yn;
double lz = ln * zn;
/* Regular Jacobians on left */
double v0[16];
v0[0] = 0.5f * a11;
v0[4] = 0.5f * (lx + a21);
v0[8] = 0.5f * (ly + a31);
v0[12] = 0.5f * (lz + a41);
v0[1] = 0.5f * ((lb * xn) + a12);
v0[5] = 0.5f * ((ln * (ubarL + xn * uL)) + a22);
v0[9] = 0.5f * ((lx * vL) + a32);
v0[13] = 0.5f * ((lx * wL) + a42);
v0[2] = 0.5f * ((lb * yn) + a13);
v0[6] = 0.5f * ((ly * uL) + a23);
v0[10] = 0.5f * ((ln * (ubarL + yn * vL)) + a33);
v0[14] = 0.5f * ((ly * wL) + a43);
v0[3] = 0.5f * ((lb * zn) + a14);
v0[7] = 0.5f * ((lz * uL) + a24);
v0[11] = 0.5f * ((lz * vL) + a34);
v0[15] = 0.5f * ((ln * (ubarL + zn * wL)) + a44);
/* Regular Jacobians on right: form 0.5 * (A - |A|) */
double v1[16];
v1[0] = 0.5f * -a11;
v1[4] = 0.5f * (lx - a21);
v1[8] = 0.5f * (ly - a31);
v1[12] = 0.5f * (lz - a41);
v1[1] = 0.5f * ((lb * xn) - a12);
v1[5] = 0.5f * ((ln * (ubarR + xn * uR)) - a22);
v1[9] = 0.5f * ((lx * vR) - a32);
v1[13] = 0.5f * ((lx * wR) - a42);
v1[2] = 0.5f * ((lb * yn) - a13);
v1[6] = 0.5f * ((ly * uR) - a23);
v1[10] = 0.5f * ((ln * (ubarR + yn * vR)) - a33);
v1[14] = 0.5f * ((ly * wR) - a43);
v1[3] = 0.5f * ((lb * zn) - a14);
v1[7] = 0.5f * ((lz * uR) - a24);
v1[11] = 0.5f * ((lz * vR) - a34);
v1[15] = 0.5f * ((ln * (ubarR + zn * wR)) - a44);
if(part[node0] == t)
{
//fill(node0, node0, v0, bsz2, ia, ja, aa);
//fill(node0, node1, v1, bsz2, ia, ja, aa);
CXX_Fill(node0, node0, v0, matrix);
CXX_Fill(node0, node1, v1, matrix);
}
if(part[node1] == t)
{
//fill_alpha(node1, node0, -1.f, v0, bsz2, ia, ja, aa);
//fill_alpha(node1, node1, -1.f, v1, bsz2, ia, ja, aa);
CXX_Fill_alpha(node1, node0, -1.f, v0, matrix);
CXX_Fill_alpha(node1, node1, -1.f, v1, matrix);
}
}
#pragma omp barrier
#pragma omp for
for(i = 0; i < nsnodes; i++)
{
const double v[] = {s_xyz0[i], s_xyz1[i], s_xyz2[i]};
//fill_boundary(nsptr[i], nsptr[i], v, 3, bsz2, ia, ja, aa);
CXX_Fill_boundary(nsptr[i], nsptr[i], 3, v, matrix);
}
#pragma omp barrier
#pragma omp for
for(i = 0; i < nfnodes; i++)
{
uint32_t n = nfptr[i];
double xn = f_xyz0[i];
double yn = f_xyz1[i];
double zn = f_xyz2[i];
double ln = sqrt(xn * xn + yn * yn + zn * zn);
xn /= ln;
yn /= ln;
zn /= ln;
/* 9 FLOPS */
/*
Now let's get our other two vectors.
For the first vector, use {1,0,0} and subtract off the component
in the direction of the face normal. If the inner product of
{1,0,0} with the normal is close to unity, use {0,1,0} instead.
*/
double dot = xn;
double X1, Y1, Z1;
if(fabs(dot) < 0.95f)
{
X1 = 1.f - dot * xn;
Y1 = - dot * yn;
Z1 = - dot * zn;
}
else
{
dot = yn;
X1 = - dot * xn;
Y1 = 1.f - dot * yn;
Z1 = - dot * zn;
}
/* 6 FLOPS */
/* Normalize the first vector (V1) */
double size = sqrt(X1 * X1 + Y1 * Y1 + Z1 * Z1);
X1 /= size;
Y1 /= size;
Z1 /= size;
/* 9 FLOPS */
/* Take cross-product of normal with V1 to get V2 */
double X2 = yn * Z1 - zn * Y1;
double Y2 = zn * X1 - xn * Z1;
double Z2 = xn * Y1 - yn * X1;
/* 9 FLOPS */
/* Calculate elements of T and T(inverse)
evaluated at freestream */
double ubar0 = xn * U;
ubar0 += yn * V;
ubar0 += zn * W;
double c20 = ubar0 * ubar0 + B;
double c0 = sqrt(c20);
double phi1 = xn * B;
phi1 += U * ubar0;
double phi2 = yn * B;
phi2 += V * ubar0;
double phi3 = zn * B;
phi3 += W * ubar0;
double phi4 = Y2 * phi3;
phi4 -= Z2 * phi2;
double phi5 = Z2 * phi1;
phi5 -= X2 * phi3;
double phi6 = X2 * phi2;
phi6 -= Y2 * phi1;
double phi7 = Z1 * phi2;
phi7 -= Y1 * phi3;
double phi8 = X1 * phi3;
phi8 -= Z1 * phi1;
double phi9 = Y1 * phi1;
phi9 -= X1 * phi2;
/* 9 * 3 + 8 FLOPS */
double t13 = c0 * B;
double t23 = U * (ubar0 + c0);
t23 += xn * B;
double t33 = V * (ubar0 + c0);
t33 += yn * B;
double t43 = W * (ubar0 + c0);
t43 += zn * B;
double t14 = -c0 * B;
double t24 = U * (ubar0 - c0);
t24 += xn * B;
double t34 = V * (ubar0 - c0);
t34 += yn * B;
double t44 = W * (ubar0 - c0);
t44 += zn * B;
double ti11 = U * phi4;
ti11 += V * phi5;
ti11 += W * phi6;
ti11 = -ti11 / B / c20;
double ti21 = U * phi7;
ti21 += V * phi8;
ti21 += W * phi9;
ti21 = -ti21 / B / c20;
double ti31 = (c0 - ubar0) / (2.f * B * c20);
double ti41 = -(c0 + ubar0) / (2.f * B * c20);
double ti12 = phi4 / c20;
double ti22 = phi7 / c20;
double ti32 = 0.5f * xn / c20;
double ti42 = 0.5f * xn / c20;
double ti13 = phi5 / c20;
double ti23 = phi8 / c20;
double ti33 = 0.5f * yn / c20;
double ti43 = 0.5f * yn / c20;
double ti14 = phi6 / c20;
double ti24 = phi9 / c20;
double ti34 = 0.5f * zn / c20;
double ti44 = 0.5f * zn / c20;
/* 27 + 16 + 9 + 6 + 6 + 6 FLOPS */
/* Now, get the variables on the "inside" */
double pi = q[bsz * n + 0];
double ui = q[bsz * n + 1];
double vi = q[bsz * n + 2];
double wi = q[bsz * n + 3];
double un = xn * ui;
un += yn * vi;
un += zn * wi;
/* 5 FLOPS */
/* If ubar is negative, take the reference
condition from outside */
double pr, prp, ur, uru, vr, vrv, wr, wrw;
if(un > 0.f)
{
pr = pi;
prp = 1.f;
ur = ui;
uru = 1.f;
vr = vi;
vrv = 1.f;
wr = wi;
wrw = 1.f;
}
else
{
pr = P;
prp = 0.f;
ur = U;
uru = 0.f;
vr = V;
vrv = 0.f;
wr = W;
wrw = 0.f;
}
/* Set rhs */
double rhs1 = ti11 * pr;
rhs1 += ti12 * ur;
rhs1 += ti13 * vr;
rhs1 += ti14 * wr;
double rhs1p = ti11 * prp;
double rhs1u = ti12 * uru;
double rhs1v = ti13 * vrv;
double rhs1w = ti14 * wrw;
double rhs2 = ti21 * pr;
rhs2 += ti22 * ur;
rhs2 += ti23 * vr;
rhs2 += ti24 * wr;
double rhs2p = ti21 * prp;
double rhs2u = ti22 * uru;
double rhs2v = ti23 * vrv;
double rhs2w = ti24 * wrw;
double rhs3 = ti31 * pi;
rhs3 += ti32 * ui;
rhs3 += ti33 * vi;
rhs3 += ti34 * wi;
double rhs4 = ti41 * P;
rhs4 += ti42 * U;
rhs4 += ti43 * V;
rhs4 += ti44 * W;
/* 12 + 24 FLOPS */
/* Now do matrix multiplication to get values on boundary */
double pb = t13 * rhs3;
pb += t14 * rhs4;
double pbp = t13 * ti31;
double pbu = t13 * ti32;
double pbv = t13 * ti33;
double pbw = t13 * ti34;
double ub = X1 * rhs1;
ub += X2 * rhs2;
ub += t23 * rhs3;
ub += t24 * rhs4;
double ubp = X1 * rhs1p;
ubp += X2 * rhs2p;
ubp += t23 * ti31;
double ubu = X1 * rhs1u;
ubu += X2 * rhs2u;
ubu += t23 * ti32;
double ubv = X1 * rhs1v;
ubv += X2 * rhs2v;
ubv += t23 * ti33;
double ubw = X1 * rhs1w;
ubw += X2 * rhs2w;
ubw += t23 * ti34;
double vb = Y1 * rhs1;
vb += Y2 * rhs2;
vb += t33 * rhs3;
vb += t34 * rhs4;
double vbp = Y1 * rhs1p;
vbp += Y2 * rhs2p;
vbp += t33 * ti31;
double vbu = Y1 * rhs1u;
vbu += Y2 * rhs2u;
vbu += t33 * ti32;
double vbv = Y1 * rhs1v;
vbv += Y2 * rhs2v;
vbv += t33 * ti33;
double vbw = Y1 * rhs1w;
vbw += Y2 * rhs2w;
vbw += t33 * ti34;
double wb = Z1 * rhs1;
wb += Z2 * rhs2;
wb += t43 * rhs3;
wb += t44 * rhs4;
double wbp = Z1 * rhs1p;
wbp += Z2 * rhs2p;
wbp += t43 * ti31;
double wbu = Z1 * rhs1u;
wbu += Z2 * rhs2u;
wbu += t43 * ti32;
double wbv = Z1 * rhs1v;
wbv += Z2 * rhs2v;
wbv += t43 * ti33;
double wbw = Z1 * rhs1w;
wbw += Z2 * rhs2w;
wbw += t43 * ti34;
/* 5 * 15 + 6 + 5 + 2 FLOPS */
double unb = xn * ub;
unb += yn * vb;
unb += zn * wb;
double unbp = xn * ubp;
unbp += yn * vbp;
unbp += zn * wbp;
double unbu = xn * ubu;
unbu += yn * vbu;
unbu += zn * wbu;
double unbv = xn * ubv;
unbv += yn * vbv;
unbv += zn * wbv;
double unbw = xn * ubw;
unbw += yn * vbw;
unbw += zn * wbw;
/* 5 * 5 FLOPS */
/* Now add contribution to lhs */
double v[16];
v[0] = ln * B * unbp;
v[4] = ln * (ub * unbp + unb * ubp + xn * pbp);
v[8] = ln * (vb * unbp + unb * vbp + yn * pbp);
v[12] = ln * (wb * unbp + unb * wbp + zn * pbp);
v[1] = ln * B * unbu;
v[5] = ln * (ub * unbu + unb * ubu + xn * pbu);
v[9] = ln * (vb * unbu + unb * vbu + yn * pbu);
v[13] = ln * (wb * unbu + unb * wbu + zn * pbu);
v[2] = ln * B * unbv;
v[6] = ln * (ub * unbv + unb * ubv + xn * pbv);
v[10] = ln * (vb * unbv + unb * vbv + yn * pbv);
v[14] = ln * (wb * unbv + unb * wbv + zn * pbv);
v[3] = ln * B * unbw;
v[7] = ln * (ub * unbw + unb * ubw + xn * pbw);
v[11] = ln * (vb * unbw + unb * vbw + yn * pbw);
v[15] = ln * (wb * unbw + unb * wbw + zn * pbw);
//fill(n, n, v, bsz2, ia, ja, aa);
CXX_Fill(n, n, v, matrix);
}
}
}
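/*
 * Editor's sketch: the tangent-basis construction used twice in the kernel
 * above, factored out for illustration (hypothetical helper, not called by
 * the code). Given a unit face normal n = (xn, yn, zn), V1 is built by
 * deflating {1,0,0} (or {0,1,0} when n is nearly x-aligned) against n and
 * normalizing; V2 = n x V1 then completes the orthonormal triad {n, V1, V2}.
 */
#if 0
static void tangent_basis(double xn, double yn, double zn,
double *X1, double *Y1, double *Z1,
double *X2, double *Y2, double *Z2)
{
double dot = xn;
double x1, y1, z1;
if(fabs(dot) < 0.95)
{
x1 = 1. - dot * xn;
y1 = - dot * yn;
z1 = - dot * zn;
}
else
{
dot = yn;
x1 = - dot * xn;
y1 = 1. - dot * yn;
z1 = - dot * zn;
}
double size = sqrt(x1 * x1 + y1 * y1 + z1 * z1);
x1 /= size; y1 /= size; z1 /= size;
*X1 = x1; *Y1 = y1; *Z1 = z1;
/* cross product n x V1 */
*X2 = yn * z1 - zn * y1;
*Y2 = zn * x1 - xn * z1;
*Z2 = xn * y1 - yn * x1;
}
#endif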
void
ComputeA(GEOMETRY *g)
{
BENCH start_bench = rdbench();
_KRN_ComputeA(
g->n->sz,
g->b->s->sz,
g->b->f->sz,
g->c->b,
g->b->s->nptr,
g->b->f->nptr,
g->b->s->xyz->x0,
g->b->s->xyz->x1,
g->b->s->xyz->x2,
g->b->f->xyz->x0,
g->b->f->xyz->x1,
g->b->f->xyz->x2,
g->s->i,
g->n->part,
g->e->eptr->n0,
g->e->eptr->n1,
g->e->xyzn->x0,
g->e->xyzn->x1,
g->e->xyzn->x2,
g->e->xyzn->x3,
g->q->q,
g->n->cdt,
g->matrix);//,
//g->c->mat->i[g->n->sz] * g->c->b2,
//g->c->b2,
//g->c->mat->i,
//g->c->mat->j,
//g->c->mat->a);
fun3d_log(start_bench, KERNEL_FLUX);
} |
LAGraph_BF_full2.c | //------------------------------------------------------------------------------
// LAGraph_BF_full2.c: Bellman-Ford single-source shortest paths, returning the
// shortest-path tree; the diagonal of the input matrix A need not be an
// explicit 0. Uses the frontier idea from Roi Lipman.
//------------------------------------------------------------------------------
/*
LAGraph: graph algorithms based on GraphBLAS
Copyright 2019 LAGraph Contributors.
(see Contributors.txt for a full list of Contributors; see
ContributionInstructions.txt for information on how you can Contribute to
this project).
All Rights Reserved.
NO WARRANTY. THIS MATERIAL IS FURNISHED ON AN "AS-IS" BASIS. THE LAGRAPH
CONTRIBUTORS MAKE NO WARRANTIES OF ANY KIND, EITHER EXPRESSED OR IMPLIED,
AS TO ANY MATTER INCLUDING, BUT NOT LIMITED TO, WARRANTY OF FITNESS FOR
PURPOSE OR MERCHANTABILITY, EXCLUSIVITY, OR RESULTS OBTAINED FROM USE OF
THE MATERIAL. THE CONTRIBUTORS DO NOT MAKE ANY WARRANTY OF ANY KIND WITH
RESPECT TO FREEDOM FROM PATENT, TRADEMARK, OR COPYRIGHT INFRINGEMENT.
Released under a BSD license, please see the LICENSE file distributed with
this Software or contact permission@sei.cmu.edu for full terms.
Created, in part, with funding and support from the United States
Government. (see Acknowledgments.txt file).
This program includes and/or can make use of certain third party source
code, object code, documentation and other files ("Third Party Software").
See LICENSE file for more details.
*/
//------------------------------------------------------------------------------
// LAGraph_BF_full2: Bellman-Ford single source shortest paths, returning both
// the path lengths and the shortest-path tree. contributed by Jinhao Chen and
// Tim Davis, Texas A&M.
// LAGraph_BF_full2 performs Bellman-Ford to find the shortest paths, the
// parent nodes along each path, and the hops (number of edges) in each path
// from a given source vertex s in the range [0, n) on a graph given as an
// n*n matrix A. The sparse matrix A has an entry A(i, j) = w if there is an
// edge from vertex i to vertex j with weight w.
// TODO: think about the return values
// LAGraph_BF_full2 returns GrB_SUCCESS if it succeeds. In this case, there
// are no negative-weight cycles in the graph, and d, pi, and h are returned.
// The vector d has d(k) as the shortest distance from s to k. pi(k) = p+1,
// where p is the parent node of k-th node in the shortest path. In particular,
// pi(s) = 0. h(k) = hop(s, k), the number of edges from s to k in the shortest
// path.
// If the graph has a negative-weight cycle, GrB_NO_VALUE is returned, and the
// GrB_Vectors d(k), pi(k) and h(k) (i.e., *pd_output, *ppi_output and
// *ph_output respectively) will be NULL when negative-weight cycle detected.
// Otherwise, other errors such as GrB_OUT_OF_MEMORY, GrB_INVALID_OBJECT, and
// so on, can be returned, if these errors are found by the underlying
// GrB_* functions.
//------------------------------------------------------------------------------
#include "BF_test.h"
#define LAGRAPH_FREE_WORK \
{ \
GrB_free(&d); \
GrB_free(&dtmp); \
GrB_free(&dfrontier); \
GrB_free(&Atmp); \
GrB_free(&BF_Tuple3); \
GrB_free(&BF_lMIN_Tuple3); \
GrB_free(&BF_PLUSrhs_Tuple3); \
GrB_free(&BF_EQ_Tuple3); \
GrB_free(&BF_lMIN_Tuple3_Monoid); \
GrB_free(&BF_lMIN_PLUSrhs_Tuple3); \
LAGRAPH_FREE (I); \
LAGRAPH_FREE (J); \
LAGRAPH_FREE (w); \
LAGRAPH_FREE (W); \
LAGRAPH_FREE (h); \
LAGRAPH_FREE (pi); \
}
#define LAGRAPH_FREE_ALL \
{ \
LAGRAPH_FREE_WORK \
GrB_free (pd_output); \
GrB_free (ppi_output); \
GrB_free (ph_output); \
}
//------------------------------------------------------------------------------
// data type for each entry of the adjacency matrix A and "distance" vector d;
// <INFINITY,INFINITY,INFINITY> corresponds to nonexistence of a path, and
// the value <0, 0, NULL> corresponds to a path from a vertex to itself
//------------------------------------------------------------------------------
typedef struct
{
double w; // w corresponds to a path weight.
GrB_Index h; // h corresponds to a path size or number of hops.
GrB_Index pi;// pi corresponds to the penultimate vertex along a path.
// vertex indexed as 1, 2, 3, ... , V, and pi = 0 (as nil)
// for u=v, and pi = UINT64_MAX (as inf) for (u,v) not in E
}
BF_Tuple3_struct;
//------------------------------------------------------------------------------
// 2 binary functions, z=f(x,y), where Tuple3xTuple3 -> Tuple3
//------------------------------------------------------------------------------
void BF_lMIN2
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
if (x->w < y->w
|| (x->w == y->w && x->h < y->h)
|| (x->w == y->w && x->h == y->h && x->pi < y->pi))
{
if (z != x) { *z = *x; }
}
else
{
*z = *y;
}
}
void BF_PLUSrhs2
(
BF_Tuple3_struct *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
z->w = x->w + y->w;
z->h = x->h + y->h;
if (x->pi != UINT64_MAX && y->pi != 0)
{
z->pi = y->pi;
}
else
{
z->pi = x->pi;
}
}
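// Editor's note: a worked example of the two operators above. BF_lMIN2 is a
// lexicographic min on (w, h, pi): lMIN(<3,2,5>, <3,1,7>) = <3,1,7>, since
// the weights tie and 1 < 2 hops. BF_PLUSrhs2 adds weights and hop counts
// and keeps the right-hand parent when it names a real vertex:
// PLUSrhs(<3,1,2>, <4,1,6>) = <7,2,6>. A vxm step with this semiring thus
// computes d'(j) = lMIN over k of PLUSrhs(d(k), A(k,j)), extending every
// known path by one edge while tracking hops and predecessors.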
void BF_EQ
(
bool *z,
const BF_Tuple3_struct *x,
const BF_Tuple3_struct *y
)
{
if (x->w == y->w && x->h == y->h && x->pi == y->pi)
{
*z = true;
}
else
{
*z = false;
}
}
// Given an n-by-n adjacency matrix A and a source vertex s:
// if there is no negative-weight cycle reachable from s, return the distances
// of shortest paths from s and the parents along the paths as vector d;
// otherwise, return d=NULL since a negative-weight cycle exists.
// pd_output is pointer to a GrB_Vector, where the i-th entry is d(s,i), the
// sum of edges length in the shortest path
// ppi_output is pointer to a GrB_Vector, where the i-th entry is pi(i), the
// parent of i-th vertex in the shortest path
// ph_output is pointer to a GrB_Vector, where the i-th entry is h(s,i), the
// number of edges from s to i in the shortest path
// A has weights on corresponding entries of edges
// s is given index for source vertex
GrB_Info LAGraph_BF_full2
(
GrB_Vector *pd_output, //the pointer to the vector of distance
GrB_Vector *ppi_output, //the pointer to the vector of parent
GrB_Vector *ph_output, //the pointer to the vector of hops
const GrB_Matrix A, //matrix for the graph
const GrB_Index s //given index of the source
)
{
GrB_Info info;
// tmp vector to store distance vector after n (i.e., V) loops
GrB_Vector d = NULL, dtmp = NULL, dfrontier = NULL;
GrB_Matrix Atmp = NULL;
GrB_Type BF_Tuple3;
GrB_BinaryOp BF_lMIN_Tuple3;
GrB_BinaryOp BF_PLUSrhs_Tuple3;
GrB_BinaryOp BF_EQ_Tuple3;
GrB_Monoid BF_lMIN_Tuple3_Monoid;
GrB_Semiring BF_lMIN_PLUSrhs_Tuple3;
GrB_Index nrows, ncols, n, nz; // n = # of row/col, nz = # of nnz in graph
GrB_Index *I = NULL, *J = NULL; // for col/row indices of entries from A
GrB_Index *h = NULL, *pi = NULL;
double *w = NULL;
BF_Tuple3_struct *W = NULL;
if (A == NULL || pd_output == NULL ||
ppi_output == NULL || ph_output == NULL)
{
// required argument is missing
LAGRAPH_ERROR ("required arguments are NULL", GrB_NULL_POINTER) ;
}
*pd_output = NULL;
*ppi_output = NULL;
*ph_output = NULL;
LAGr_Matrix_nrows (&nrows, A) ;
LAGr_Matrix_ncols (&ncols, A) ;
LAGr_Matrix_nvals (&nz, A);
if (nrows != ncols)
{
// A must be square
LAGRAPH_ERROR ("A must be square", GrB_INVALID_VALUE) ;
}
n = nrows;
if (s >= n) // s is a GrB_Index (unsigned), so s < 0 cannot occur
{
LAGRAPH_ERROR ("invalid value for source vertex s", GrB_INVALID_VALUE);
}
//--------------------------------------------------------------------------
// create all GrB_Type GrB_BinaryOp GrB_Monoid and GrB_Semiring
//--------------------------------------------------------------------------
// GrB_Type
LAGr_Type_new(&BF_Tuple3, sizeof(BF_Tuple3_struct));
// GrB_BinaryOp
LAGr_BinaryOp_new(&BF_EQ_Tuple3,
(LAGraph_binary_function) (&BF_EQ), GrB_BOOL, BF_Tuple3, BF_Tuple3);
LAGr_BinaryOp_new(&BF_lMIN_Tuple3,
(LAGraph_binary_function) (&BF_lMIN2),
BF_Tuple3, BF_Tuple3, BF_Tuple3);
LAGr_BinaryOp_new(&BF_PLUSrhs_Tuple3,
(LAGraph_binary_function)(&BF_PLUSrhs2),
BF_Tuple3, BF_Tuple3, BF_Tuple3);
// GrB_Monoid
BF_Tuple3_struct BF_identity = (BF_Tuple3_struct) { .w = INFINITY,
.h = UINT64_MAX, .pi = UINT64_MAX };
LAGRAPH_OK(GrB_Monoid_new_UDT(&BF_lMIN_Tuple3_Monoid, BF_lMIN_Tuple3,
&BF_identity));
//GrB_Semiring
LAGr_Semiring_new(&BF_lMIN_PLUSrhs_Tuple3,
BF_lMIN_Tuple3_Monoid, BF_PLUSrhs_Tuple3);
//--------------------------------------------------------------------------
// allocate arrays used for tuplets
//--------------------------------------------------------------------------
I = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
J = LAGraph_malloc (nz, sizeof(GrB_Index)) ;
w = LAGraph_malloc (nz, sizeof(double)) ;
W = LAGraph_malloc (nz, sizeof(BF_Tuple3_struct)) ;
if (I == NULL || J == NULL || w == NULL || W == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
//--------------------------------------------------------------------------
// create matrix Atmp based on A, while its entries become BF_Tuple3 type
//--------------------------------------------------------------------------
LAGRAPH_OK(GrB_Matrix_extractTuples_FP64(I, J, w, &nz, A));
int nthreads = LAGraph_get_nthreads ( ) ;
printf ("nthreads %d\n", nthreads) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (GrB_Index k = 0; k < nz; k++)
{
if (w[k] == 0) // zero-weight entries (e.g., explicit zeros on the diagonal)
{
W[k] = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
}
else
{
W[k] = (BF_Tuple3_struct) { .w = w[k], .h = 1, .pi = I[k] + 1 };
}
}
LAGr_Matrix_new(&Atmp, BF_Tuple3, n, n);
LAGRAPH_OK(GrB_Matrix_build_UDT(Atmp, I, J, W, nz, BF_lMIN_Tuple3));
LAGRAPH_FREE (I);
LAGRAPH_FREE (J);
LAGRAPH_FREE (W);
LAGRAPH_FREE (w);
//--------------------------------------------------------------------------
// create and initialize "distance" vector d
//--------------------------------------------------------------------------
LAGr_Vector_new(&d, BF_Tuple3, n);
// initial distance from s to itself
BF_Tuple3_struct d0 = (BF_Tuple3_struct) { .w = 0, .h = 0, .pi = 0 };
LAGRAPH_OK(GrB_Vector_setElement_UDT(d, &d0, s));
//--------------------------------------------------------------------------
// start the Bellman Ford process
//--------------------------------------------------------------------------
// copy d to dtmp in order to create a vector of the same size
LAGr_Vector_dup(&dtmp, d);
LAGr_Vector_dup(&dfrontier, d);
bool same = false; // variable indicating if d == dtmp
int64_t iter = 0; // number of iterations
// terminate when no new path is found or after V-1 loops (a shortest path
// has at most V-1 edges, so further improvement implies a negative cycle)
while (!same && iter < n - 1)
{
// execute the semiring on dfrontier and Atmp, saving the result to dfrontier
LAGr_vxm(dfrontier, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dfrontier, Atmp, GrB_NULL);
// dtmp[i] = min(d[i], dfrontier[i]).
GrB_eWiseAdd_Vector_BinaryOp(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_Tuple3,
d, dfrontier, GrB_NULL);
LAGRAPH_OK (LAGraph_Vector_isequal(&same, dtmp, d, BF_EQ_Tuple3));
if (!same)
{
GrB_Vector ttmp = dtmp;
dtmp = d;
d = ttmp;
}
iter ++;
}
// check for negative-weight cycle only when there was a new path in the
// last loop, otherwise, there can't be a negative-weight cycle.
if (!same)
{
// execute semiring again to check for negative-weight cycle
LAGr_vxm(dfrontier, GrB_NULL, GrB_NULL,
BF_lMIN_PLUSrhs_Tuple3, dfrontier, Atmp, GrB_NULL);
// dtmp[i] = min(d[i], dfrontier[i]).
GrB_eWiseAdd_Vector_BinaryOp(dtmp, GrB_NULL, GrB_NULL, BF_lMIN_Tuple3,
d, dfrontier, GrB_NULL);
// if d != dtmp, then there is a negative-weight cycle in the graph
LAGRAPH_OK (LAGraph_Vector_isequal(&same, dtmp, d, BF_EQ_Tuple3));
if (!same)
{
// printf("A negative-weight cycle found. \n");
LAGRAPH_FREE_ALL;
return (GrB_NO_VALUE) ;
}
}
//--------------------------------------------------------------------------
// extract tuple from "distance" vector d and create GrB_Vectors for output
//--------------------------------------------------------------------------
I = LAGraph_malloc (n, sizeof(GrB_Index)) ;
W = LAGraph_malloc (n, sizeof(BF_Tuple3_struct)) ;
w = LAGraph_malloc (n, sizeof(double)) ;
h = LAGraph_malloc (n, sizeof(GrB_Index)) ;
pi = LAGraph_malloc (n, sizeof(GrB_Index)) ;
if (I == NULL || W == NULL || w == NULL || h == NULL || pi == NULL)
{
LAGRAPH_ERROR ("out of memory", GrB_OUT_OF_MEMORY) ;
}
LAGRAPH_OK(GrB_Vector_extractTuples_UDT (I, (void *) W, &nz, d));
for (GrB_Index k = 0; k < nz; k++)
{
w [k] = W[k].w ;
h [k] = W[k].h ;
pi[k] = W[k].pi;
}
LAGr_Vector_new(pd_output, GrB_FP64, n);
LAGr_Vector_new(ppi_output, GrB_UINT64, n);
LAGr_Vector_new(ph_output, GrB_UINT64, n);
LAGr_Vector_build (*pd_output , I, w , nz,GrB_MIN_FP64 );
LAGr_Vector_build (*ppi_output, I, pi, nz,GrB_MIN_UINT64);
LAGr_Vector_build (*ph_output , I, h , nz,GrB_MIN_UINT64);
LAGRAPH_FREE_WORK;
return (GrB_SUCCESS) ;
}
|
GB_binop__times_uint32.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_mkl.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB_AaddB__times_uint32
// A.*B function (eWiseMult): GB_AemultB__times_uint32
// A*D function (colscale): GB_AxD__times_uint32
// D*A function (rowscale): GB_DxB__times_uint32
// C+=B function (dense accum): GB_Cdense_accumB__times_uint32
// C+=b function (dense accum): GB_Cdense_accumb__times_uint32
// C+=A+B function (dense ewise3): GB_Cdense_ewise3_accum__times_uint32
// C=A+B function (dense ewise3): GB_Cdense_ewise3_noaccum__times_uint32
// C=scalar+B GB_bind1st__times_uint32
// C=scalar+B' GB_bind1st_tran__times_uint32
// C=A+scalar GB_bind2nd__times_uint32
// C=A'+scalar GB_bind2nd_tran__times_uint32
// C type: uint32_t
// A type: uint32_t
// B,b type: uint32_t
// BinaryOp: cij = (aij * bij)
#define GB_ATYPE \
uint32_t
#define GB_BTYPE \
uint32_t
#define GB_CTYPE \
uint32_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
uint32_t aij = Ax [pA]
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB) \
uint32_t bij = Bx [pB]
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
uint32_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA) \
cij = Ax [pA]
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB) \
cij = Bx [pB]
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z, x, y) \
z = (x * y) ;
// op is second
#define GB_OP_IS_SECOND \
0
// op is plus_fp32 or plus_fp64
#define GB_OP_IS_PLUS_REAL \
0
// op is minus_fp32 or minus_fp64
#define GB_OP_IS_MINUS_REAL \
0
// GB_cblas_*axpy gateway routine, if it exists for this operator and type:
#define GB_CBLAS_AXPY \
(none)
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_UINT32 || GxB_NO_TIMES_UINT32)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB_Cdense_ewise3_accum__times_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_ewise3_noaccum__times_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumB__times_uint32
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB_Cdense_accumb__times_uint32
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type uint32_t
uint32_t bwork = (*((uint32_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_AxD__times_uint32
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *GB_RESTRICT kfirst_slice,
const int64_t *GB_RESTRICT klast_slice,
const int64_t *GB_RESTRICT pstart_slice,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_colscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB_DxB__times_uint32
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *GB_RESTRICT Cx = (uint32_t *) C->x ;
#include "GB_AxB_rowscale_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB_AaddB__times_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_add_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB_AemultB__times_uint32
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *GB_RESTRICT C_to_M,
const int64_t *GB_RESTRICT C_to_A,
const int64_t *GB_RESTRICT C_to_B,
const GB_task_struct *GB_RESTRICT TaskList,
const int ntasks,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB_bind1st__times_uint32
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t x = (*((uint32_t *) x_input)) ;
uint32_t *Bx = (uint32_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t bij = Bx [p] ;
Cx [p] = (x * bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB_bind2nd__times_uint32
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
uint32_t *Cx = (uint32_t *) Cx_output ;
uint32_t *Ax = (uint32_t *) Ax_input ;
uint32_t y = (*((uint32_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
uint32_t aij = Ax [p] ;
Cx [p] = (aij * y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (x * aij) ; \
}
GrB_Info GB_bind1st_tran__times_uint32
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t x = (*((const uint32_t *) x_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
uint32_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
uint32_t aij = Ax [pA] ; \
Cx [pC] = (aij * y) ; \
}
GrB_Info GB_bind2nd_tran__times_uint32
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
uint32_t y = (*((const uint32_t *) y_input)) ;
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
ark_brusselator1D_omp.c | /*---------------------------------------------------------------
* Programmer(s): Daniel R. Reynolds @ SMU
*---------------------------------------------------------------
* SUNDIALS Copyright Start
* Copyright (c) 2002-2021, Lawrence Livermore National Security
* and Southern Methodist University.
* All rights reserved.
*
* See the top-level LICENSE and NOTICE files for details.
*
* SPDX-License-Identifier: BSD-3-Clause
* SUNDIALS Copyright End
*---------------------------------------------------------------
* Example problem:
*
* The following test simulates a brusselator problem from chemical
 * kinetics. This is a PDE system with 3 components, Y = [u,v,w],
* satisfying the equations,
* u_t = du*u_xx + a - (w+1)*u + v*u^2
* v_t = dv*v_xx + w*u - v*u^2
* w_t = dw*w_xx + (b-w)/ep - w*u
* for t in [0, 80], x in [0, 1], with initial conditions
* u(0,x) = a + 0.1*sin(pi*x)
* v(0,x) = b/a + 0.1*sin(pi*x)
* w(0,x) = b + 0.1*sin(pi*x),
* and with stationary boundary conditions, i.e.
* u_t(t,0) = u_t(t,1) = 0,
* v_t(t,0) = v_t(t,1) = 0,
* w_t(t,0) = w_t(t,1) = 0.
* Note: these can also be implemented as Dirichlet boundary
* conditions with values identical to the initial conditions.
*
* The spatial derivatives are computed using second-order
* centered differences, with the data distributed over N points
* on a uniform spatial grid.
*
* This program solves the problem with the DIRK method, using a
* Newton iteration with the band linear solver, and a
* user-supplied Jacobian routine. This example uses the OpenMP
* vector kernel, and employs OpenMP threading within the
* right-hand side and Jacobian construction functions.
*
* 100 outputs are printed at equal intervals, and run statistics
* are printed at the end.
*---------------------------------------------------------------*/
/* Header files */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <arkode/arkode_arkstep.h> /* prototypes for ARKStep fcts., consts */
#include <nvector/nvector_openmp.h> /* access to OpenMP N_Vector */
#include <sunmatrix/sunmatrix_band.h> /* access to band SUNMatrix */
#include <sunlinsol/sunlinsol_band.h> /* access to band SUNLinearSolver */
#include <sundials/sundials_types.h> /* def. of type 'realtype' */
#ifdef _OPENMP
#include <omp.h> /* OpenMP functions */
#endif
#if defined(SUNDIALS_EXTENDED_PRECISION)
#define GSYM "Lg"
#define ESYM "Le"
#define FSYM "Lf"
#else
#define GSYM "g"
#define ESYM "e"
#define FSYM "f"
#endif
/* accessor macros between (x,v) location and 1D NVector array */
#define IDX(x,v) (3*(x)+v)
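/* e.g., IDX(5,0)=15, IDX(5,1)=16, IDX(5,2)=17: the (u,v,w) values at mesh
   node 5 occupy three consecutive entries of the NVector data array */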
/* user data structure */
typedef struct {
sunindextype N; /* number of intervals */
int nthreads; /* number of OpenMP threads */
realtype dx; /* mesh spacing */
realtype a; /* constant forcing on u */
realtype b; /* steady-state value of w */
realtype du; /* diffusion coeff for u */
realtype dv; /* diffusion coeff for v */
realtype dw; /* diffusion coeff for w */
realtype ep; /* stiffness parameter */
} *UserData;
/* User-supplied Functions Called by the Solver */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data);
static int Jac(realtype t, N_Vector y, N_Vector fy,
SUNMatrix J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3);
/* Private helper functions */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata);
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata);
/* Private function to check function return values */
static int check_flag(void *flagvalue, const char *funcname, int opt);
/* Main Program */
int main(int argc, char *argv[])
{
/* general problem parameters */
realtype T0 = RCONST(0.0); /* initial time */
realtype Tf = RCONST(10.0); /* final time */
int Nt = 100; /* total number of output times */
int Nvar = 3; /* number of solution fields */
UserData udata = NULL;
realtype *data;
sunindextype N = 201; /* spatial mesh size */
realtype a = 0.6; /* problem parameters */
realtype b = 2.0;
realtype du = 0.025;
realtype dv = 0.025;
realtype dw = 0.025;
realtype ep = 1.0e-5; /* stiffness parameter */
realtype reltol = 1.0e-6; /* tolerances */
realtype abstol = 1.0e-10;
sunindextype NEQ, i;
/* general problem variables */
int flag; /* reusable error-checking flag */
N_Vector y = NULL; /* empty vector for storing solution */
N_Vector umask = NULL; /* empty mask vectors for viewing solution components */
N_Vector vmask = NULL;
N_Vector wmask = NULL;
SUNMatrix A = NULL; /* empty matrix for linear solver */
SUNLinearSolver LS = NULL; /* empty linear solver structure */
void *arkode_mem = NULL; /* empty ARKode memory structure */
realtype pi, t, dTout, tout, u, v, w;
FILE *FID, *UFID, *VFID, *WFID;
int iout, num_threads;
long int nst, nst_a, nfe, nfi, nsetups, nje, nfeLS, nni, ncfn, netf;
/* allocate udata structure */
udata = (UserData) malloc(sizeof(*udata));
if (check_flag((void *) udata, "malloc", 2)) return 1;
/* set the number of threads to use */
num_threads = 1; /* default value */
#ifdef _OPENMP
num_threads = omp_get_max_threads(); /* overwrite with OMP_NUM_THREADS environment variable */
#endif
if (argc > 1) /* overwrite with command line value, if supplied */
num_threads = (int) strtol(argv[1], NULL, 0);
/* store the inputs in the UserData structure */
udata->N = N;
udata->a = a;
udata->b = b;
udata->du = du;
udata->dv = dv;
udata->dw = dw;
udata->ep = ep;
udata->nthreads = num_threads;
/* set total allocated vector length */
NEQ = Nvar*udata->N;
/* Initial problem output */
printf("\n1D Brusselator PDE test problem:\n");
printf(" N = %li, NEQ = %li\n", (long int) udata->N, (long int) NEQ);
printf(" num_threads = %i\n", num_threads);
printf(" problem parameters: a = %"GSYM", b = %"GSYM", ep = %"GSYM"\n",
udata->a, udata->b, udata->ep);
printf(" diffusion coefficients: du = %"GSYM", dv = %"GSYM", dw = %"GSYM"\n",
udata->du, udata->dv, udata->dw);
printf(" reltol = %.1"ESYM", abstol = %.1"ESYM"\n\n", reltol, abstol);
/* Initialize vector data structures */
y = N_VNew_OpenMP(NEQ, num_threads); /* Create vector for solution */
if (check_flag((void *)y, "N_VNew_OpenMP", 0)) return 1;
udata->dx = RCONST(1.0)/(N-1); /* set spatial mesh spacing */
data = N_VGetArrayPointer(y); /* Access data array for new NVector y */
if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
umask = N_VNew_OpenMP(NEQ, num_threads); /* Create vector masks */
if (check_flag((void *)umask, "N_VNew_OpenMP", 0)) return 1;
vmask = N_VNew_OpenMP(NEQ, num_threads);
if (check_flag((void *)vmask, "N_VNew_OpenMP", 0)) return 1;
wmask = N_VNew_OpenMP(NEQ, num_threads);
if (check_flag((void *)wmask, "N_VNew_OpenMP", 0)) return 1;
/* Set initial conditions into y */
pi = RCONST(4.0)*atan(RCONST(1.0));
for (i=0; i<N; i++) {
data[IDX(i,0)] = a + RCONST(0.1)*sin(pi*i*udata->dx); /* u */
data[IDX(i,1)] = b/a + RCONST(0.1)*sin(pi*i*udata->dx); /* v */
data[IDX(i,2)] = b + RCONST(0.1)*sin(pi*i*udata->dx); /* w */
}
/* Set mask array values for each solution component */
N_VConst(0.0, umask);
data = N_VGetArrayPointer(umask);
if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) data[IDX(i,0)] = RCONST(1.0);
N_VConst(0.0, vmask);
data = N_VGetArrayPointer(vmask);
if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) data[IDX(i,1)] = RCONST(1.0);
N_VConst(0.0, wmask);
data = N_VGetArrayPointer(wmask);
if (check_flag((void *) data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) data[IDX(i,2)] = RCONST(1.0);
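  /* each mask is 1.0 on the entries of one component and 0.0 elsewhere, so
     N_VWL2Norm(y,umask) in the output loop below returns sqrt(sum_i u_i^2),
     from which the RMS value is formed as sqrt(u*u/N) */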
/* Initialize matrix and linear solver data structures */
A = SUNBandMatrix(NEQ, 4, 4);
if (check_flag((void *)A, "SUNBandMatrix", 0)) return 1;
LS = SUNLinSol_Band(y, A);
if (check_flag((void *)LS, "SUNLinSol_Band", 0)) return 1;
/* Call ARKStepCreate to initialize the ARK timestepper module and
     specify the right-hand side function in y'=f(t,y), the initial time
T0, and the initial dependent variable vector y. Note: since this
problem is fully implicit, we set f_E to NULL and f_I to f. */
arkode_mem = ARKStepCreate(NULL, f, T0, y);
if (check_flag((void *)arkode_mem, "ARKStepCreate", 0)) return 1;
/* Set routines */
flag = ARKStepSetUserData(arkode_mem, (void *) udata); /* Pass udata to user functions */
if (check_flag(&flag, "ARKStepSetUserData", 1)) return 1;
flag = ARKStepSStolerances(arkode_mem, reltol, abstol); /* Specify tolerances */
if (check_flag(&flag, "ARKStepSStolerances", 1)) return 1;
/* Linear solver specification */
flag = ARKStepSetLinearSolver(arkode_mem, LS, A); /* Attach matrix and linear solver */
if (check_flag(&flag, "ARKStepSetLinearSolver", 1)) return 1;
flag = ARKStepSetJacFn(arkode_mem, Jac); /* Set the Jacobian routine */
if (check_flag(&flag, "ARKStepSetJacFn", 1)) return 1;
/* output spatial mesh to disk */
FID=fopen("bruss_mesh.txt","w");
for (i=0; i<N; i++) fprintf(FID," %.16"ESYM"\n", udata->dx*i);
fclose(FID);
/* Open output stream for results, access data arrays */
UFID=fopen("bruss_u.txt","w");
VFID=fopen("bruss_v.txt","w");
WFID=fopen("bruss_w.txt","w");
/* output initial condition to disk */
data = N_VGetArrayPointer(y);
if (check_flag((void *)data, "N_VGetArrayPointer", 0)) return 1;
for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
fprintf(UFID,"\n");
fprintf(VFID,"\n");
fprintf(WFID,"\n");
/* Main time-stepping loop: calls ARKStepEvolve to perform the integration, then
prints results. Stops when the final time has been reached */
t = T0;
dTout = (Tf-T0)/Nt;
tout = T0+dTout;
printf(" t ||u||_rms ||v||_rms ||w||_rms\n");
printf(" ----------------------------------------------\n");
for (iout=0; iout<Nt; iout++) {
flag = ARKStepEvolve(arkode_mem, tout, y, &t, ARK_NORMAL); /* call integrator */
if (check_flag(&flag, "ARKStepEvolve", 1)) break;
u = N_VWL2Norm(y,umask); /* access/print solution statistics */
u = sqrt(u*u/N);
v = N_VWL2Norm(y,vmask);
v = sqrt(v*v/N);
w = N_VWL2Norm(y,wmask);
w = sqrt(w*w/N);
printf(" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM" %10.6"FSYM"\n", t, u, v, w);
if (flag >= 0) { /* successful solve: update output time */
tout += dTout;
tout = (tout > Tf) ? Tf : tout;
} else { /* unsuccessful solve: break */
fprintf(stderr,"Solver failure, stopping integration\n");
break;
}
/* output results to disk */
for (i=0; i<N; i++) fprintf(UFID," %.16"ESYM, data[IDX(i,0)]);
for (i=0; i<N; i++) fprintf(VFID," %.16"ESYM, data[IDX(i,1)]);
for (i=0; i<N; i++) fprintf(WFID," %.16"ESYM, data[IDX(i,2)]);
fprintf(UFID,"\n");
fprintf(VFID,"\n");
fprintf(WFID,"\n");
}
printf(" ----------------------------------------------\n");
fclose(UFID);
fclose(VFID);
fclose(WFID);
/* Print some final statistics */
flag = ARKStepGetNumSteps(arkode_mem, &nst);
check_flag(&flag, "ARKStepGetNumSteps", 1);
flag = ARKStepGetNumStepAttempts(arkode_mem, &nst_a);
check_flag(&flag, "ARKStepGetNumStepAttempts", 1);
flag = ARKStepGetNumRhsEvals(arkode_mem, &nfe, &nfi);
check_flag(&flag, "ARKStepGetNumRhsEvals", 1);
flag = ARKStepGetNumLinSolvSetups(arkode_mem, &nsetups);
check_flag(&flag, "ARKStepGetNumLinSolvSetups", 1);
flag = ARKStepGetNumErrTestFails(arkode_mem, &netf);
check_flag(&flag, "ARKStepGetNumErrTestFails", 1);
flag = ARKStepGetNumNonlinSolvIters(arkode_mem, &nni);
check_flag(&flag, "ARKStepGetNumNonlinSolvIters", 1);
flag = ARKStepGetNumNonlinSolvConvFails(arkode_mem, &ncfn);
check_flag(&flag, "ARKStepGetNumNonlinSolvConvFails", 1);
flag = ARKStepGetNumJacEvals(arkode_mem, &nje);
check_flag(&flag, "ARKStepGetNumJacEvals", 1);
flag = ARKStepGetNumLinRhsEvals(arkode_mem, &nfeLS);
check_flag(&flag, "ARKStepGetNumLinRhsEvals", 1);
printf("\nFinal Solver Statistics:\n");
printf(" Internal solver steps = %li (attempted = %li)\n", nst, nst_a);
printf(" Total RHS evals: Fe = %li, Fi = %li\n", nfe, nfi);
printf(" Total linear solver setups = %li\n", nsetups);
printf(" Total RHS evals for setting up the linear system = %li\n", nfeLS);
printf(" Total number of Jacobian evaluations = %li\n", nje);
printf(" Total number of Newton iterations = %li\n", nni);
printf(" Total number of nonlinear solver convergence failures = %li\n", ncfn);
printf(" Total number of error test failures = %li\n\n", netf);
/* Clean up and return with successful completion */
free(udata); /* Free user data */
ARKStepFree(&arkode_mem); /* Free integrator memory */
SUNLinSolFree(LS); /* Free linear solver */
SUNMatDestroy(A); /* Free matrix */
N_VDestroy(y); /* Free vectors */
N_VDestroy(umask);
N_VDestroy(vmask);
N_VDestroy(wmask);
return 0;
}
/*-------------------------------
* Functions called by the solver
*-------------------------------*/
/* f routine to compute the ODE RHS function f(t,y). */
static int f(realtype t, N_Vector y, N_Vector ydot, void *user_data)
{
UserData udata = (UserData) user_data; /* access problem data */
sunindextype N = udata->N; /* set variable shortcuts */
realtype a = udata->a;
realtype b = udata->b;
realtype ep = udata->ep;
realtype du = udata->du;
realtype dv = udata->dv;
realtype dw = udata->dw;
realtype dx = udata->dx;
realtype *Ydata=NULL, *dYdata=NULL;
realtype uconst, vconst, wconst, u, ul, ur, v, vl, vr, w, wl, wr;
sunindextype i = 0;
  /* clear out ydot (to be careful) */
  N_VConst(0.0, ydot);
  Ydata = N_VGetArrayPointer(y);     /* access data arrays */
  if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
  dYdata = N_VGetArrayPointer(ydot);
  if (check_flag((void *)dYdata, "N_VGetArrayPointer", 0)) return 1;
/* iterate over domain, computing all equations */
uconst = du/dx/dx;
vconst = dv/dx/dx;
wconst = dw/dx/dx;
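  /* second-order centered difference: u_xx(x_i) ~ (u[i-1] - 2*u[i] + u[i+1])/dx^2,
     with uconst, vconst, wconst carrying the diffusion-coefficient/dx^2 factors */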
#pragma omp parallel for default(shared) private(i,u,ul,ur,v,vl,vr,w,wl,wr) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++) {
/* set shortcuts */
u = Ydata[IDX(i,0)]; ul = Ydata[IDX(i-1,0)]; ur = Ydata[IDX(i+1,0)];
v = Ydata[IDX(i,1)]; vl = Ydata[IDX(i-1,1)]; vr = Ydata[IDX(i+1,1)];
w = Ydata[IDX(i,2)]; wl = Ydata[IDX(i-1,2)]; wr = Ydata[IDX(i+1,2)];
/* u_t = du*u_xx + a - (w+1)*u + v*u^2 */
dYdata[IDX(i,0)] = (ul - RCONST(2.0)*u + ur)*uconst + a - (w+RCONST(1.0))*u + v*u*u;
/* v_t = dv*v_xx + w*u - v*u^2 */
dYdata[IDX(i,1)] = (vl - RCONST(2.0)*v + vr)*vconst + w*u - v*u*u;
/* w_t = dw*w_xx + (b-w)/ep - w*u */
dYdata[IDX(i,2)] = (wl - RCONST(2.0)*w + wr)*wconst + (b-w)/ep - w*u;
}
/* enforce stationary boundaries */
dYdata[IDX(0,0)] = dYdata[IDX(0,1)] = dYdata[IDX(0,2)] = 0.0;
dYdata[IDX(N-1,0)] = dYdata[IDX(N-1,1)] = dYdata[IDX(N-1,2)] = 0.0;
return 0;
}
/* Jacobian routine to compute J(t,y) = df/dy. */
static int Jac(realtype t, N_Vector y, N_Vector fy,
SUNMatrix J, void *user_data,
N_Vector tmp1, N_Vector tmp2, N_Vector tmp3)
{
UserData udata = (UserData) user_data; /* access problem data */
SUNMatZero(J); /* Initialize Jacobian to zero */
/* Fill in the Laplace matrix */
if (LaplaceMatrix(RCONST(1.0), J, udata)) {
printf("Jacobian calculation error in calling LaplaceMatrix!\n");
return 1;
}
/* Add in the Jacobian of the reaction terms matrix */
if (ReactionJac(RCONST(1.0), y, J, udata)) {
printf("Jacobian calculation error in calling ReactionJac!\n");
return 1;
}
return 0;
}
/*-------------------------------
* Private helper functions
*-------------------------------*/
/* Routine to compute the stiffness matrix from (L*y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
static int LaplaceMatrix(realtype c, SUNMatrix Jac, UserData udata)
{
sunindextype N = udata->N; /* set shortcuts */
realtype dx = udata->dx;
sunindextype i = 0;
realtype uconst = c*udata->du/dx/dx;
realtype vconst = c*udata->dv/dx/dx;
realtype wconst = c*udata->dw/dx/dx;
/* iterate over intervals, filling in Jacobian entries */
#pragma omp parallel for default(shared) private(i) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++) {
/* Jacobian of (L*y) at this node */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i-1,0)) += uconst;
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i-1,1)) += vconst;
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i-1,2)) += wconst;
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) -= RCONST(2.0)*uconst;
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) -= RCONST(2.0)*vconst;
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) -= RCONST(2.0)*wconst;
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i+1,0)) += uconst;
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i+1,1)) += vconst;
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i+1,2)) += wconst;
}
return 0;
}
/* Routine to compute the Jacobian matrix from R(y), scaled by the factor c.
We add the result into Jac and do not erase what was already there */
static int ReactionJac(realtype c, N_Vector y, SUNMatrix Jac, UserData udata)
{
sunindextype N = udata->N; /* set shortcuts */
realtype ep = udata->ep;
sunindextype i = 0;
realtype u, v, w;
realtype *Ydata = N_VGetArrayPointer(y); /* access solution array */
if (check_flag((void *)Ydata, "N_VGetArrayPointer", 0)) return 1;
/* iterate over nodes, filling in Jacobian entries */
#pragma omp parallel for default(shared) private(i,u,v,w) schedule(static) num_threads(udata->nthreads)
for (i=1; i<N-1; i++) {
    /* set nodal value shortcuts (the loop starts at the first interior node) */
u = Ydata[IDX(i,0)];
v = Ydata[IDX(i,1)];
w = Ydata[IDX(i,2)];
/* all vars wrt u */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,0)) += c*(RCONST(2.0)*u*v-(w+RCONST(1.0)));
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,0)) += c*(w - RCONST(2.0)*u*v);
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,0)) += c*(-w);
/* all vars wrt v */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,1)) += c*(u*u);
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,1)) += c*(-u*u);
/* all vars wrt w */
SM_ELEMENT_B(Jac,IDX(i,0),IDX(i,2)) += c*(-u);
SM_ELEMENT_B(Jac,IDX(i,1),IDX(i,2)) += c*(u);
SM_ELEMENT_B(Jac,IDX(i,2),IDX(i,2)) += c*(-RCONST(1.0)/ep - u);
}
return 0;
}
/* Check function return value...
opt == 0 means SUNDIALS function allocates memory so check if
returned NULL pointer
opt == 1 means SUNDIALS function returns a flag so check if
flag >= 0
opt == 2 means function allocates memory so check if returned
NULL pointer
*/
static int check_flag(void *flagvalue, const char *funcname, int opt)
{
int *errflag;
/* Check if SUNDIALS function returned NULL pointer - no memory allocated */
if (opt == 0 && flagvalue == NULL) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
/* Check if flag < 0 */
else if (opt == 1) {
errflag = (int *) flagvalue;
if (*errflag < 0) {
fprintf(stderr, "\nSUNDIALS_ERROR: %s() failed with flag = %d\n\n",
funcname, *errflag);
return 1; }}
/* Check if function returned NULL pointer - no memory allocated */
else if (opt == 2 && flagvalue == NULL) {
fprintf(stderr, "\nMEMORY_ERROR: %s() failed - returned NULL pointer\n\n",
funcname);
return 1; }
return 0;
}
/*---- end of file ----*/
|
DRB032-truedepfirstdimension-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
The outer loop has a loop-carried true dependence.
Data race pair: b[i][j]@69:7 vs. b[i-1][j-1]@69:15
*/
#include <stdio.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
int i,j;
int len = 1000;
if (argc>1)
len = atoi(argv[1]);
int n=len, m=len;
double b[len][len];
#pragma omp parallel for
for (i=0; i<n; i++)
#pragma omp parallel for
for (j=0; j<m; j++)
b[i][j] = 0.5;
#pragma omp parallel for private(j)
  for (i=1;i<n;i++)
    for (j=1;j<m;j++)
      b[i][j]=b[i-1][j-1];
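  /* iteration i reads row i-1 while another thread may still be writing it,
     which is the loop-carried true dependence documented above */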
for (i=0; i<n; i++)
for (j=0; j<m; j++)
printf("%lf\n",b[i][j]);
return 0;
}
|
GB_unop__identity_int32_int32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2020, All Rights Reserved.
// http://suitesparse.com See GraphBLAS/Doc/License.txt for license.
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: (none)
// op(A') function: GB_unop_tran__identity_int32_int32
// C type: int32_t
// A type: int32_t
// cast: int32_t cij = aij
// unaryop: cij = aij
#define GB_ATYPE \
int32_t
#define GB_CTYPE \
int32_t
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
int32_t aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = x ;
// casting
#define GB_CAST(z, aij) \
int32_t z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
int32_t aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
int32_t z = aij ; \
Cx [pC] = z ; \
}
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_IDENTITY || GxB_NO_INT32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
#if 0
GrB_Info (none)
(
int32_t *Cx, // Cx and Ax may be aliased
const int32_t *Ax,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
int32_t aij = Ax [p] ;
int32_t z = aij ;
Cx [p] = z ;
}
return (GrB_SUCCESS) ;
#endif
}
#endif
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__identity_int32_int32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Rowcounts,
GBI_single_iterator Iter,
const int64_t *GB_RESTRICT A_slice,
int naslice
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#define GB_PHASE_2_OF_2
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_binop__islt_fp64.c | //------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_01__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_02__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_03__islt_fp64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__islt_fp64)
// A*D function (colscale): GB (_AxD__islt_fp64)
// D*A function (rowscale): GB (_DxB__islt_fp64)
// C+=B function (dense accum): GB (_Cdense_accumB__islt_fp64)
// C+=b function (dense accum): GB (_Cdense_accumb__islt_fp64)
// C+=A+B function (dense ewise3): GB ((none))
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__islt_fp64)
// C=scalar+B GB (_bind1st__islt_fp64)
// C=scalar+B' GB (_bind1st_tran__islt_fp64)
// C=A+scalar GB (_bind2nd__islt_fp64)
// C=A'+scalar GB (_bind2nd_tran__islt_fp64)
// C type: double
// A type: double
// B,b type: double
// BinaryOp: cij = (aij < bij)
#define GB_ATYPE \
double
#define GB_BTYPE \
double
#define GB_CTYPE \
double
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
double aij = GBX (Ax, pA, A_iso)
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
double bij = GBX (Bx, pB, B_iso)
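// (GBX (X, p, iso) reads X [0] when the matrix is iso-valued and X [p]
// otherwise, so the same kernels handle both iso and non-iso matrices)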
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
double t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = (x < y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_ISLT || GxB_NO_FP64 || GxB_NO_ISLT_FP64)
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
#if 0
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB ((none))
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
#endif
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_ewise3_noaccum__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_dense_ewise3_noaccum_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__islt_fp64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type double
double bwork = (*((double *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A, bool A_is_pattern,
const GrB_Matrix D, bool D_is_pattern,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix D, bool D_is_pattern,
const GrB_Matrix B, bool B_is_pattern,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *restrict Cx = (double *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C = A+B or C<M> = A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
#include "GB_add_template.c"
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C = A.*B or C<M> = A.*B
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_01__islt_fp64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_01_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_03__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_03_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__islt_fp64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__islt_fp64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double *Cx = (double *) Cx_output ;
double x = (*((double *) x_input)) ;
double *Bx = (double *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
double bij = GBX (Bx, p, false) ;
Cx [p] = (x < bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__islt_fp64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
double *Cx = (double *) Cx_output ;
double *Ax = (double *) Ax_input ;
double y = (*((double *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
double aij = GBX (Ax, p, false) ;
Cx [p] = (aij < y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (x < aij) ; \
}
GrB_Info GB (_bind1st_tran__islt_fp64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
double
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double x = (*((const double *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
double
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
double aij = GBX (Ax, pA, false) ; \
Cx [pC] = (aij < y) ; \
}
GrB_Info GB (_bind2nd_tran__islt_fp64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
double y = (*((const double *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
2.h | //
// Created by aleksey on 03.11.16.
//
#ifndef TASKSOPENMP_2A_H
#define TASKSOPENMP_2A_H
#include "Tree.h"
#include <iostream>
#include <omp.h>
using namespace std;
// 2a
void node_process(struct node *tree)
{
// Count the number of nodes
num_nodes++;
// Associate some work with each node
// The work has a different computational cost for different nodes
work(tree->num);
// Print the number of the node just processed
//cout << tree->num << endl;
if (tree->left)
node_process(tree->left);
if (tree->right)
node_process(tree->right);
return;
}
void task_2a(node * tree){
cout << "2a) Последовательная обработка" << endl;
    clock_t start, finish; // timing variables
double time;
start = clock();
node_process(tree);
finish = clock();
time = (double) (finish - start) / CLOCKS_PER_SEC;
cout << "Время последовательного варианта " << time << endl;
cout << "Число вершин " << num_nodes << endl;
}
//2b
void node_process_parallel(struct node *tree)
{
// Count the number of nodes
#pragma omp atomic
num_nodes++;
// Associate some work with each node
// The work has a different computational cost for different nodes
work(tree->num);
// Print the number of the node just processed
//cout << tree->num << " " << omp_get_thread_num() << endl;
#pragma omp task
if (tree->left)
node_process_parallel(tree->left);
#pragma omp task
if (tree->right)
node_process_parallel(tree->right);
return;
}
void task_2b(node * tree)
{
cout << "2a) Последовательный и параллельный варианты" << endl;
    clock_t start, finish; // timing variables
double time1, time2;
start = clock();
node_process(tree);
finish = clock();
time1 = (double)(finish - start)/CLOCKS_PER_SEC;
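    // Note: clock() accumulates CPU time over all threads, so the parallel
    // timing below can exceed the wall-clock time; omp_get_wtime() would
    // measure elapsed (wall) time instead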
start = clock();
#pragma omp parallel
{
#pragma omp single
{
node_process_parallel(tree);
}
}
finish = clock();
time2 = (double)(finish - start)/CLOCKS_PER_SEC;
cout << "Время последовательного варианта " << time1 << endl;
cout << "Время параллельного варианта " << time2 << endl;
}
// 2c
void task_2c(node * tree)
{
cout << "2в) Параллельный вариант без 'pragma omp single'" << endl;
    clock_t start, finish; // timing variables
double time1, time2;
start = clock();
node_process(tree);
finish = clock();
time1 = (double)(finish - start)/CLOCKS_PER_SEC;
start = clock();
#pragma omp parallel
{
{
node_process_parallel(tree);
}
}
finish = clock();
time2 = (double)(finish - start)/CLOCKS_PER_SEC;
cout << "Время последовательного варианта " << time1 << endl;
cout << "Время параллельного варианта " << time2 << endl;
}
// 2d
void node_process_parallel_rising(struct node *tree)
{
// Count the number of nodes
#pragma omp atomic
num_nodes++;
// Print the number of the node just processed
//cout << tree->num << " " << omp_get_thread_num() << endl;
#pragma omp task
    if (tree->left)
        node_process_parallel_rising(tree->left);
#pragma omp task
    if (tree->right)
        node_process_parallel_rising(tree->right);
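// (note: without a taskwait before work(), the child tasks may still run
// concurrently with the parent's work; this variant only changes the order
// in which the tasks are created relative to the work)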
// Associate some work with each node
// The work has a different computational cost for different nodes
work(tree->num);
return;
}
void task_2d(node * tree)
{
cout << "2г) Восходящий и нисходящий варианты" << endl;
    clock_t start, finish; // timing variables
double time1, time2;
start = clock();
cout << "Нисходящая обработка" << endl;
#pragma omp parallel
{
#pragma omp single
{
node_process_parallel(tree);
}
}
finish = clock();
time1 = (double)(finish - start)/CLOCKS_PER_SEC;
start = clock();
cout << "Восходящая обработка" << endl;
#pragma omp parallel
{
#pragma omp single
{
node_process_parallel_rising(tree);
}
}
finish = clock();
time2 = (double)(finish - start)/CLOCKS_PER_SEC;
cout << "Время нисходящей параллельной обработки " << time1 << endl;
cout << "Время восходящей параллельной обработки " << time2 << endl;
}
#endif //TASKSOPENMP_2A_H |
GB_split_sparse.c | //------------------------------------------------------------------------------
// GB_split_sparse: split a sparse/hypersparse matrix into tiles
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
#define GB_FREE_WORK \
GB_WERK_POP (C_ek_slicing, int64_t) ; \
GB_FREE_WERK (&Wp, Wp_size) ;
#define GB_FREE_ALL \
GB_FREE_WORK ; \
GB_Matrix_free (&C) ;
#include "GB_split.h"
GrB_Info GB_split_sparse // split a sparse matrix
(
GrB_Matrix *Tiles, // 2D row-major array of size m-by-n
const GrB_Index m,
const GrB_Index n,
const int64_t *restrict Tile_rows, // size m+1
const int64_t *restrict Tile_cols, // size n+1
const GrB_Matrix A, // input matrix
GB_Context Context
)
{
//--------------------------------------------------------------------------
// get inputs
//--------------------------------------------------------------------------
GrB_Info info ;
int A_sparsity = GB_sparsity (A) ;
bool A_is_hyper = (A_sparsity == GxB_HYPERSPARSE) ;
ASSERT (A_is_hyper || A_sparsity == GxB_SPARSE) ;
GrB_Matrix C = NULL ;
GB_WERK_DECLARE (C_ek_slicing, int64_t) ;
ASSERT_MATRIX_OK (A, "A sparse for split", GB0) ;
int sparsity_control = A->sparsity_control ;
float hyper_switch = A->hyper_switch ;
bool csc = A->is_csc ;
GrB_Type atype = A->type ;
int64_t avlen = A->vlen ;
int64_t avdim = A->vdim ;
size_t asize = atype->size ;
GB_GET_NTHREADS_MAX (nthreads_max, chunk, Context) ;
int64_t nouter = csc ? n : m ;
int64_t ninner = csc ? m : n ;
const int64_t *Tile_vdim = csc ? Tile_cols : Tile_rows ;
const int64_t *Tile_vlen = csc ? Tile_rows : Tile_cols ;
int64_t anvec = A->nvec ;
const int64_t *restrict Ap = A->p ;
const int64_t *restrict Ah = A->h ;
const int64_t *restrict Ai = A->i ;
const bool A_iso = A->iso ;
//--------------------------------------------------------------------------
// allocate workspace
//--------------------------------------------------------------------------
size_t Wp_size = 0 ;
int64_t *restrict Wp = NULL ;
Wp = GB_MALLOC_WERK (anvec, int64_t, &Wp_size) ;
if (Wp == NULL)
{
// out of memory
GB_FREE_ALL ;
return (GrB_OUT_OF_MEMORY) ;
}
GB_memcpy (Wp, Ap, anvec * sizeof (int64_t), nthreads_max) ;
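    // Wp [k] is the current unconsumed position in vector k of A: it starts
    // at Ap [k] and is advanced past each inner tile's entries in the update
    // loop at the end of the inner iteration below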
//--------------------------------------------------------------------------
// split A into tiles
//--------------------------------------------------------------------------
int64_t akend = 0 ;
for (int64_t outer = 0 ; outer < nouter ; outer++)
{
//----------------------------------------------------------------------
// find the starting and ending vector of these tiles
//----------------------------------------------------------------------
// The tile appears in vectors avstart:avend-1 of A, and indices
// aistart:aiend-1.
const int64_t avstart = Tile_vdim [outer] ;
const int64_t avend = Tile_vdim [outer+1] ;
int64_t akstart = akend ;
if (A_is_hyper)
{
// A is hypersparse: look for vector avend in the A->h hyper list.
// The vectors to handle for this outer loop are in
// Ah [akstart:akend-1].
akend = akstart ;
int64_t pright = anvec - 1 ;
bool found ;
GB_SPLIT_BINARY_SEARCH (avend, Ah, akend, pright, found) ;
ASSERT (GB_IMPLIES (akstart <= akend-1, Ah [akend-1] < avend)) ;
}
else
{
// A is sparse; the vectors to handle are akstart:akend-1
akend = avend ;
}
// # of vectors in all tiles in this outer loop
int64_t cnvec = akend - akstart ;
int nth = GB_nthreads (cnvec, chunk, nthreads_max) ;
//----------------------------------------------------------------------
// create all tiles for vectors akstart:akend-1 in A
//----------------------------------------------------------------------
for (int64_t inner = 0 ; inner < ninner ; inner++)
{
//------------------------------------------------------------------
// allocate C, C->p, and C->h for this tile
//------------------------------------------------------------------
const int64_t aistart = Tile_vlen [inner] ;
const int64_t aiend = Tile_vlen [inner+1] ;
const int64_t cvdim = avend - avstart ;
const int64_t cvlen = aiend - aistart ;
C = NULL ;
GB_OK (GB_new (&C, false, // new header
atype, cvlen, cvdim, GB_Ap_malloc, csc, A_sparsity,
hyper_switch, cnvec, Context)) ;
C->sparsity_control = sparsity_control ;
C->hyper_switch = hyper_switch ;
C->nvec = cnvec ;
int64_t *restrict Cp = C->p ;
int64_t *restrict Ch = C->h ;
//------------------------------------------------------------------
// determine the boundaries of this tile
//------------------------------------------------------------------
int64_t k ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (k = akstart ; k < akend ; k++)
{
int64_t pA = Wp [k] ;
const int64_t pA_end = Ap [k+1] ;
const int64_t aknz = pA_end - pA ;
if (aknz == 0 || Ai [pA] >= aiend)
{
// this vector of C is empty
}
else if (aknz > 256)
{
// use binary search to find aiend
bool found ;
int64_t pright = pA_end - 1 ;
GB_SPLIT_BINARY_SEARCH (aiend, Ai, pA, pright, found) ;
#ifdef GB_DEBUG
// check the results with a linear search
int64_t p2 = Wp [k] ;
for ( ; p2 < Ap [k+1] ; p2++)
{
if (Ai [p2] >= aiend) break ;
}
ASSERT (pA == p2) ;
#endif
}
else
{
// use a linear-time search to find aiend
for ( ; pA < pA_end ; pA++)
{
if (Ai [pA] >= aiend) break ;
}
#ifdef GB_DEBUG
// check the results with a binary search
bool found ;
int64_t p2 = Wp [k] ;
int64_t p2_end = Ap [k+1] - 1 ;
GB_SPLIT_BINARY_SEARCH (aiend, Ai, p2, p2_end, found) ;
ASSERT (pA == p2) ;
#endif
}
Cp [k-akstart] = (pA - Wp [k]) ; // # of entries in this vector
if (A_is_hyper)
{
Ch [k-akstart] = Ah [k] - avstart ;
}
}
GB_cumsum (Cp, cnvec, &(C->nvec_nonempty), nth, Context) ;
int64_t cnz = Cp [cnvec] ;
//------------------------------------------------------------------
// allocate C->i and C->x for this tile
//------------------------------------------------------------------
// set C->iso = A_iso OK
GB_OK (GB_bix_alloc (C, cnz, GxB_SPARSE, false, true, A_iso,
Context)) ;
int64_t *restrict Ci = C->i ;
C->magic = GB_MAGIC ; // for GB_nnz_held(C), to slice C
//------------------------------------------------------------------
// copy the tile from A into C
//------------------------------------------------------------------
int C_ntasks, C_nthreads ;
GB_SLICE_MATRIX (C, 8, chunk) ;
bool done = false ;
if (A_iso)
{
//--------------------------------------------------------------
// split an iso matrix A into an iso tile C
//--------------------------------------------------------------
// A is iso and so is C; copy the iso entry
GBURBLE ("(iso sparse split) ") ;
memcpy (C->x, A->x, asize) ;
#define GB_ISO_SPLIT
#define GB_COPY(pC,pA) ;
#include "GB_split_sparse_template.c"
}
else
{
//--------------------------------------------------------------
            // split a non-iso matrix A into a non-iso tile C
//--------------------------------------------------------------
#ifndef GBCOMPACT
// no typecasting needed
switch (asize)
{
#undef GB_COPY
#define GB_COPY(pC,pA) Cx [pC] = Ax [pA] ;
case GB_1BYTE : // uint8, int8, bool, or 1-byte user-defined
#define GB_CTYPE uint8_t
#include "GB_split_sparse_template.c"
break ;
case GB_2BYTE : // uint16, int16, or 2-byte user-defined
#define GB_CTYPE uint16_t
#include "GB_split_sparse_template.c"
break ;
case GB_4BYTE : // uint32, int32, float, or 4-byte user
#define GB_CTYPE uint32_t
#include "GB_split_sparse_template.c"
break ;
case GB_8BYTE : // uint64, int64, double, float complex,
// or 8-byte user defined
#define GB_CTYPE uint64_t
#include "GB_split_sparse_template.c"
break ;
case GB_16BYTE : // double complex or 16-byte user-defined
#define GB_CTYPE GB_blob16
// #define GB_CTYPE uint64_t
// #undef GB_COPY
// #define GB_COPY(pC,pA) \
// Cx [2*pC ] = Ax [2*pA ] ; \
// Cx [2*pC+1] = Ax [2*pA+1] ;
#include "GB_split_sparse_template.c"
break ;
default:;
}
#endif
}
if (!done)
{
// user-defined types
#define GB_CTYPE GB_void
#undef GB_COPY
#define GB_COPY(pC,pA) \
memcpy (Cx + (pC)*asize, Ax +(pA)*asize, asize) ;
#include "GB_split_sparse_template.c"
}
//------------------------------------------------------------------
// free workspace
//------------------------------------------------------------------
GB_WERK_POP (C_ek_slicing, int64_t) ;
//------------------------------------------------------------------
// advance to the next tile
//------------------------------------------------------------------
if (inner < ninner - 1)
{
int64_t k ;
#pragma omp parallel for num_threads(nth) schedule(static)
for (k = akstart ; k < akend ; k++)
{
int64_t ck = k - akstart ;
int64_t cknz = Cp [ck+1] - Cp [ck] ;
Wp [k] += cknz ;
}
}
//------------------------------------------------------------------
// conform the tile and save it in the Tiles array
//------------------------------------------------------------------
ASSERT_MATRIX_OK (C, "C for GB_split", GB0) ;
GB_OK (GB_hypermatrix_prune (C, Context)) ;
GB_OK (GB_conform (C, Context)) ;
if (csc)
{
GB_TILE (Tiles, inner, outer) = C ;
}
else
{
GB_TILE (Tiles, outer, inner) = C ;
}
ASSERT_MATRIX_OK (C, "final tile C for GB_split", GB0) ;
C = NULL ;
}
}
GB_FREE_WORK ;
return (GrB_SUCCESS) ;
}
|
quadip.c | #include<Python.h>
#include<numpy/arrayobject.h>
#include<math.h>
#define IND(a,i) *((double *)(a->data+i*a->strides[0]))
//#define IND_arr(a,i) (PyArrayObject *)(a->data+i*a->strides[0])
#define IND2(a,i,j) *((double *)(a->data+i*a->strides[0]+j*a->strides[1]))
static PyObject *quadip(PyObject *self, PyObject *args, PyObject *keywds);
static PyObject *quadip(PyObject *self, PyObject *args, PyObject *keywds)
{
  PyObject *etc = NULL;
PyArrayObject *out, *ipparams, *position;
double a,b,c,d,e,f;
int i;
npy_intp dims[1];
static char *kwlist[] = {"ipparams","position","etc",NULL};
if(!PyArg_ParseTupleAndKeywords(args,keywds,"OO|O",kwlist,&ipparams,&position,&etc))
{
return NULL;
}
a = IND(ipparams,0);
b = IND(ipparams,1);
c = IND(ipparams,2);
d = IND(ipparams,3);
e = IND(ipparams,4);
f = IND(ipparams,5);
dims[0] = PyArray_DIM(position, 1);
out = (PyArrayObject *) PyArray_SimpleNew(1,dims,PyArray_DOUBLE);
#pragma omp parallel for
for(i=0;i<dims[0];i++)
{
IND(out,i) = a*pow(IND2(position,0,i),2)+b*pow(IND2(position,1,i),2)+ \
c*IND2(position,0,i)*IND2(position,1,i)+d*IND2(position,0,i)+e*IND2(position,1,i)+f;
}
return PyArray_Return(out);
}
static char module_docstring[]="\
This function fits the intra-pixel sensitivity effect using a 2D quadratic.\n\
\n\
Parameters\n\
----------\n\
a: quadratic coefficient in y\n\
b: quadratic coefficient in x\n\
c: coefficient for cross-term\n\
d: linear coefficient in y\n\
e: linear coefficient in x\n\
f: constant\n\
\n\
Returns\n\
-------\n\
returns the flux values for the intra-pixel model\n\
\n\
Revisions\n\
---------\n\
2008-07-05 Kevin Stevenson, UCF \n\
kevin218@knights.ucf.edu\n\
Original version\n\
2011-01-05 Nate Lust, UCF\n\
natelust at linux dot com\n\
Converted to C extension function\n\
2018-11-22 Jonathan Fraine, SSI\n\
jfraine at spacescience.org\n\
Updated c extensions to python3, with support for python2.7\n\
\n\
";
static PyMethodDef module_methods[] = {
{"quadip",(PyCFunction)quadip,METH_VARARGS|METH_KEYWORDS,module_docstring},{NULL}};
PyMODINIT_FUNC
#if PY_MAJOR_VERSION >= 3
PyInit_quadip(void)
#else
initquadip(void)
#endif
{
#if PY_MAJOR_VERSION >= 3
PyObject *module;
static struct PyModuleDef moduledef = {
PyModuleDef_HEAD_INIT,
"quadip", /* m_name */
module_docstring, /* m_doc */
-1, /* m_size */
module_methods, /* m_methods */
NULL, /* m_reload */
NULL, /* m_traverse */
NULL, /* m_clear */
NULL, /* m_free */
};
#endif
#if PY_MAJOR_VERSION >= 3
module = PyModule_Create(&moduledef);
if (!module)
return NULL;
/* Load `numpy` functionality. */
import_array();
return module;
#else
PyObject *m = Py_InitModule3("quadip", module_methods, module_docstring);
if (m == NULL)
return;
/* Load `numpy` functionality. */
import_array();
#endif
}
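/*
 * Usage sketch (assumed build flow, not part of this file): compile as a
 * Python extension named "quadip", e.g. via a setup.py entry such as
 * Extension("quadip", ["quadip.c"], include_dirs=[numpy.get_include()]),
 * then call quadip.quadip(ipparams, position) with a length-6 ipparams
 * array and a 2 x npix position array.
 */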
|
matvec.c | /*
* OpenMP implementation of matrix-vector multiplication (not optimized).
* To be used with the in-class demo in module [A2]: Task Mapping on Soft Heterogeneous Systems
*
* Apan Qasem <apan@txtstate.edu>
* last updated: 03/09/2021
*/
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include<omp.h>
#define VAL_RANGE 1023
/* timer function */
double get_time_in_seconds() {
struct timeval tp;
struct timezone tzp;
gettimeofday(&tp, &tzp);
return ( (double) tp.tv_sec + (double) tp.tv_usec * 1.e-6 );
}
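/* sketch of an equivalent alternative (not used below): OpenMP is already a
   dependency, so omp_get_wtime() would give the same wall-clock seconds
   without the gettimeofday boilerplate */
double get_time_in_seconds_omp() {
  return omp_get_wtime();
}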
double dot_prod(double *x, double *y, int n) {
double sum = 0.0;
int i;
#pragma omp parallel for reduction(+:sum)
for (i = 0; i < n; i++)
sum += x[i] * y[i];
return sum;
}
void matrix_vector_mult(double **mat, double *vec, double *result,
long long rows, long long cols) {
/* not parallelized here on purpose, so the measured runtimes stay meaningful */
long long i;
for (i = 0; i < rows; i++)
result[i] = dot_prod(mat[i], vec, cols);
}
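/* sketch of the usual alternative (intentionally not used above, per the
   comment in matrix_vector_mult): parallelize the independent rows and keep
   each dot product serial, paying one fork/join for the whole matrix instead
   of one per row */
void matrix_vector_mult_outer(double **mat, double *vec, double *result,
                              long long rows, long long cols) {
  long long i, j;
#pragma omp parallel for private(j)
  for (i = 0; i < rows; i++) {
    double sum = 0.0;
    for (j = 0; j < cols; j++)
      sum += mat[i][j] * vec[j];
    result[i] = sum;
  }
}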
void display_matrix(const double **matrix, long long N) {
long long i, j;
for (i = 0; i < N; i++) {
for (j = 0; j < N; j++)
printf("%3.4f ", matrix[i][j]);
printf("\n");
}
}
int main(int argc, char *argv[]) {
if (argc < 4) {
printf("usage: \n");
printf(" ./matvec N n t\n");
printf(" N = matrix dimension\n");
printf(" n = number of reps\n");
printf(" t = number of threads\n");
exit(1);
}
/* matrix dimension; assume an N x N matrix and a vector of size N */
long long N = atoll(argv[1]);
/* number of reps; control running time of program */
unsigned n = atoi(argv[2]);
/* number of OpenMP threads */
unsigned threads = atoi(argv[3]);
omp_set_num_threads(threads);
double **matrix;
double *vec;
double *result;
long long i, j;
double start_time, end_time;
/* memory allocation and initialization */
#ifdef INIT_TIMER
start_time = get_time_in_seconds();
#endif
matrix = (double **) malloc(sizeof(double *) * N);
for (i = 0; i < N; i++)
matrix[i] = (double *) malloc(sizeof(double) * N);
vec = (double *) malloc(sizeof(double) * N);
result = (double *) malloc(sizeof(double) * N);
for (i = 0; i < N; i++)
for (j = 0; j < N; j++)
matrix[i][j] = rand() / (double) (RAND_MAX/VAL_RANGE);
for (i = 0; i < N; i++)
vec[i] = rand() / (double) (RAND_MAX/VAL_RANGE);
#ifdef INIT_TIMER
end_time = get_time_in_seconds();
fprintf(stdout, "Initialization time = %.3f s\n", end_time - start_time);
#endif
/* computation */
start_time = get_time_in_seconds();
for (i = 0; i < n; i++)
matrix_vector_mult(matrix, vec, result, N, N);
end_time = get_time_in_seconds();
/* verification (by inspection only) */
fprintf(stdout, "Verification: ");
for (unsigned i = 0; i < 1; i++)
fprintf(stdout, "result[%d] = %3.2e\n", i, result[i]);
fprintf(stdout, "\n\033[0;33mCompute time = %.3f s\n\033[0m", end_time - start_time);
return 0;
}
|
3d7pt_var.c | /*
* Order-1, 3D 7 point stencil with variable coefficients
* Adapted from PLUTO and Pochoir test bench
*
* Tareq Malas
*/
#include <stdio.h>
#include <stdlib.h>
#include <sys/time.h>
#ifdef LIKWID_PERFMON
#include <likwid.h>
#endif
#if defined(_OPENMP)
#include <omp.h>
#endif
#include "print_utils.h"
#define TESTS 2
#define MAX(a,b) ((a) > (b) ? a : b)
#define MIN(a,b) ((a) < (b) ? a : b)
/* Subtract the `struct timeval' values X and Y,
* storing the result in RESULT.
*
* Return 1 if the difference is negative, otherwise 0.
*/
int timeval_subtract(struct timeval *result, struct timeval *x, struct timeval *y)
{
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec)
{
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000)
{
int nsec = (x->tv_usec - y->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
* tv_usec is certainly positive.
*/
result->tv_sec = x->tv_sec - y->tv_sec;
result->tv_usec = x->tv_usec - y->tv_usec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
int main(int argc, char *argv[])
{
int t, i, j, k, m, test;
int Nx, Ny, Nz, Nt;
// all four sizes are required; Nx..Nt would otherwise be used uninitialized
if (argc < 5) {
printf("usage: %s Nx Ny Nz Nt\n", argv[0]);
return 1;
}
Nx = atoi(argv[1])+2;
Ny = atoi(argv[2])+2;
Nz = atoi(argv[3])+2;
Nt = atoi(argv[4]);
// allocate the arrays
double ****A = (double ****) malloc(sizeof(double***)*2);
for(m=0; m<2;m++){
A[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
A[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
A[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
double ****coef = (double ****) malloc(sizeof(double***)*7);
for(m=0; m<7;m++){
coef[m] = (double ***) malloc(sizeof(double**)*Nz);
for(i=0; i<Nz; i++){
coef[m][i] = (double**) malloc(sizeof(double*)*Ny);
for(j=0;j<Ny;j++){
coef[m][i][j] = (double*) malloc(sizeof(double)*Nx);
}
}
}
// tile size information, including an extra sentinel element (-1) that marks the end of the list
int *tile_size = (int*) malloc(sizeof(int));
tile_size[0] = -1;
// The list is modified here before source-to-source transformations
tile_size = (int*) realloc((void *)tile_size, sizeof(int)*5);
tile_size[0] = 24;
tile_size[1] = 24;
tile_size[2] = 8;
tile_size[3] = 256;
tile_size[4] = -1;
// for timekeeping
int ts_return = -1;
struct timeval start, end, result;
double tdiff = 0.0, min_tdiff=1.e100;
const int BASE = 1024;
// initialize variables
srand(42);
for (i = 1; i < Nz; i++) {
for (j = 1; j < Ny; j++) {
for (k = 1; k < Nx; k++) {
A[0][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
for (m=0; m<7; m++) {
for (i=1; i<Nz; i++) {
for (j=1; j<Ny; j++) {
for (k=1; k<Nx; k++) {
coef[m][i][j][k] = 1.0 * (rand() % BASE);
}
}
}
}
#ifdef LIKWID_PERFMON
LIKWID_MARKER_INIT;
#pragma omp parallel
{
LIKWID_MARKER_THREADINIT;
#pragma omp barrier
LIKWID_MARKER_START("calc");
}
#endif
int num_threads = 1;
#if defined(_OPENMP)
num_threads = omp_get_max_threads();
#endif
for(test=0; test<TESTS; test++){
gettimeofday(&start, 0);
// serial execution - Additions: 6 && Multiplications: 7
#pragma scop
for (t = 0; t < Nt-1; t++) {
for (i = 1; i < Nz-1; i++) {
for (j = 1; j < Ny-1; j++) {
for (k = 1; k < Nx-1; k++) {
A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
coef[6][i][j][k] * A[t%2][i ][j ][k+1];
}
}
}
}
#pragma endscop
gettimeofday(&end, 0);
ts_return = timeval_subtract(&result, &end, &start);
tdiff = (double) (result.tv_sec + result.tv_usec * 1.0e-6);
min_tdiff = MIN(min_tdiff, tdiff);
printf("Rank 0 TEST# %d time: %f\n", test, tdiff);
}
PRINT_RESULTS(1, "variable no-symmetry")
#ifdef LIKWID_PERFMON
#pragma omp parallel
{
LIKWID_MARKER_STOP("calc");
}
LIKWID_MARKER_CLOSE;
#endif
// Free allocated arrays
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(A[0][i][j]);
free(A[1][i][j]);
}
free(A[0][i]);
free(A[1][i]);
}
free(A[0]);
free(A[1]);
for(m=0; m<7;m++){
for(i=0; i<Nz; i++){
for(j=0;j<Ny;j++){
free(coef[m][i][j]);
}
free(coef[m][i]);
}
free(coef[m]);
}
free(A);
free(coef);
return 0;
}
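/*
 * Illustrative sketch (not called above): one straightforward OpenMP
 * parallelization of the same order-1 7-point update, splitting the two
 * outer spatial loops across threads. Each time step remains a global
 * synchronization point, since A[(t+1)%2] reads the whole of A[t%2].
 */
void stencil_step_omp(double ****A, double ****coef, int t,
                      int Nx, int Ny, int Nz)
{
  int i, j, k;
#pragma omp parallel for collapse(2) private(k)
  for (i = 1; i < Nz-1; i++) {
    for (j = 1; j < Ny-1; j++) {
      for (k = 1; k < Nx-1; k++) {
        A[(t+1)%2][i][j][k] = coef[0][i][j][k] * A[t%2][i ][j ][k ] +
                              coef[1][i][j][k] * A[t%2][i-1][j ][k ] +
                              coef[2][i][j][k] * A[t%2][i ][j-1][k ] +
                              coef[3][i][j][k] * A[t%2][i ][j ][k-1] +
                              coef[4][i][j][k] * A[t%2][i+1][j ][k ] +
                              coef[5][i][j][k] * A[t%2][i ][j+1][k ] +
                              coef[6][i][j][k] * A[t%2][i ][j ][k+1];
      }
    }
  }
}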
|
ast-dump-openmp-target-parallel-for-simd.c | // RUN: %clang_cc1 -triple x86_64-unknown-unknown -fopenmp -ast-dump %s | FileCheck --match-full-lines -implicit-check-not=openmp_structured_block %s
void test_one(int x) {
#pragma omp target parallel for simd
for (int i = 0; i < x; i++)
;
}
void test_two(int x, int y) {
#pragma omp target parallel for simd
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_three(int x, int y) {
#pragma omp target parallel for simd collapse(1)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_four(int x, int y) {
#pragma omp target parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
;
}
void test_five(int x, int y, int z) {
#pragma omp target parallel for simd collapse(2)
for (int i = 0; i < x; i++)
for (int i = 0; i < y; i++)
for (int i = 0; i < z; i++)
;
}
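// Note on the expectations below: only the loops associated with the
// directive form its structured block. Without a collapse clause, or with
// collapse(1), that is just the outermost loop, so its body carries the
// openmp_structured_block marker (the NullStmt in test_one, the inner
// ForStmt in test_two and test_three); with collapse(2) both loops are
// associated and the marker moves to the innermost NullStmt.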
// CHECK: TranslationUnitDecl {{.*}} <<invalid sloc>> <invalid sloc>
// CHECK: |-FunctionDecl {{.*}} <{{.*}}ast-dump-openmp-target-parallel-for-simd.c:3:1, line:7:1> line:3:6 test_one 'void (int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:22, line:7:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:4:9, col:37>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:5:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:5:3, line:6:5>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:5:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <col:3, line:6:5>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:5:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:6:5> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:4:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:4:9) *const restrict'
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:5:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:9:1, line:14:1> line:9:6 test_two 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:15, col:19> col:19 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:22, col:26> col:26 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:29, line:14:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:10:9, col:37>
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:11:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:12:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:11:3, line:13:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:11:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:12:5, line:13:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:12:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:13:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:10:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:10:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:11:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:12:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:11:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:12:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:16:1, line:21:1> line:16:6 test_three 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:17, col:21> col:21 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:24, col:28> col:28 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:31, line:21:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:17:9, col:49>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 1
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:18:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:19:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:18:3, line:20:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:18:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:19:5, line:20:7> openmp_structured_block
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:19:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:20:7>
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:17:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:17:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:18:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:19:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:18:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:19:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: |-FunctionDecl {{.*}} <line:23:1, line:28:1> line:23:6 test_four 'void (int, int)'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: | |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: | `-CompoundStmt {{.*}} <col:30, line:28:1>
// CHECK-NEXT: | `-OMPTargetParallelForSimdDirective {{.*}} <line:24:9, col:49>
// CHECK-NEXT: | |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:47> 'int' 2
// CHECK-NEXT: | |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-CapturedStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | |-FieldDecl {{.*}} <line:25:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-FieldDecl {{.*}} <line:26:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | |-ForStmt {{.*}} <line:25:3, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:25:8, col:17>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ForStmt {{.*}} <line:26:5, line:27:7>
// CHECK-NEXT: | | | |-DeclStmt {{.*}} <line:26:10, col:19>
// CHECK-NEXT: | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | |-<<<NULL>>>
// CHECK-NEXT: | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-NullStmt {{.*}} <line:27:7> openmp_structured_block
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <line:24:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:24:9) *const restrict'
// CHECK-NEXT: | | |-VarDecl {{.*}} <line:25:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | `-VarDecl {{.*}} <line:26:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:25:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:26:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-FunctionDecl {{.*}} <line:30:1, line:36:1> line:30:6 test_five 'void (int, int, int)'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:16, col:20> col:20 used x 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:23, col:27> col:27 used y 'int'
// CHECK-NEXT: |-ParmVarDecl {{.*}} <col:30, col:34> col:34 used z 'int'
// CHECK-NEXT: `-CompoundStmt {{.*}} <col:37, line:36:1>
// CHECK-NEXT: `-OMPTargetParallelForSimdDirective {{.*}} <line:31:9, col:49>
// CHECK-NEXT: |-OMPCollapseClause {{.*}} <col:38, col:48>
// CHECK-NEXT: | `-ConstantExpr {{.*}} <col:47> 'int'
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:47> 'int' 2
// CHECK-NEXT: |-OMPFirstprivateClause {{.*}} <<invalid sloc>> <implicit>
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: `-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-AlwaysInlineAttr {{.*}} <<invalid sloc>> Implicit __forceinline
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .part_id. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .privates. 'void *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .copy_fn. 'void (*const restrict)(void *const restrict, ...)'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .task_t. 'void *const'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-CapturedStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | | | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | | | |-<<<NULL>>>
// CHECK-NEXT: | | | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | | | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-RecordDecl {{.*}} <col:9> col:9 implicit struct definition
// CHECK-NEXT: | | |-CapturedRecordAttr {{.*}} <<invalid sloc>> Implicit
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:32:23> col:23 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | |-FieldDecl {{.*}} <line:33:25> col:25 implicit 'int'
// CHECK-NEXT: | | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | | `-FieldDecl {{.*}} <line:34:27> col:27 implicit 'int'
// CHECK-NEXT: | | `-OMPCaptureKindAttr {{.*}} <<invalid sloc>> Implicit 9
// CHECK-NEXT: | `-CapturedDecl {{.*}} <<invalid sloc>> <invalid sloc> nothrow
// CHECK-NEXT: | |-ForStmt {{.*}} <line:32:3, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:32:8, col:17>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:19, col:23> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:19> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:19> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:26, col:27> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:26> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:33:5, line:35:9>
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:33:10, col:19>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:21, col:25> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:21> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:21> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:25> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:28, col:29> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:28> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-ForStmt {{.*}} <line:34:7, line:35:9> openmp_structured_block
// CHECK-NEXT: | | |-DeclStmt {{.*}} <line:34:12, col:21>
// CHECK-NEXT: | | | `-VarDecl {{.*}} <col:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | | | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: | | |-<<<NULL>>>
// CHECK-NEXT: | | |-BinaryOperator {{.*}} <col:23, col:27> 'int' '<'
// CHECK-NEXT: | | | |-ImplicitCastExpr {{.*}} <col:23> 'int' <LValueToRValue>
// CHECK-NEXT: | | | | `-DeclRefExpr {{.*}} <col:23> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | | `-ImplicitCastExpr {{.*}} <col:27> 'int' <LValueToRValue>
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
// CHECK-NEXT: | | |-UnaryOperator {{.*}} <col:30, col:31> 'int' postfix '++'
// CHECK-NEXT: | | | `-DeclRefExpr {{.*}} <col:30> 'int' lvalue Var {{.*}} 'i' 'int'
// CHECK-NEXT: | | `-NullStmt {{.*}} <line:35:9>
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <line:31:9> col:9 implicit .global_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit .bound_tid. 'const int *const restrict'
// CHECK-NEXT: | |-ImplicitParamDecl {{.*}} <col:9> col:9 implicit __context 'struct (anonymous at {{.*}}ast-dump-openmp-target-parallel-for-simd.c:31:9) *const restrict'
// CHECK-NEXT: | |-VarDecl {{.*}} <line:32:8, col:16> col:12 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:16> 'int' 0
// CHECK-NEXT: | |-VarDecl {{.*}} <line:33:10, col:18> col:14 used i 'int' cinit
// CHECK-NEXT: | | `-IntegerLiteral {{.*}} <col:18> 'int' 0
// CHECK-NEXT: | `-VarDecl {{.*}} <line:34:12, col:20> col:16 used i 'int' cinit
// CHECK-NEXT: | `-IntegerLiteral {{.*}} <col:20> 'int' 0
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:32:23> 'int' lvalue ParmVar {{.*}} 'x' 'int'
// CHECK-NEXT: |-DeclRefExpr {{.*}} <line:33:25> 'int' lvalue ParmVar {{.*}} 'y' 'int'
// CHECK-NEXT: `-DeclRefExpr {{.*}} <line:34:27> 'int' lvalue ParmVar {{.*}} 'z' 'int'
|
gt.map2sam.c | /*
* PROJECT: GEM-Tools library
* FILE: gt.map2sam.c
* DATE: 02/02/2013
* AUTHOR(S): Santiago Marco-Sola <santiagomsola@gmail.com>
* DESCRIPTION: Converter from MAP to SAM
*/
#define GT_MAP2SAM "gt.map2sam"
#define GT_MAP2SAM_VERSION "1.1"
#include <getopt.h>
#ifdef HAVE_OPENMP
#include <omp.h>
#endif
#include "gem_tools.h"
typedef struct {
/* I/O */
char* name_input_file;
char* name_output_file;
char* name_reference_file;
char* name_gem_index_file;
char* sam_header_file;
bool mmap_input;
bool paired_end;
bool output_seq_qual_for_secondary_align;
/* Headers */
char *read_group_id;
/* SAM format */
bool compact_format;
/* Optional Fields */
bool optional_field_NH;
bool optional_field_NM;
bool optional_field_XT;
bool optional_field_XS;
bool optional_field_md;
bool calc_phred;
/* Misc */
uint64_t num_threads;
bool verbose;
/* Control flags */
bool load_index;
bool load_index_sequences;
gt_map_score_attributes map_score_attr;
} gt_stats_args;
gt_stats_args parameters = {
/* I/O */
.name_input_file=NULL,
.name_output_file=NULL,
.name_reference_file=NULL,
.name_gem_index_file=NULL,
.sam_header_file=NULL,
.mmap_input=false,
.paired_end=false,
.output_seq_qual_for_secondary_align=false,
/* Headers */
.read_group_id=NULL,
/* SAM format */
.compact_format=false,
/* Optional Fields */
.optional_field_NH=false,
.optional_field_NM=false,
.optional_field_XT=false,
.optional_field_XS=false,
.optional_field_md=false,
.calc_phred=false,
/* Misc */
.num_threads=1,
.verbose=false,
/* Control flags */
.load_index=false,
.load_index_sequences=false,
.map_score_attr.quality_format=GT_QUALS_OFFSET_33,
.map_score_attr.max_strata_searched=0,
};
gt_sequence_archive* gt_filter_open_sequence_archive(const bool load_sequences) {
gt_sequence_archive* sequence_archive = NULL;
if (parameters.name_gem_index_file!=NULL) { // Load GEM-IDX
sequence_archive = gt_sequence_archive_new(GT_BED_ARCHIVE);
gt_gemIdx_load_archive(parameters.name_gem_index_file,sequence_archive,load_sequences);
} else {
gt_input_file* const reference_file = gt_input_file_open(parameters.name_reference_file,false);
sequence_archive = gt_sequence_archive_new(GT_CDNA_ARCHIVE);
if (gt_input_multifasta_parser_get_archive(reference_file,sequence_archive)!=GT_IFP_OK) {
gt_fatal_error_msg("Error parsing reference file '%s'\n",parameters.name_reference_file);
}
gt_input_file_close(reference_file);
}
return sequence_archive;
}
void gt_map2sam_set_mapq_attr(gt_template *template,gt_map_score_attributes *ms_attr)
{
gt_sam_attribute *ys=NULL,*yq=NULL;
ys=gt_attributes_get_sam_attribute(template->attributes,"ms");
if(!ys || ys->type_id!='B') return;
yq=gt_attributes_get_sam_attribute(template->attributes,"mx");
ms_attr->mapping_cutoff=(yq && yq->type_id=='i')?yq->i_value:0;
uint64_t max_complete_strata[2]={0,0};
char *p=gt_string_get_string(ys->s_value);
if(p && *p && p[1]==',') {
char *p1;
max_complete_strata[0]=(uint64_t)strtoul(p+2,&p1,10);
if(*p1==',') max_complete_strata[1]=(uint64_t)strtoul(p1+1,&p,10);
}
uint64_t rd;
for(rd=0;rd<2;rd++) {
gt_alignment *al=gt_template_get_block(template,rd);
if(al) gt_attributes_add(al->attributes,GT_ATTR_ID_MAX_COMPLETE_STRATA,&max_complete_strata[rd],uint64_t);
}
gt_map_calculate_template_mapq_score(template,ms_attr);
}
gt_status gt_map2sam_print_template(gt_buffered_output_file *buffered_output,gt_template *template,gt_output_sam_attributes *output_sam_attributes,gt_stats_args *param)
{
if(parameters.calc_phred || parameters.optional_field_XT) gt_map_calculate_template_mapq_score(template,&param->map_score_attr);
// Print SAM template
return gt_output_sam_bofprint_template(buffered_output,template,output_sam_attributes);
}
gt_status gt_map2sam_print_alignment(gt_buffered_output_file *buffered_output,gt_alignment *alignment,gt_output_sam_attributes *output_sam_attributes,gt_stats_args *param)
{
if(parameters.calc_phred || parameters.optional_field_XT) gt_map_calculate_alignment_mapq_score(alignment,&param->map_score_attr);
// Print SAM alignment
return gt_output_sam_bofprint_alignment(buffered_output,alignment,output_sam_attributes);
}
void gt_map2sam_read__write()
{
// Open file IN/OUT
gt_input_file* const input_file = (parameters.name_input_file==NULL) ?
gt_input_stream_map_open(stdin) : gt_input_file_map_open(parameters.name_input_file,parameters.mmap_input);
gt_output_file* const output_file = (parameters.name_output_file==NULL) ?
gt_output_stream_new(stdout,SORTED_FILE) : gt_output_file_new(parameters.name_output_file,SORTED_FILE);
gt_sam_headers* const sam_headers = gt_sam_header_new(); // SAM headers
if(parameters.sam_header_file) {
gt_input_file* const sam_headers_input_file = gt_input_file_sam_open(parameters.sam_header_file,false);
uint64_t characters_read = 0, lines_read = 0;
gt_status error_code=gt_input_file_sam_read_headers((char *)sam_headers_input_file->file_buffer,sam_headers_input_file->buffer_size,sam_headers,&characters_read,&lines_read);
if(error_code) gt_error(PARSE_SAM_HEADER_NOT_SAM,sam_headers_input_file->file_name);
gt_input_file_close(sam_headers_input_file);
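// Build a unique @PG ID of the form "GToolsLib#<n>" that does not clash with an existing
// program record, chaining PP to the previously last @PG entry in the header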
uint64_t xx=0;
gt_string *pg_id=gt_string_new(64);
gt_sprintf(pg_id,"GToolsLib#%"PRIu64,++xx);
gt_string *prev_id=NULL;
if(sam_headers->program_id_hash) {
gt_sam_header_record* hr=*(gt_sam_header_record **)gt_vector_get_last_elm(sam_headers->program,gt_sam_header_record*);
prev_id=gt_sam_header_record_get_tag(hr,"ID");
do {
if(!gt_shash_get_element(sam_headers->program_id_hash,gt_string_get_string(pg_id))) break;
gt_sprintf(pg_id,"GToolsLib#%"PRIu64,++xx);
} while(xx<10000);
}
if(xx<10000) {
gt_sam_header_record *hr=gt_sam_header_record_new();
gt_sam_header_record_add_tag(hr,"ID",pg_id);
gt_string *pn_st=gt_string_set_new(GT_MAP2SAM);
gt_sam_header_record_add_tag(hr,"PN",pn_st);
gt_string *vn_st=gt_string_set_new(GT_MAP2SAM_VERSION);
if(prev_id) gt_sam_header_record_add_tag(hr,"PP",prev_id);
gt_sam_header_record_add_tag(hr,"VN",vn_st);
gt_sam_header_add_program_record(sam_headers,hr);
}
}
// Open reference file
gt_sequence_archive* sequence_archive = NULL;
if (parameters.load_index) {
sequence_archive = gt_filter_open_sequence_archive(parameters.load_index_sequences);
gt_sam_header_load_sequence_archive(sam_headers,sequence_archive);
}
// Print SAM headers
gt_output_sam_ofprint_headers_sh(output_file,sam_headers);
// Parallel reading+process
#ifdef HAVE_OPENMP
#pragma omp parallel num_threads(parameters.num_threads)
#endif
{
gt_status error_code;
gt_buffered_input_file* buffered_input = gt_buffered_input_file_new(input_file);
gt_buffered_output_file* buffered_output = gt_buffered_output_file_new(output_file);
gt_buffered_input_file_attach_buffered_output(buffered_input,buffered_output);
// I/O attributes
gt_map_parser_attributes* const input_map_attributes = gt_input_map_parser_attributes_new(parameters.paired_end);
gt_output_sam_attributes* const output_sam_attributes = gt_output_sam_attributes_new();
// Set out attributes
output_sam_attributes->output_seq_qual_for_secondary_align=parameters.output_seq_qual_for_secondary_align;
gt_output_sam_attributes_set_compact_format(output_sam_attributes,parameters.compact_format);
gt_output_sam_attributes_set_qualities_offset(output_sam_attributes,parameters.map_score_attr.quality_format);
gt_output_sam_attributes_set_print_mismatches(output_sam_attributes,false);
gt_sam_attributes_add_tag_options(gt_map2sam_attribute_option_list,output_sam_attributes->sam_attributes);
if(sam_headers->read_group_id_hash) {
gt_sam_header_record *hr=NULL;
if (parameters.read_group_id) {
size_t* ix=gt_shash_get_element(sam_headers->read_group_id_hash,parameters.read_group_id);
if(ix) {
hr=*(gt_sam_header_record **)gt_vector_get_elm(sam_headers->read_group,*ix,gt_sam_header_record*);
} else gt_error(SAM_OUTPUT_UNKNOWN_RG_ID,parameters.read_group_id);
} else {
hr=*(gt_sam_header_record **)gt_vector_get_last_elm(sam_headers->read_group,gt_sam_header_record*);
}
if(hr) {
gt_string *id_tag=gt_sam_header_record_get_tag(hr,"ID");
if(!id_tag) gt_fatal_error(PARSE_SAM_HEADER_MISSING_TAG,"RG","ID"); // Should have been detected before, but we check again anyway
gt_sam_attributes_add_tag_RG(output_sam_attributes->sam_attributes,id_tag);
gt_string *lib_tag=gt_sam_header_record_get_tag(hr,"LB");
if(lib_tag) gt_sam_attributes_add_tag_LB(output_sam_attributes->sam_attributes,lib_tag);
}
} else if(parameters.read_group_id) gt_error(SAM_OUTPUT_NO_HEADER_FOR_RG);
gt_template* template = gt_template_new();
while ((error_code=gt_input_map_parser_get_template(buffered_input,template,input_map_attributes))) {
if (error_code!=GT_IMP_OK) {
gt_error_msg("Fatal error parsing file '%s':%"PRIu64"\n",parameters.name_input_file,buffered_input->current_line_num-1);
continue;
}
gt_status print_code;
if(parameters.paired_end) {
print_code=gt_map2sam_print_template(buffered_output,template,output_sam_attributes,&parameters);
} else {
print_code=gt_map2sam_print_alignment(buffered_output,gt_template_get_block(template,0),output_sam_attributes,&parameters);
}
if(print_code) gt_error_msg("Fatal error outputting read '"PRIgts"'\n",PRIgts_content(gt_template_get_string_tag(template)));
}
// Clean
gt_template_delete(template);
gt_input_map_parser_attributes_delete(input_map_attributes);
gt_output_sam_attributes_delete(output_sam_attributes);
gt_buffered_input_file_close(buffered_input);
gt_buffered_output_file_close(buffered_output);
}
// Release archive & Clean
if (sequence_archive) gt_sequence_archive_delete(sequence_archive);
gt_sam_header_delete(sam_headers);
gt_input_file_close(input_file);
gt_output_file_close(output_file);
}
void usage(const gt_option* const options,char* groups[],const bool print_inactive) {
fprintf(stderr, "USE: ./gt.map2sam [ARGS]...\n");
gt_options_fprint_menu(stderr,options,groups,false,print_inactive);
}
void parse_arguments(int argc,char** argv) {
struct option* gt_map2sam_getopt = gt_options_adaptor_getopt(gt_map2sam_options);
gt_string* const gt_map2sam_short_getopt = gt_options_adaptor_getopt_short(gt_map2sam_options);
int option, option_index;
while (true) {
// Get option & Select case
if ((option=getopt_long(argc,argv,
gt_string_get_string(gt_map2sam_short_getopt),gt_map2sam_getopt,&option_index))==-1) break;
switch (option) {
/* I/O */
case 'i':
parameters.name_input_file = optarg;
break;
case 'o':
parameters.name_output_file = optarg;
break;
case 'r':
parameters.name_reference_file = optarg;
parameters.load_index = true;
break;
case 'I':
parameters.name_gem_index_file = optarg;
parameters.load_index = true;
break;
case 's':
parameters.sam_header_file = optarg;
break;
case 'p':
parameters.paired_end = true;
break;
case 'Q':
parameters.calc_phred = true;
break;
case 200:
parameters.mmap_input = true;
gt_fatal_error(NOT_IMPLEMENTED);
break;
/* Headers */
case 300: // Read-group ID
parameters.read_group_id = optarg;
break;
// TODO
/* Alignments */
case 'q':
if (gt_streq(optarg,"offset-64")) {
parameters.map_score_attr.quality_format=GT_QUALS_OFFSET_64;
} else if (gt_streq(optarg,"offset-33")) {
parameters.map_score_attr.quality_format=GT_QUALS_OFFSET_33;
} else {
gt_fatal_error_msg("Quality format not recognized: '%s'",optarg);
}
break;
/* Optional Fields */
case 500:
if(gt_sam_attributes_parse_tag_option_string(gt_map2sam_attribute_option_list,optarg)!=GT_STATUS_OK) {
gt_fatal_error_msg("Unable to parse --tag option '%s'\n",optarg);
}
break;
/* Format */
case 'c':
parameters.compact_format = true;
break;
case 600:
parameters.output_seq_qual_for_secondary_align = true;
break;
/* Misc */
case 'v':
parameters.verbose = true;
break;
case 't':
#ifdef HAVE_OPENMP
parameters.num_threads = atol(optarg);
#endif
break;
case 'h':
usage(gt_map2sam_options,gt_map2sam_groups,false);
exit(1);
break;
case 'H':
usage(gt_map2sam_options,gt_map2sam_groups,true);
exit(1);
case 'J':
gt_options_fprint_json_menu(stderr,gt_map2sam_options,gt_map2sam_groups,true,false);
exit(1);
break;
case '?':
default:
gt_fatal_error_msg("Option not recognized");
}
}
/*
* Parameters check
*/
if (parameters.load_index && parameters.name_reference_file==NULL && parameters.name_gem_index_file==NULL) {
gt_fatal_error_msg("Reference file required");
}
if(!parameters.load_index && parameters.optional_field_XS){
gt_fatal_error_msg("Reference file required to compute XS field in SAM");
}
// Free
gt_string_delete(gt_map2sam_short_getopt);
}
int main(int argc,char** argv) {
// GT error handler
gt_handle_error_signals();
// Parsing command-line options
parse_arguments(argc,argv);
// map2sam !!
gt_map2sam_read__write();
return 0;
}
|
convolutiondepthwise_3x3_int8.h | // SenseNets is pleased to support the open source community by making ncnn available.
//
// Copyright (C) 2018 SenseNets Technology Ltd. All rights reserved.
//
// Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
// in compliance with the License. You may obtain a copy of the License at
//
// https://opensource.org/licenses/BSD-3-Clause
//
// Unless required by applicable law or agreed to in writing, software distributed
// under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
// CONDITIONS OF ANY KIND, either express or implied. See the License for the
// specific language governing permissions and limitations under the License.
#if __ARM_NEON
#include <arm_neon.h>
#endif // __ARM_NEON
#if __aarch64__
static void convdw3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char *)_kernel + p*9;
int* outptr0 = out;
int* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(p);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w*2;
const signed char* r3 = img0 + w*3;
int i = 0;
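// broadcast each of the 9 kernel taps into an 8-lane vector so vmull_s8/vmlal_s8 can use them directly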
int8x8_t _k0 = vdup_n_s8(kernel[0]);
int8x8_t _k1 = vdup_n_s8(kernel[1]);
int8x8_t _k2 = vdup_n_s8(kernel[2]);
int8x8_t _k3 = vdup_n_s8(kernel[3]);
int8x8_t _k4 = vdup_n_s8(kernel[4]);
int8x8_t _k5 = vdup_n_s8(kernel[5]);
int8x8_t _k6 = vdup_n_s8(kernel[6]);
int8x8_t _k7 = vdup_n_s8(kernel[7]);
int8x8_t _k8 = vdup_n_s8(kernel[8]);
for (; i+1 < outh; i+=2)
{
int nn = outw >> 3;
int remain = outw & 7;
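// nn: 8-wide vector blocks per output row; remain: leftover columns handled by the scalar tail loop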
for (; nn >0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k0);
_sum0 = vmlal_s8(_sum0, _r01, _k1);
_sum0 = vmlal_s8(_sum0, _r02, _k2);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k3);
_sum0 = vmlal_s8(_sum0, _r11, _k4);
_sum0 = vmlal_s8(_sum0, _r12, _k5);
int16x8_t _sum1 = vmull_s8(_r1, _k0);
_sum1 = vmlal_s8(_sum1, _r11, _k1);
_sum1 = vmlal_s8(_sum1, _r12, _k2);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k6);
_sum0 = vmlal_s8(_sum0, _r21, _k7);
_sum0 = vmlal_s8(_sum0, _r22, _k8);
_sum1 = vmlal_s8(_sum1, _r2, _k3);
_sum1 = vmlal_s8(_sum1, _r21, _k4);
_sum1 = vmlal_s8(_sum1, _r22, _k5);
int8x8_t _r3 = vld1_s8(r3);
int8x8_t _r3n = vld1_s8(r3+8);
int8x8_t _r31 = vext_s8(_r3, _r3n, 1);
int8x8_t _r32 = vext_s8(_r3, _r3n, 2);
_sum1 = vmlal_s8(_sum1, _r3, _k6);
_sum1 = vmlal_s8(_sum1, _r31, _k7);
_sum1 = vmlal_s8(_sum1, _r32, _k8);
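// sign-extend the int16 accumulators to int32 and store 8 results for each of the two output rows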
int32x4_t sum0_s32 = vmovl_s16(vget_low_s16(_sum0));
int32x4_t sum0n_s32 = vmovl_s16(vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
int32x4_t sum1_s32 = vmovl_s16(vget_low_s16(_sum1));
int32x4_t sum1n_s32 = vmovl_s16(vget_high_s16(_sum1));
vst1q_s32(outptr0n, sum1_s32);
vst1q_s32(outptr0n+4, sum1n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outptr0 += 8;
outptr0n += 8;
}
for (; remain>0; remain--)
{
// TODO NEON
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0 = sum0;
*outptr0n = sum0n;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
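// skip the 2-column right border, then advance one extra input row: two output rows were produced,
// so the next iteration starts two input rows down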
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
for (; nn >0; nn--)
{
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _sum0 = vmull_s8(_r0, _k0);
_sum0 = vmlal_s8(_sum0, _r01, _k1);
_sum0 = vmlal_s8(_sum0, _r02, _k2);
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
_sum0 = vmlal_s8(_sum0, _r1, _k3);
_sum0 = vmlal_s8(_sum0, _r11, _k4);
_sum0 = vmlal_s8(_sum0, _r12, _k5);
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
_sum0 = vmlal_s8(_sum0, _r2, _k6);
_sum0 = vmlal_s8(_sum0, _r21, _k7);
_sum0 = vmlal_s8(_sum0, _r22, _k8);
int32x4_t sum0_s32 = vmovl_s16(vget_low_s16(_sum0));
int32x4_t sum0n_s32 = vmovl_s16(vget_high_s16(_sum0));
vst1q_s32(outptr0, sum0_s32);
vst1q_s32(outptr0+4, sum0n_s32);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0 = sum;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
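// with stride 2, one output row consumes 2*outw input columns; tailstep jumps over the
// unconsumed remainder of the current input row plus one full row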
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char*)_kernel + p*9;
int* outptr = out;
const signed char* img = bottom_blob.channel(p);
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w*2;
int i = 0;
int8x8_t _k0 = vdup_n_s8(kernel[0]);
int8x8_t _k1 = vdup_n_s8(kernel[1]);
int8x8_t _k2 = vdup_n_s8(kernel[2]);
int8x8_t _k3 = vdup_n_s8(kernel[3]);
int8x8_t _k4 = vdup_n_s8(kernel[4]);
int8x8_t _k5 = vdup_n_s8(kernel[5]);
int8x8_t _k6 = vdup_n_s8(kernel[6]);
int8x8_t _k7 = vdup_n_s8(kernel[7]);
int8x8_t _k8 = vdup_n_s8(kernel[8]);
for (; i < outh; i++)
{
int nn = outw >> 3;
int remain = outw & 7;
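// vld2_s8 deinterleaves even/odd input columns, yielding the stride-2 neighbours directly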
for (; nn > 0; nn--)
{
int8x8x2_t _r0 = vld2_s8(r0);
int8x8x2_t _r0n = vld2_s8(r0+16);
int8x8_t _r00 = _r0.val[0];
int8x8_t _r01 = _r0.val[1];
int8x8_t _r02 = vext_s8(_r00, _r0n.val[0], 1);
int16x8_t _sum = vmull_s8(_r00, _k0);
_sum = vmlal_s8(_sum, _r01, _k1);
_sum = vmlal_s8(_sum, _r02, _k2);
int8x8x2_t _r1 = vld2_s8(r1);
int8x8x2_t _r1n = vld2_s8(r1+16);
int8x8_t _r10 = _r1.val[0];
int8x8_t _r11 = _r1.val[1];
int8x8_t _r12 = vext_s8(_r10, _r1n.val[0], 1);
_sum = vmlal_s8(_sum, _r10, _k3);
_sum = vmlal_s8(_sum, _r11, _k4);
_sum = vmlal_s8(_sum, _r12, _k5);
int8x8x2_t _r2 = vld2_s8(r2);
int8x8x2_t _r2n = vld2_s8(r2+16);
int8x8_t _r20 = _r2.val[0];
int8x8_t _r21 = _r2.val[1];
int8x8_t _r22 = vext_s8(_r20, _r2n.val[0], 1);
_sum = vmlal_s8(_sum, _r20, _k6);
_sum = vmlal_s8(_sum, _r21, _k7);
_sum = vmlal_s8(_sum, _r22, _k8);
int32x4_t sum0_s32 = vmovl_s16(vget_low_s16(_sum));
int32x4_t sum0n_s32 = vmovl_s16(vget_high_s16(_sum));
vst1q_s32(outptr, sum0_s32);
vst1q_s32(outptr+4, sum0n_s32);
r0 += 16;
r1 += 16;
r2 += 16;
outptr += 8;
}
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
#else // __aarch64__
static void convdw3x3s1_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p = 0; p < outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char *)_kernel + p*9;
int* outptr0 = out;
int* outptr0n = outptr0 + outw;
const signed char* img0 = bottom_blob.channel(p);
const signed char* r0 = img0;
const signed char* r1 = img0 + w;
const signed char* r2 = img0 + w*2;
const signed char* r3 = img0 + w*3;
int i = 0;
#if __ARM_NEON
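// load all 9 taps at once, widen to int16, and split into 4-lane halves for vmlal_lane_s16
// (accumulation is done in int32 here, so the int16 partial products cannot overflow)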
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i+1 < outh; i+=2)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
for (; nn >0; nn--)
{
// r0
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _r0_s16 = vmovl_s8(_r0); // r00 - r07
int16x8_t _r01_s16 = vmovl_s8(_r01); // r01 - r08
int16x8_t _r02_s16 = vmovl_s8(_r02); // r02 - r09
int32x4_t _sum0 = vmull_lane_s16(vget_low_s16(_r0_s16), _k0123, 0); // (r00 - r07) * k00
int32x4_t _sum0n = vmull_lane_s16(vget_high_s16(_r0_s16), _k0123, 0);
int32x4_t _sum1 = vmull_lane_s16(vget_low_s16(_r01_s16), _k0123, 1); // (r01 - r08) * k01
int32x4_t _sum1n = vmull_lane_s16(vget_high_s16(_r01_s16), _k0123, 1);
int32x4_t _sum2 = vmull_lane_s16(vget_low_s16(_r02_s16), _k0123, 2); // (r02 - r09) * k02
int32x4_t _sum2n = vmull_lane_s16(vget_high_s16(_r02_s16), _k0123, 2);
// r1
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
int16x8_t _r1_s16 = vmovl_s8(_r1); // r10 - r17
int16x8_t _r11_s16 = vmovl_s8(_r11); // r11 - r18
int16x8_t _r12_s16 = vmovl_s8(_r12); // r12 - r19
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r1_s16), _k0123, 3); // (r10 - r17) * k03
_sum0n = vmlal_lane_s16(_sum0n, vget_high_s16(_r1_s16), _k0123, 3);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_r11_s16), _k4567, 0); // (r11 - r18) * k04
_sum1n = vmlal_lane_s16(_sum1n, vget_high_s16(_r11_s16), _k4567, 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r12_s16), _k4567, 1); // (r12 - r19) * k05
_sum2n = vmlal_lane_s16(_sum2n, vget_high_s16(_r12_s16), _k4567, 1);
int32x4_t _sum4 = vmull_lane_s16(vget_low_s16(_r1_s16), _k0123, 0); // (r10 - r17) * k00
int32x4_t _sum4n = vmull_lane_s16(vget_high_s16(_r1_s16), _k0123, 0);
int32x4_t _sum5 = vmull_lane_s16(vget_low_s16(_r11_s16), _k0123, 1); // (r11 - r18) * k01
int32x4_t _sum5n = vmull_lane_s16(vget_high_s16(_r11_s16), _k0123, 1);
int32x4_t _sum6 = vmull_lane_s16(vget_low_s16(_r12_s16), _k0123, 2); // (r12 - r19) * k02
int32x4_t _sum6n = vmull_lane_s16(vget_high_s16(_r12_s16), _k0123, 2);
// r2
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
int16x8_t _r2_s16 = vmovl_s8(_r2); // r20 - r27
int16x8_t _r21_s16 = vmovl_s8(_r21); // r21 - r28
int16x8_t _r22_s16 = vmovl_s8(_r22); // r22 - r29
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2_s16), _k4567, 2); // (r20 - r27) * k06
_sum0n = vmlal_lane_s16(_sum0n, vget_high_s16(_r2_s16), _k4567, 2);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_r21_s16), _k4567, 3); // (r21 - r28) * k07
_sum1n = vmlal_lane_s16(_sum1n, vget_high_s16(_r21_s16), _k4567, 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r22_s16), _k8xxx, 0); // (r22 - r29) * k08
_sum2n = vmlal_lane_s16(_sum2n, vget_high_s16(_r22_s16), _k8xxx, 0);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_r2_s16), _k0123, 3); // (r20 - r27) * k03
_sum4n = vmlal_lane_s16(_sum4n, vget_high_s16(_r2_s16), _k0123, 3);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_r21_s16), _k4567, 0); // (r21 - r28) * k04
_sum5n = vmlal_lane_s16(_sum5n, vget_high_s16(_r21_s16), _k4567, 0);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_r22_s16), _k4567, 1); // (r22 - r29) * k05
_sum6n = vmlal_lane_s16(_sum6n, vget_high_s16(_r22_s16), _k4567, 1);
// r3
int8x8_t _r3 = vld1_s8(r3);
int8x8_t _r3n = vld1_s8(r3+8);
int8x8_t _r31 = vext_s8(_r3, _r3n, 1);
int8x8_t _r32 = vext_s8(_r3, _r3n, 2);
int16x8_t _r3_s16 = vmovl_s8(_r3); // r30 - r37
int16x8_t _r31_s16 = vmovl_s8(_r31); // r31 - r38
int16x8_t _r32_s16 = vmovl_s8(_r32); // r32 - r39
_sum0 = vaddq_s32(_sum0, _sum1);
_sum0n = vaddq_s32(_sum0n, _sum1n);
_sum2 = vaddq_s32(_sum2, _sum0);
_sum2n = vaddq_s32(_sum2n, _sum0n);
vst1q_s32(outptr0, _sum2);
vst1q_s32(outptr0+4, _sum2n);
_sum4 = vmlal_lane_s16(_sum4, vget_low_s16(_r3_s16), _k4567, 2); // (r30 - r37) * k06
_sum4n = vmlal_lane_s16(_sum4n, vget_high_s16(_r3_s16), _k4567, 2);
_sum5 = vmlal_lane_s16(_sum5, vget_low_s16(_r31_s16), _k4567, 3); // (r31 - r38) * k07
_sum5n = vmlal_lane_s16(_sum5n, vget_high_s16(_r31_s16), _k4567, 3);
_sum6 = vmlal_lane_s16(_sum6, vget_low_s16(_r32_s16), _k8xxx, 0); // (r32 - r39) * k08
_sum6n = vmlal_lane_s16(_sum6n, vget_high_s16(_r32_s16), _k8xxx, 0);
_sum4 = vaddq_s32(_sum4, _sum5);
_sum4n = vaddq_s32(_sum4n, _sum5n);
_sum6 = vaddq_s32(_sum6, _sum4);
_sum6n = vaddq_s32(_sum6n, _sum4n);
vst1q_s32(outptr0n, _sum6);
vst1q_s32(outptr0n+4, _sum6n);
r0 += 8;
r1 += 8;
r2 += 8;
r3 += 8;
outptr0 += 8;
outptr0n += 8;
}
#endif // __ARM_NEON
for (; remain>0; remain--)
{
// TODO NEON
int sum0 = 0;
int sum0n = 0;
sum0 += (int)r0[0] * kernel[0];
sum0 += (int)r0[1] * kernel[1];
sum0 += (int)r0[2] * kernel[2];
sum0 += (int)r1[0] * kernel[3];
sum0 += (int)r1[1] * kernel[4];
sum0 += (int)r1[2] * kernel[5];
sum0 += (int)r2[0] * kernel[6];
sum0 += (int)r2[1] * kernel[7];
sum0 += (int)r2[2] * kernel[8];
sum0n += (int)r1[0] * kernel[0];
sum0n += (int)r1[1] * kernel[1];
sum0n += (int)r1[2] * kernel[2];
sum0n += (int)r2[0] * kernel[3];
sum0n += (int)r2[1] * kernel[4];
sum0n += (int)r2[2] * kernel[5];
sum0n += (int)r3[0] * kernel[6];
sum0n += (int)r3[1] * kernel[7];
sum0n += (int)r3[2] * kernel[8];
*outptr0 = sum0;
*outptr0n = sum0n;
r0++;
r1++;
r2++;
r3++;
outptr0++;
outptr0n++;
}
r0 += 2 + w;
r1 += 2 + w;
r2 += 2 + w;
r3 += 2 + w;
outptr0 += outw;
outptr0n += outw;
}
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
for (; nn >0; nn--)
{
// r0
int8x8_t _r0 = vld1_s8(r0);
int8x8_t _r0n = vld1_s8(r0+8);
int8x8_t _r01 = vext_s8(_r0, _r0n, 1);
int8x8_t _r02 = vext_s8(_r0, _r0n, 2);
int16x8_t _r0_s16 = vmovl_s8(_r0); // r00 - r07
int16x8_t _r01_s16 = vmovl_s8(_r01); // r01 - r08
int16x8_t _r02_s16 = vmovl_s8(_r02); // r02 - r09
int32x4_t _sum0 = vmull_lane_s16(vget_low_s16(_r0_s16), _k0123, 0); // (r00 - r07) * k00
int32x4_t _sum0n = vmull_lane_s16(vget_high_s16(_r0_s16), _k0123, 0);
int32x4_t _sum1 = vmull_lane_s16(vget_low_s16(_r01_s16), _k0123, 1); // (r01 - r08) * k01
int32x4_t _sum1n = vmull_lane_s16(vget_high_s16(_r01_s16), _k0123, 1);
int32x4_t _sum2 = vmull_lane_s16(vget_low_s16(_r02_s16), _k0123, 2); // (r02 - r09) * k02
int32x4_t _sum2n = vmull_lane_s16(vget_high_s16(_r02_s16), _k0123, 2);
// r1
int8x8_t _r1 = vld1_s8(r1);
int8x8_t _r1n = vld1_s8(r1+8);
int8x8_t _r11 = vext_s8(_r1, _r1n, 1);
int8x8_t _r12 = vext_s8(_r1, _r1n, 2);
int16x8_t _r1_s16 = vmovl_s8(_r1); // r10 - r17
int16x8_t _r11_s16 = vmovl_s8(_r11); // r11 - r18
int16x8_t _r12_s16 = vmovl_s8(_r12); // r12 - r19
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r1_s16), _k0123, 3); // (r10 - r17) * k03
_sum0n = vmlal_lane_s16(_sum0n, vget_high_s16(_r1_s16), _k0123, 3);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_r11_s16), _k4567, 0); // (r11 - r18) * k04
_sum1n = vmlal_lane_s16(_sum1n, vget_high_s16(_r11_s16), _k4567, 0);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r12_s16), _k4567, 1); // (r12 - r19) * k05
_sum2n = vmlal_lane_s16(_sum2n, vget_high_s16(_r12_s16), _k4567, 1);
// r2
int8x8_t _r2 = vld1_s8(r2);
int8x8_t _r2n = vld1_s8(r2+8);
int8x8_t _r21 = vext_s8(_r2, _r2n, 1);
int8x8_t _r22 = vext_s8(_r2, _r2n, 2);
int16x8_t _r2_s16 = vmovl_s8(_r2); // r20 - r27
int16x8_t _r21_s16 = vmovl_s8(_r21); // r21 - r28
int16x8_t _r22_s16 = vmovl_s8(_r22); // r22 - r29
_sum0 = vmlal_lane_s16(_sum0, vget_low_s16(_r2_s16), _k4567, 2); // (r20 - r27) * k06
_sum0n = vmlal_lane_s16(_sum0n, vget_high_s16(_r2_s16), _k4567, 2);
_sum1 = vmlal_lane_s16(_sum1, vget_low_s16(_r21_s16), _k4567, 3); // (r21 - r28) * k07
_sum1n = vmlal_lane_s16(_sum1n, vget_high_s16(_r21_s16), _k4567, 3);
_sum2 = vmlal_lane_s16(_sum2, vget_low_s16(_r22_s16), _k8xxx, 0); // (r22 - r29) * k08
_sum2n = vmlal_lane_s16(_sum2n, vget_high_s16(_r22_s16), _k8xxx, 0);
_sum0 = vaddq_s32(_sum0, _sum1);
_sum0n = vaddq_s32(_sum0n, _sum1n);
_sum2 = vaddq_s32(_sum2, _sum0);
_sum2n = vaddq_s32(_sum2n, _sum0n);
vst1q_s32(outptr0, _sum2);
vst1q_s32(outptr0+4, _sum2n);
r0 += 8;
r1 += 8;
r2 += 8;
outptr0 += 8;
}
#endif // __ARM_NEON
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr0 = sum;
r0++;
r1++;
r2++;
outptr0++;
}
r0 += 2;
r1 += 2;
r2 += 2;
}
}
}
static void convdw3x3s2_int8_neon(const Mat &bottom_blob, Mat &top_blob, const Mat &_kernel, const Option& opt)
{
int w = bottom_blob.w;
int outw = top_blob.w;
int outh = top_blob.h;
int outch = top_blob.c;
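// stride-2 conv: each output row consumes 2*outw input pixels, so the row
// tail below skips the remainder plus one full input row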
const int tailstep = w - 2*outw + w;
#pragma omp parallel for num_threads(opt.num_threads)
for (int p=0; p<outch; p++)
{
Mat out = top_blob.channel(p);
const signed char* kernel = (const signed char*)_kernel + p*9;
int* outptr = out;
const signed char* img = bottom_blob.channel(p);
const signed char* r0 = img;
const signed char* r1 = img + w;
const signed char* r2 = img + w*2;
int i = 0;
#if __ARM_NEON
int8x16_t _k0123456789x = vld1q_s8(kernel);
int16x8_t _k_s16 = vmovl_s8(vget_low_s8(_k0123456789x));
int16x8_t _kn_s16 = vmovl_s8(vget_high_s8(_k0123456789x));
int16x4_t _k0123 = vget_low_s16(_k_s16);
int16x4_t _k4567 = vget_high_s16(_k_s16);
int16x4_t _k8xxx = vget_low_s16(_kn_s16);
#endif // __ARM_NEON
for (; i < outh; i++)
{
#if __ARM_NEON
int nn = outw >> 3;
int remain = outw & 7;
#else
int remain = outw;
#endif // __ARM_NEON
#if __ARM_NEON
for (; nn > 0; nn--)
{
// r0
int8x8x2_t _r0 = vld2_s8(r0);
int8x8x2_t _r0n = vld2_s8(r0+16);
int8x8_t _r00 = _r0.val[0]; // r00 - r014
int8x8_t _r01 = _r0.val[1]; // r01 - r015
int8x8_t _r02 = vext_s8(_r00, _r0n.val[0], 1); // r02 - r016
int16x8_t _r00_s16 = vmovl_s8(_r00); // r00 - r014
int16x8_t _r01_s16 = vmovl_s8(_r01); // r01 - r015
int16x8_t _r02_s16 = vmovl_s8(_r02); // r02 - r016
int32x4_t _sum0_s32 = vmull_lane_s16(vget_low_s16(_r00_s16), _k0123, 0); // (r00-r06) * k00
int32x4_t _sum0n_s32 = vmull_lane_s16(vget_high_s16(_r00_s16), _k0123, 0);
int32x4_t _sum1_s32 = vmull_lane_s16(vget_low_s16(_r01_s16), _k0123, 1); // (r01-r07) * k01
int32x4_t _sum1n_s32 = vmull_lane_s16(vget_high_s16(_r01_s16), _k0123, 1);
int32x4_t _sum2_s32 = vmull_lane_s16(vget_low_s16(_r02_s16), _k0123, 2); // (r02-r08) * k02
int32x4_t _sum2n_s32 = vmull_lane_s16(vget_high_s16(_r02_s16), _k0123, 2);
// r1
int8x8x2_t _r1 = vld2_s8(r1);
int8x8x2_t _r1n = vld2_s8(r1+16);
int8x8_t _r10 = _r1.val[0]; // r10 - r114
int8x8_t _r11 = _r1.val[1]; // r11 - r115
int8x8_t _r12 = vext_s8(_r10, _r1n.val[0], 1); // r12 - r116
int16x8_t _r10_s16 = vmovl_s8(_r10); // r10 - r114
int16x8_t _r11_s16 = vmovl_s8(_r11); // r11 - r115
int16x8_t _r12_s16 = vmovl_s8(_r12); // r12 - r116
_sum0_s32 = vmlal_lane_s16(_sum0_s32, vget_low_s16(_r10_s16), _k0123, 3); // (r10-r16) * k03
_sum0n_s32 = vmlal_lane_s16(_sum0n_s32, vget_high_s16(_r10_s16), _k0123, 3);
_sum1_s32 = vmlal_lane_s16(_sum1_s32, vget_low_s16(_r11_s16), _k4567, 0); // (r11-r17) * k04
_sum1n_s32 = vmlal_lane_s16(_sum1n_s32, vget_high_s16(_r11_s16), _k4567, 0);
_sum2_s32 = vmlal_lane_s16(_sum2_s32, vget_low_s16(_r12_s16), _k4567, 1); // (r12-r18) * k05
_sum2n_s32 = vmlal_lane_s16(_sum2n_s32, vget_high_s16(_r12_s16), _k4567, 1);
// r2
int8x8x2_t _r2 = vld2_s8(r2);
int8x8x2_t _r2n = vld2_s8(r2+16);
int8x8_t _r20 = _r2.val[0]; // r20 - r214
int8x8_t _r21 = _r2.val[1]; // r21 - r215
int8x8_t _r22 = vext_s8(_r20, _r2n.val[0], 1); // r22 - r216
int16x8_t _r20_s16 = vmovl_s8(_r20); // r20 - r214
int16x8_t _r21_s16 = vmovl_s8(_r21); // r21 - r215
int16x8_t _r22_s16 = vmovl_s8(_r22); // r22 - r216
_sum0_s32 = vmlal_lane_s16(_sum0_s32, vget_low_s16(_r20_s16), _k4567, 2); // (r20-r26) * k06
_sum0n_s32 = vmlal_lane_s16(_sum0n_s32, vget_high_s16(_r20_s16), _k4567, 2);
_sum1_s32 = vmlal_lane_s16(_sum1_s32, vget_low_s16(_r21_s16), _k4567, 3); // (r21-r27) * k07
_sum1n_s32 = vmlal_lane_s16(_sum1n_s32, vget_high_s16(_r21_s16), _k4567, 3);
_sum2_s32 = vmlal_lane_s16(_sum2_s32, vget_low_s16(_r22_s16), _k8xxx, 0); // (r22-r28) * k08
_sum2n_s32 = vmlal_lane_s16(_sum2n_s32, vget_high_s16(_r22_s16), _k8xxx, 0);
_sum0_s32 = vaddq_s32(_sum0_s32, _sum1_s32);
_sum0n_s32 = vaddq_s32(_sum0n_s32, _sum1n_s32);
_sum2_s32 = vaddq_s32(_sum2_s32, _sum0_s32);
_sum2n_s32 = vaddq_s32(_sum2n_s32, _sum0n_s32);
vst1q_s32(outptr, _sum2_s32);
vst1q_s32(outptr+4, _sum2n_s32);
r0 += 16;
r1 += 16;
r2 += 16;
outptr += 8;
}
#endif // __ARM_NEON
for (; remain>0; remain--)
{
int sum = 0;
sum += (int)r0[0] * kernel[0];
sum += (int)r0[1] * kernel[1];
sum += (int)r0[2] * kernel[2];
sum += (int)r1[0] * kernel[3];
sum += (int)r1[1] * kernel[4];
sum += (int)r1[2] * kernel[5];
sum += (int)r2[0] * kernel[6];
sum += (int)r2[1] * kernel[7];
sum += (int)r2[2] * kernel[8];
*outptr = sum;
r0 += 2;
r1 += 2;
r2 += 2;
outptr++;
}
r0 += tailstep;
r1 += tailstep;
r2 += tailstep;
}
}
}
#endif
|
gate.h | /**
* @file gate.h
* @author Nader KHAMMASSI - nader.khammassi@gmail.com
* @date 02-10-15
* @brief
*/
#pragma once
#ifndef QX_GATE_H
#define QX_GATE_H
#include <map>
#include <inttypes.h>
#include <immintrin.h> // avx
#include <emmintrin.h> // sse
#include <algorithm>
#include "qx/core/hash_set.h"
#include "qx/core/linalg.h"
#include "qx/core/register.h"
#include "qx/core/binary_counter.h"
#include "qx/core/kronecker.h"
#include "qx/compat.h"
#include <chrono>
#ifdef USE_OPENMP
#include <omp.h>
#endif
// #ifndef __BUILTIN_LINALG__
// #include <boost/numeric/ublas/matrix.hpp>
// #endif
#define SQRT_2 (1.4142135623730950488016887242096980785696718753769480731766797379f)
#define R_SQRT_2 (0.7071067811865475244008443621048490392848359376884740365883398690f)
#define __bit_test(x,pos) ((x) & (1UL<<(pos)))
#define __bit_set(x,pos) ((x) | (1UL<<(pos)))
#define __bit_flip(x,pos) ((x) ^ (1UL<<(pos)))
#define __bit_reset(x,pos) ((x) & ~(1UL<<(pos)))
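// e.g. __bit_set(0x4UL, 0) == 0x5UL ; __bit_flip(0x5UL, 2) == 0x1UL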
#define __AVX__NO
#define __OP_PREFETCH__
//#define SQRT_2 (1.41421356237309504880f)
//#define R_SQRT_2 (0.70710678118654752440f)
#define ROUND_DOWN(x, s) ((x) & ~((s)-1))
#define IS_ODD(x) (x & 1)
namespace qx
{
/**
* types definition
*/
typedef uint64_t basis_state_t;
typedef std::map<basis_state_t,complex_t> quantum_state_t;
typedef enum __gate_type_t
{
__identity_gate__,
__hadamard_gate__,
__pauli_x_gate__ ,
__pauli_y_gate__ ,
__pauli_z_gate__ ,
__cnot_gate__ ,
__toffoli_gate__ ,
__swap_gate__ ,
__phase_gate__ ,
__rx_gate__ ,
__ry_gate__ ,
__rz_gate__ ,
__cphase_gate__ ,
__t_gate__ ,
__tdag_gate__ ,
__sdag_gate__ ,
__custom_gate__ ,
__prepx_gate__ ,
__prepy_gate__ ,
__prepz_gate__ ,
__measure_gate__ ,
__measure_reg_gate__,
__measure_x_gate__ ,
__measure_x_reg_gate__,
__measure_y_gate__ ,
__measure_y_reg_gate__,
__ctrl_phase_shift_gate__,
__parallel_gate__,
__display__,
__display_binary__,
__print_str__,
__bin_ctrl_gate__,
__lookup_table__,
__classical_not_gate__,
__qft_gate__,
__prepare_gate__,
__unitary_gate__
} gate_type_t;
/**
 * gate coefficients
*/
QX_ALIGNED(64) const complex_t cnot_c [] = { complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(1.0), complex_t(0.0) }; /* CNOT */
QX_ALIGNED(64) const complex_t swap_c [] = { 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0 }; /* SWAP */
QX_ALIGNED(64) const complex_t identity_c [] = { complex_t(1.0), complex_t(0.0), complex_t(0.0), complex_t(1.0) }; /* I */
QX_ALIGNED(64) const complex_t pauli_x_c [] = { complex_t(0.0, 0.0) , complex_t(1.0, 0.0), complex_t(1.0, 0.0) , complex_t(0.0, 0.0) }; /* X */
QX_ALIGNED(64) const complex_t pauli_y_c [] = { complex_t(0.0, 0.0) , complex_t(0.0,-1.0), complex_t(0.0, 1.0) , complex_t(0.0, 0.0) }; /* Y */
QX_ALIGNED(64) const complex_t pauli_z_c [] = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(-1.0,0.0) }; /* Z */
QX_ALIGNED(64) const complex_t phase_c [] = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(0.0, 1.0) }; /* S */
QX_ALIGNED(64) const complex_t sdag_gate_c[] = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(0.0, -1.0) }; /* S_dag */
QX_ALIGNED(64) const complex_t t_gate_c [] = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(cos(QX_PI/4),sin(QX_PI/4)) }; /* T */
QX_ALIGNED(64) const complex_t tdag_gate_c[] = { complex_t(1.0, 0.0) , complex_t(0.0, 0.0), complex_t(0.0, 0.0) , complex_t(cos(QX_PI/4),-sin(QX_PI/4)) }; /* T_dag */
QX_ALIGNED(64) const complex_t hadamard_c [] = { R_SQRT_2, R_SQRT_2, R_SQRT_2, -R_SQRT_2 }; /* H */
#define __rc(r,c,s) (r*s+c)
/**
* \brief common abstract gate interface for
* all gates implementation.
*/
class gate
{
public:
virtual int64_t apply(qu_register& qureg) = 0;
virtual std::vector<uint64_t> qubits() = 0;
virtual std::vector<uint64_t> control_qubits() = 0;
virtual std::vector<uint64_t> target_qubits() = 0;
virtual gate_type_t type() = 0;
virtual std::string micro_code() { return "# unsupported operation : qubit out of range"; }
virtual void dump() = 0;
virtual ~gate() { };
virtual void set_duration(uint64_t d) { duration = d; }
virtual uint64_t get_duration() { return duration; }
protected:
uint64_t duration = 0;
};
/**
* \brief rotation in the x-z plane with a given
* angle theta (see "Large scale simulation of
 * error-prone quantum systems", p. 39 [Niwa 2002])
*/
inline cmatrix_t rotation(double theta)
{
cmatrix_t r; // (2,2);
r(0,0) = complex_t(cos(theta),0); r(0,1) = complex_t(-sin(theta),0);
r(1,0) = complex_t(sin(theta),0); r(1,1) = complex_t(cos(theta),0);
return r;
}
/**
* \brief phase shift for a given angle phi
*/
inline cmatrix_t phase(double phi)
{
cmatrix_t p; // (2,2);
p(0,0) = complex_t(1,0); p(0,1) = complex_t(0,0);
p(1,0) = complex_t(0,0); p(1,1) = complex_t(cos(phi),sin(phi));
return p;
}
/**
* \brief generate noisy hadamard gate
*/
cmatrix_t noisy_hadamard(double epsilon1=0, double epsilon2=0)
{
#ifdef __BUILTIN_LINALG__
return mxm(rotation(QX_PI/4 + epsilon1), phase(QX_PI + epsilon2));
#else
cmatrix_t rz = rotation(QX_PI/4 + epsilon1);
cmatrix_t p = phase(QX_PI + epsilon2);
return mxm(rz,p);
#endif
}
/**
* \brief build n x n matrix from an array
*/
cmatrix_t build_matrix(const complex_t * c, uint64_t n)
{
// assert(n==2);
// TODO: remove the n parameter
cmatrix_t m; // (n,n);
for (std::size_t i=0; i<n; i++)
for (std::size_t j=0; j<n; j++)
m(i,j) = c[i*n+j];
return m;
}
/**
* sqg_apply
*/
#ifdef QX_COMPACT_GATE_OP
inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg)
{
uint64_t n = qureg.size();
matrix_t m(2,row_t(2,0));
m[0][0] = cm(0,0); m[0][1] = cm(0,1);
m[1][0] = cm(1,0); m[1][1] = cm(1,1);
if (qubit == 0)
{
identity id(1UL << (n-1));
unitary_matrix um(cm.size1(),m);
kronecker k(&id, &um);
cvector_t r(qureg.get_data());
mulmv(k,qureg.get_data(),r);
qureg = r;
}
else if (qubit == n-1)
{
identity id(1UL << (n-1));
unitary_matrix um(cm.size1(),m);
kronecker k(&um, &id);
cvector_t r(qureg.get_data());
mulmv(k,qureg.get_data(),r);
qureg = r;
}
else
{
identity id1(1UL << (qubit));
identity id2(1UL << (n-qubit-1));
unitary_matrix um(cm.size1(),m);
kronecker k(&id2, &um, &id1);
cvector_t r(qureg.get_data());
mulmv(k,qureg.get_data(),r);
qureg = r;
}
}
/**
* u on the kth qubit :
 * non-null value in each row of the kronecker matrix:
* for each row r :
* c1 = r || 000100 // 1 at the n-k bit
* c2 = r || 000000
*/
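/**
 * worked example (illustrative): for n=3 and qubit=1 the affected bit is
 * n-k = qubit = 1, so row r = 0b101 reads columns
 *   c1 = __bit_reset(r,1) = 0b101 and c2 = __bit_set(r,1) = 0b111,
 * i.e. the two amplitudes whose indices differ only in qubit 1.
 */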
// #elif QX_SPARSE_MV_MUL
#else // QX_SPARSE_MV_MUL
uint64_t rw_process(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, const kronecker * m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t nk = n-k;
for (uint64_t r=is; r<ie; ++r)
{
size_t bc = r;
size_t c1 = __bit_reset(bc,nk);
size_t c2 = __bit_set(bc,nk);
// complex_t s; // = 0;
pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
}
return 0;
}
void sparse_mulmv(uint64_t n, uint64_t qubit, const kronecker& m, cvector_t& v, cvector_t& res)
{
uint64_t k = n-qubit;
uint64_t rows = (1UL << n);
uint64_t z = 0UL;
/*xpu::task rw_t(rw_process,0,0,0,n,qubit,&m,&v,&res);
xpu::parallel_for process(z,rows,1,&rw_t);
process.run();*/
rw_process(z,rows,0UL,n,qubit,&m,&v,&res);
}
void __apply_m(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
{
#if 0
__m128d m00 = matrix[0].xmm;
__m128d m01 = matrix[1].xmm;
__m128d m10 = matrix[2].xmm;
__m128d m11 = matrix[3].xmm;
#endif
complex_t m00 = matrix[0];
complex_t m01 = matrix[1];
complex_t m10 = matrix[2];
complex_t m11 = matrix[3];
#ifdef USE_OPENMP
#pragma omp parallel for // shared(m00,m01,m10,m11)
#endif
for(int64_t offset = start; offset < (int64_t)end; offset += (1UL << (qubit + 1)))
for(size_t i = (size_t)offset; i < (size_t)offset + (1UL << qubit); i++)
{
size_t i0 = i + stride0;
size_t i1 = i + stride1;
complex_t in0 = state[i0];
complex_t in1 = state[i1];
state[i0] = m00*in0+m01*in1;
state[i1] = m10*in0+m11*in1;
#if 0
__m128d in0 = state[i0].xmm;
__m128d in1 = state[i1].xmm;
state[i0].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m00, in0), xpu::_mm_mulc_pd(m01, in1));
state[i1].xmm = _mm_add_pd(xpu::_mm_mulc_pd(m10, in0), xpu::_mm_mulc_pd(m11, in1));
#endif
}
}
#ifdef __SSE__
// #ifdef __FMA__
void __apply_x(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
{
#ifdef USE_OPENMP
#pragma omp parallel for // private(m00,r00,neg)
#endif
for(int64_t offset = start; offset < (int64_t)end; offset += (1UL << (qubit + 1UL)))
for(size_t i = (size_t)offset; i < (size_t)offset + (1UL << qubit); i++)
{
size_t i0 = i + stride0;
size_t i1 = i + stride1;
__m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm));
// __m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm));
state[i0].xmm = state[i1].xmm;
state[i1].xmm = xin0;
}
}
// #else
// #error "FMA not available !"
// #endif // FMA
#else
#error "SSE not available !"
#endif // SSE
#ifdef __SSE__
// #ifdef __FMA__
void __apply_h(std::size_t start, std::size_t end, const std::size_t qubit, complex_t * state, const std::size_t stride0, const std::size_t stride1, const complex_t * matrix)
{
__m128d m00 = matrix[0].xmm;
__m128d r00 = _mm_shuffle_pd(m00,m00,3); // 1 cyc
__m128d neg = _mm_set1_pd(-0.0f);
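// every entry of H has magnitude 1/sqrt(2), so a single coefficient (r00)
// suffices: t2 below derives m11 = -m00 by flipping sign bits via xor with -0.0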
#ifdef USE_OPENMP
#pragma omp parallel for // private(m00,r00,neg)
#endif
for(int64_t offset = start; offset < (int64_t)end; offset += (1UL << (qubit + 1UL)))
for(size_t i = (size_t)offset; i < (size_t)offset + (1UL << qubit); i++)
{
size_t i0 = i + stride0;
size_t i1 = i + stride1;
__m128d xin0 = state[i0].xmm; // _mm_load_pd((double*)&(state[i0].xmm));
__m128d xin1 = state[i1].xmm; // _mm_load_pd((double*)&(state[i1].xmm));
__m128d t2; // = _mm_shuffle_pd(m01,m01,3); // 1 cyc
__m128d t1 = _mm_mul_pd(xin0,r00); // 5 cyc
#ifdef __FMA__
__m128d xi0 = _mm_fmadd_pd (xin1,r00, t1); // x2*t2+t1 // 5 cyc
#else
__m128d xi0 = _mm_mul_pd(xin1,r00);
xi0 = _mm_add_pd(xi0,t1); // x2*t2+t1 // 5 cyc
#endif // __FMA__
// t2 = _mm_shuffle_pd(m11,m11,3); // 1 cyc
t2 = _mm_xor_pd(r00,neg); // 1 cyc (m11=-m00)
#ifdef __FMA__
__m128d xi1 = _mm_fmadd_pd (xin1, t2, t1); // x2*t2+t1 // 5 cyc
#else
__m128d xi1 = _mm_mul_pd(xin1,t2);
xi1 = _mm_add_pd(xi1,t1); // x2*t2+t1 // 5 cyc
#endif
state[i0].xmm = xi0; // _mm_store_pd((double*)(&state[i0].xmm),xi0);
state[i1].xmm = xi1; // _mm_store_pd((double*)(&state[i1].xmm),xi1);
}
}
// #else
// #error "FMA not available !"
// #endif // FMA
#else
#error "SSE not available !"
#endif // SSE
uint64_t rw_process_ui(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
size_t nk = n-k;
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,nk);
c2 = __bit_set(bc,nk);
bc++;
#ifdef __OP_PREFETCH__
_mm_prefetch((char*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0);
_mm_prefetch((char*)&pv[__bit_set(bc,nk)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__
// cxc
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
// cxr
// pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#elif __SSE__
// complex_t s; // = 0;
//pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
// --- cc mul add ---
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
// --- cr mul add --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm));
// --- f. mul add ---
// pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#else
pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2)));
#endif
}
return 0;
}
void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res)
{
uint64_t k = n-qubit;
uint64_t rows = (1UL << n);
uint64_t z = 0;
#ifdef SEQUENTIAL
rw_process_ui(z,rows,1,n,qubit,m,&v,&res);
#else
/*xpu::task rw_t(rw_process_ui,0,0,0,n,qubit,m,&v,&res);
xpu::parallel_for process(z,rows,1,&rw_t);
process.run();*/
rw_process_ui(z,rows,0,n,qubit,m,&v,&res);
#endif
}
uint64_t rw_process_iu(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
size_t nk = n-k;
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,nk);
c2 = __bit_set(bc,nk);
bc++;
#ifdef __OP_PREFETCH__
_mm_prefetch((char*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0);
_mm_prefetch((char*)&pv[__bit_set(bc,nk)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__
// cxc
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
// cxr
// pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#elif __SSE__
// complex_t s; // = 0;
// pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
// --- cc mul add ---
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
// --- cr mul add --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm));
// --- f. mul add ---
// pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
#else
pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2)));
#endif
}
return 0;
}
void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iu m, cvector_t& v, cvector_t& res)
{
uint64_t k = n-qubit;
uint64_t rows = (1UL << n);
uint64_t z = 0;
#ifdef SEQUENTIAL
rw_process_iu(z,rows,1,n,qubit,m,&v,&res);
#else
/*xpu::task rw_t(rw_process_iu,0,0,0,n,qubit,m,&v,&res);
xpu::parallel_for process(z,rows,1,&rw_t);
process.run();*/
rw_process_iu(z,rows,0,n,qubit,m,&v,&res);
#endif
}
// static xpu::core::os::mutex mtx;
uint64_t rw_process_iui(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
size_t nk = n-k;
for (uint64_t r=is; r<ie; r++) //+=2)
{
// 1st
bc = r;
c1 = __bit_reset(bc,nk);
c2 = __bit_set(bc,nk);
bc++;
#ifdef __OP_PREFETCH__
_mm_prefetch((char*)&pv[__bit_reset(bc,nk)],_MM_HINT_T0);
_mm_prefetch((char*)&pv[__bit_set(bc,nk)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__
// mtx.lock();
// cxc :
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
// cxr
// pr[r].xmm = complex_t::_mm256_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
/*
__m256d a; //_mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]);
a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c1].xmm,1), 0);
a = _mm256_insertf128_pd(a,_mm_permute_pd(pv[c2].xmm,1), 1);
print("(r="<<r<<") : pr12: "); xpu::dump_m256d(a);
// __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm);
__m256d b;
b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c1).xmm, 1), 1);
print("(r="<<r<<") : c1 : "); xpu::dump_m256d(b);
b = _mm256_insertf128_pd(b,_mm_permute_pd(m.get(r,c2).xmm, 1), 0);
print("(r="<<r<<") : c2 : "); xpu::dump_m256d(b);
__m256d ab = xpu::_mm256_cmul_pd(a,b);
print("(r="<<r<<") : mul: "); xpu::dump_m256d(ab);
__m256d abr = _mm256_permute2f128_pd(ab, ab, 1);
print("(r="<<r<<") : prm: "); xpu::dump_m256d(abr);
ab = _mm256_add_pd(ab,abr);
print("(r="<<r<<") : add: "); xpu::dump_m256d(ab);
pr[r].xmm = _mm256_extractf128_pd(ab,0);
print("(r="<<r<<") : res:"); xpu::dump_m128d(pr[r].xmm);
mtx.unlock();
*/
#elif __SSE__
/*
mtx.lock();
print("(r="<<r<<") : pr1: "); xpu::dump_m128d(pv[c1].xmm);
print("(r="<<r<<") : pr2: "); xpu::dump_m128d(pv[c2].xmm);
print("(r="<<r<<") : c1 : "); xpu::dump_m128d((m.get(r,c1)).xmm);
print("(r="<<r<<") : c2 : "); xpu::dump_m128d((m.get(r,c2)).xmm);
*/
// --- cxc mul ---
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
// --- cxr mul --- pr[r].xmm = _mm_add_pd(complex_t::mul_cr(pv[c1].xmm,m.get(r,c1).xmm), complex_t::mul_cr(pv[c2].xmm,m.get(r,c2).xmm));
// --- fus ma ---
// pr[r].xmm = complex_t::_mm_cr_mul_add_pd(pv[c1].xmm, m.get(r,c1).xmm, pv[c2].xmm, m.get(r,c2).xmm);
// pr[r].xmm = xpu::_mm128_mul_add_pc(pv[c1].xmm, pv[c2].xmm, m.get(r,c1).xmm, m.get(r,c2).xmm);
/*
print("(r="<<r<<") : res: "); xpu::dump_m128d(pr[r].xmm);
mtx.unlock();
*/
#else
pr[r] = (pv[c1]*(m.get(r,c1))) + (pv[c2]*(m.get(r,c2)));
#endif
/*
// 2nd
c1 = __bit_reset(bc,n-k);
c2 = __bit_set(bc,n-k);
#ifdef __AVX__NO
a = _mm256_loadu2_m128d((double*)&pv[c1], (double*)&pv[c2]);
// __m256d b = _mm256_set_m128d((m.get(r,c1)).xmm, (m.get(r,c2)).xmm);
b = _mm256_insertf128_pd(b,(m.get(bc,c1)).xmm, 1);
b = _mm256_insertf128_pd(b,(m.get(bc,c2)).xmm, 0);
ab = xpu::_mm256_cmul_pd(a,b);
abr = _mm256_permute2f128_pd(ab, ab, 1);
ab = _mm256_add_pd(ab,abr);
pr[bc].xmm = _mm256_extractf128_pd(ab,0);
#elif __SSE__
pr[bc].xmm = _mm_add_pd((pv[c1]*(m.get(bc,c1))).xmm, (pv[c2]*(m.get(bc,c2))).xmm);
#else
pr[bc] = (pv[c1]*(m.get(bc,c1))) + (pv[c2]*(m.get(bc,c2)));
#endif
*/
}
return 0;
}
void sparse_mulmv(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res)
{
uint64_t k = n-qubit;
uint64_t rows = (1UL << n);
uint64_t z = 0;
#ifdef SEQUENTIAL
rw_process_iui(z,rows,1,n,qubit,m,&v,&res);
#else
/*xpu::task rw_t(rw_process_iui,0,0,0,n,qubit,m,&v,&res);
xpu::parallel_for process(z,rows,1,&rw_t);
process.run();*/
rw_process_iui(z,rows,0,n,qubit,m,&v,&res);
#endif
}
inline void sqg_apply(cmatrix_t & cm, uint64_t qubit, qu_register& qureg)
{
uint64_t n = qureg.size();
complex_t * s = qureg.get_data().data();
// cm.dump();
__apply_m(0, (1UL << n), qubit, s, 0, (1UL << qubit), cm.m);
return;
}
#endif // remove naive tensor computation
typedef enum
{
__x180__,
__x90__ ,
__y180__,
__y90__ ,
__ym90__
} elementary_operation_t;
static const char * pulse_lt[][5] =
{
{ " pulse 9,0,0", " pulse 10,0,0", " pulse 11,0,0", " pulse 12,0,0", " pulse 14,0,0" },
{ " pulse 0,9,0", " pulse 0,10,0", " pulse 0,11,0", " pulse 0,12,0", " pulse 0,14,0" },
{ " pulse 0,0,9", " pulse 0,0,10", " pulse 0,0,11", " pulse 0,0,12", " pulse 0,0,14" },
};
/**
* \brief hadamard gate:
*
* | 1 1|
* 1/sqrt(2) | |
* | 1 -1|
*/
class hadamard : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
hadamard(uint64_t qubit) : qubit(qubit) //,m((complex_t*)hadamard_c)
{
m = build_matrix(hadamard_c,2);
}
int64_t apply(qu_register& qureg)
{
size_t qs = qureg.states();
complex_t * data = qureg.get_data().data();
// sqg_apply(m,qubit,qureg);
__apply_h(0, qs, qubit, data, 0, (1UL << qubit), hadamard_c);
// __apply_m(0, qs, qubit, data, 0, (1 << qubit), hadamard_c);
//__apply_h_old(0, qs, qubit, data, 0, (1 << qubit), hadamard_c);
// qureg.set_binary(qubit,__state_unknown__);
qureg.set_measurement_prediction(qubit,__state_unknown__);
return 0;
}
std::string micro_code()
{
/**
| wait 5
| y90 q0 --> { pulse 12,0,0 }
| wait 5
| x180 q0 --> { pulse 9,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y90__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __hadamard_gate__;
}
void dump()
{
println(" [-] hadamard(q=" << qubit << ")");
}
};
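/**
 * illustrative usage sketch (not part of the original header); assumes the
 * usual qu_register(n) constructor from qx/core/register.h:
 *
 *   qx::qu_register reg(2);          // |00>
 *   qx::hadamard(0).apply(reg);      // (|00> + |01>)/sqrt(2)
 */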
inline void __swap(cvector_t& amp, size_t size, size_t bit, size_t trg, size_t ctrl, size_t offset=0)
{
// println("bit=" << bit);
// println("ctrl=" << ctrl);
complex_t * p = amp.data();
size_t incrementer = 1UL << (bit+1);
if ((1UL<<bit) == 1) {
for (size_t i=__bit_set(0,bit), end=(1UL<<size); i<end; i+=incrementer) {
size_t v = i+offset;
std::swap(amp[v], amp[__bit_reset(v,trg)]);
}
}
else {
for (size_t i=__bit_set(0,bit), end=(1UL<<size); i<end; i+=incrementer) {
size_t v = i+offset;
for (size_t j=0; j<(1UL<<bit); j++)
{
// v += j;
/*
#ifdef __SSE__
__m128d x = _mm_load_pd((const double *)&p[v]);
__m128d y = _mm_load_pd((const double *)&p[__bit_reset(v,trg)]);
_mm_store_pd((double *)&p[__bit_reset(v,trg)],x);
_mm_store_pd((double *)&p[v],y);
#else
*/
std::swap(amp[v], amp[__bit_reset(v,trg)]);
++v;
// println("swap("<<v<<","<<__bit_reset(v,trg)<<")");
// #endif
}
}
}
}
inline int cx_worker(uint64_t cs, uint64_t ce, uint64_t s, cvector_t * p_amp, size_t bit1, size_t bit2, size_t trg, size_t ctrl)
{
cvector_t & amp = * p_amp;
// xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
size_t step=(1UL << (bit1+1));
// size_t b = cs;
// size_t e = ce;
size_t offset = __bit_set(0,bit1);
//for (size_t i=__bit_set(0,bit1); i<(1<<size); i += (1 << (bit1+1)))
//__swap(amp,bit1,bit2,trg,ctrl,i);
// for (size_t i=b; i<e; i++)
// __swap(amp,bit1,bit2,trg,ctrl,offset+(i*step));
for (size_t i=cs; i<ce; i++)
__swap(amp,bit1,bit2,trg,ctrl,offset+(i*step));
return 0;
}
/**
* \brief controlled-not gate:
*
* | 1 0 0 0 |
* | 0 1 0 0 |
* | 0 0 0 1 |
 * | 0 0 1 0 |
*/
class cnot : public gate
{
private:
uint64_t control_qubit;
uint64_t target_qubit;
cmatrix_t m;
public:
cnot(uint64_t ctrl_q, uint64_t target_q) : control_qubit(ctrl_q),
target_qubit(target_q)
{
// m = build_matrix(cnot_c,4); // stack smashing
}
// #define CG_HASH_SET
//#define CG_MATRIX
#ifndef CG_BC
#ifndef CG_MATRIX
#define CG_BC
#endif
#endif // CG_BC
int64_t apply(qu_register& qreg)
{
// println("cnot " << control_qubit << "," << target_qubit);
#ifdef CG_MATRIX
uint64_t sn = qreg.states();
uint64_t qn = qreg.size();
uint64_t cq = control_qubit;
uint64_t tq = target_qubit;
cmatrix_t i = cidentity_t(sn);
perm_t p = perms(qn,cq,tq);
// dump_matrix(i);
for (perm_t::iterator it = p.begin(); it != p.end(); it++)
{
i(it->first,it->second) = 1;
i(it->second,it->first) = 1;
i(it->first, it->first) = 0;
i(it->second,it->second) = 0;
}
// dump_matrix(i);
qreg = mxv(i, qreg.get_data());
#elif defined(CG_BC)
uint64_t sn = qreg.states();
uint64_t qn = qreg.size();
uint64_t cq = control_qubit;
uint64_t tq = target_qubit;
cvector_t& amp = qreg.get_data();
// perms(qn,cq,tq,amp);
// #if 0
size_t b1 = std::max(cq,tq);
size_t b2 = std::min(cq,tq);
size_t steps = ((1UL << qn)-(__bit_set(0,b1)))/(1UL << (b1+1))+1;
/*
println("from=" << (__bit_set(0,b1)));
println("to=" << (1 << qn));
println("s=" << (1 << (b1+1)));
println("steps=" << steps);
*/
if (qn<17)
fast_cx(amp, qn, b1, b2, tq, cq);
else
{
#ifdef USE_OPENMP
#pragma omp parallel
{
#ifndef _MSC_VER
#pragma omp for simd
#endif
for (size_t i=0; i<steps; ++i)
cx_worker(i,i+1,1UL,&amp,b1,b2,(size_t)tq,(size_t)cq);
}
#else
xpu::task t(cx_worker,0UL,0UL,0UL,&amp,b1,b2,(size_t)tq,(size_t)cq);
xpu::parallel_for fswp(0, steps, 1, &t);
fswp.run();
#endif
}
// #endif
#elif defined(CG_HASH_SET)
uint64_t j = control_qubit+1;
uint64_t k = target_qubit+1;
uint64_t k2 = (1UL << (k-1));
uint64_t j2 = (1UL << (j-1));
uint64_t r_size = qreg.states();
xpu::container::hash_set<uint64_t> swap_set;
// find swap pairs
for (uint64_t t = 0; t < r_size; t++)
{
if ((t & j2) == 0)
continue;
if (swap_set.find(t-k2) == swap_set.end())
swap_set.insert(t);
}
int64_t t2;
cvector_t& amp = qreg.get_data();
complex_t c1(0., 0.), c2(0., 0.);
for (xpu::container::hash_set<uint64_t>::iterator t = swap_set.begin(); t != swap_set.end(); ++t)
{
int64_t _t = *t;
t2 = (_t + k2 < r_size) ? _t + k2 : _t - k2;
c1 = amp(_t);
c2 = amp(t2);
std::swap(c1, c2);
amp(_t) = c1;
amp(t2) = c2;
}
//qreg=amp;
#endif // CG_HASH_SET
// if (qreg.get_binary(control_qubit) == __state_1__)
if (qreg.get_measurement_prediction(control_qubit) == __state_1__)
qreg.flip_binary(target_qubit);
//else if (qreg.get_binary(control_qubit) == __state_unknown__)
else if (qreg.get_measurement_prediction(control_qubit) == __state_unknown__)
qreg.set_measurement_prediction(target_qubit,__state_unknown__);
// qreg.set_binary(target_qubit,__state_unknown__);
return 0;
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(control_qubit);
r.push_back(target_qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
r.push_back(control_qubit);
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(target_qubit);
return r;
}
gate_type_t type()
{
return __cnot_gate__;
}
void dump()
{
println(" [-] cnot(ctrl_qubit=" << control_qubit << ", target_qubit=" << target_qubit << ")");
}
private:
#if 0
void __swap(cvector_t& amp, size_t size, size_t bit, size_t trg, size_t ctrl, size_t offset=0)
{
// println("bit=" << bit);
// println("ctrl=" << ctrl);
for (size_t i=__bit_set(0,bit); i<(1UL<<size); i += (1UL << (bit+1)))
for (size_t j=0; j<(1<<bit); j++)
{
size_t v = i+j+offset;
std::swap(amp[v], amp[__bit_reset(v,trg)]);
// println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")");
}
}
#endif
void fast_cx(cvector_t& amp, size_t size, size_t bit1, size_t bit2, size_t trg, size_t ctrl)
{
/*
println("from=" << (__bit_set(0,bit1)));
println("to=" << (1 << size));
println("s=" << (1 << (bit1+1)));
*/
for (size_t i=__bit_set(0,bit1); i<(1UL<<size); i += (1UL << (bit1+1)))
__swap(amp,bit1,bit2,trg,ctrl,i);
}
};
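/**
 * illustrative sketch (not part of the original header): with control 0 and
 * target 1, cnot swaps the amplitude pairs whose indices have bit 0 set and
 * differ only in bit 1, i.e. |01> <-> |11>:
 *
 *   qx::cnot(0,1).apply(reg);
 */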
template<typename T>
void swap_if_greater(T& a, T& b)
{
if (a > b)
{
T tmp(a);
a = b;
b = tmp;
}
}
template<typename T>
void sort(T& a, T& b, T& c)
{
swap_if_greater(a, b);
swap_if_greater(a, c);
swap_if_greater(b, c);
}
/**
 * \brief toffoli gate (ccnot):
 *
 * 8x8 unitary equal to the identity except on the |11x> subspace,
 * where it swaps the last two basis states (the target qubit is
 * flipped when both controls are 1).
 */
class toffoli : public gate
{
private:
uint64_t control_qubit_1;
uint64_t control_qubit_2;
uint64_t target_qubit;
public:
toffoli(uint64_t ctrl_q1, uint64_t ctrl_q2, uint64_t target_q) : control_qubit_1(ctrl_q1),
control_qubit_2(ctrl_q2),
target_qubit(target_q)
{
}
int64_t apply(qu_register& qreg)
{
uint64_t sn = qreg.states();
uint64_t qn = qreg.size();
uint64_t cq1 = control_qubit_1;
uint64_t cq2 = control_qubit_2;
uint64_t tq = target_qubit;
cvector_t& amp = qreg.get_data();
//println("\ntoffoli " << cq1 << "," << cq2 << "," << tq);
#if 1
size_t c1=cq1;
size_t c2=cq2;
size_t c3=tq;
size_t t=tq;
size_t size=qn;
sort(c1,c2,c3);
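// enumerate every basis index whose (sorted) bits c1 <= c2 <= c3 are all set,
// then swap each amplitude with its target-bit-cleared partner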
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int64_t i=__bit_set(__bit_set(__bit_set(0,c1),c2),c3); i<(int64_t)(1UL<<size); i += (1UL << (c3+1)))
for (size_t j=(size_t)i; j<((size_t)i+(1UL<<c3)); j += (1UL << (c2+1)))
for (size_t k=j; k<(j+(1UL<<c2)); k+=(1UL << (c1+1)))
for (size_t l=k; l<(k+(1UL<<(c1))); l++)
{
std::swap(amp[__bit_set(l,t)],amp[__bit_reset(l,t)]);
// println("swap : " << __bit_set(l,t) << "," << __bit_reset(l,t));
}
#else
std::vector<uint64_t> done(sn, 0);
perm_t p = perms(qn,cq1,cq2,tq);
uint64_t p1,p2;
for (perm_t::iterator it = p.begin(); it != p.end(); it++)
{
p1 = it->first;
p2 = it->second;
if (!(done[p1] || done[p2]))
//if (!(done[p1]))
{
// std::swap(amp(p1),amp(p2)); // ublas
std::swap(amp[p1],amp[p2]);
//println("swap : " << p1 << "," << p2);
done[p1] = 1;
done[p2] = 1;
}
}
#endif
if ((qreg.get_measurement_prediction(control_qubit_1) == __state_1__) &&
(qreg.get_measurement_prediction(control_qubit_2) == __state_1__) )
{
qreg.flip_binary(target_qubit);
}
else if ((qreg.get_measurement_prediction(control_qubit_1) == __state_unknown__) ||
(qreg.get_measurement_prediction(control_qubit_2) == __state_unknown__) )
{
qreg.set_measurement_prediction(target_qubit,__state_unknown__);
// qreg.set_binary(target_qubit,__state_unknown__);
}
return 0;
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(control_qubit_1);
r.push_back(control_qubit_2);
r.push_back(target_qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
r.push_back(control_qubit_1);
r.push_back(control_qubit_2);
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(target_qubit);
return r;
}
gate_type_t type()
{
return __toffoli_gate__;
}
void dump()
{
println(" [-] toffoli(ctrl_qubit_1=" << control_qubit_1 << ", ctrl_qubit_2=" << control_qubit_2 << ", target_qubit=" << target_qubit << ")");
}
};
int fliper(int cs, int ce, int s, uint64_t q, cvector_t * p_amp)
{
cvector_t & amp = * p_amp;
for (int i=cs; i<ce; ++i)
{
if (__bit_test(i,q))
std::swap(amp[i],amp[__bit_flip(i,q)]);
}
return 0;
}
#define __swap_xmm(x,y) { x = _mm_xor_pd(x,y); y = _mm_xor_pd(y,x); x = _mm_xor_pd(x,y); }
void fast_flip(uint64_t q, uint64_t n, cvector_t& amp)
{
complex_t * x = amp.data();
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int64_t i=0; i<(int64_t)(1UL << n); i+=(1UL << (q+1)))
for (size_t j=(size_t)i; j<((size_t)i+(1UL << q)); j++)
//__swap_xmm(x[j].xmm,x[__bit_flip(j,q)].xmm);
std::swap(x[j].xmm,x[__bit_flip(j,q)].xmm);
}
void flip(uint64_t q, uint64_t n, cvector_t& amp)
{
uint64_t nn = (1UL << n);
uint64_t p1, p2;
std::bitset<MAX_QB_N> b;
// perm_t res;
b.reset();
b.set(q);
uint64_t bc = b.to_ulong();
while (bc < nn)
{
b.set(q); p1 = b.to_ulong();
b.flip(q); p2 = b.to_ulong();
if (p2<p1)
std::swap(amp[p1],amp[p2]);
b.flip(q);
b = inc(b);
b.set(q);
bc = b.to_ulong();
}
//return res;
}
/**
* \brief identity :
*
* | 1 0 |
* | 0 1 |
*
*/
class identity : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
identity(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(identity_c,2);
}
int64_t apply(qu_register& qreg)
{
return 0;
}
std::string micro_code()
{
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
// uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] identity(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __identity_gate__;
}
};
/**
* \brief pauli-x :
*
* | 0 1 |
* | 1 0 |
*
*/
class pauli_x : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
pauli_x(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(pauli_x_c,2);
}
int64_t apply(qu_register& qreg)
{
// #define FAST_FLIP
#ifdef FAST_FLIP
uint64_t qn = qreg.size();
cvector_t& amp = qreg.get_data();
// flip(qubit,qn,amp);
fast_flip(qubit,qn,amp);
/*
xpu::task flip_t(fliper,0,0,0,qubit,&amp);
xpu::parallel_for parallel_flip(0,(1 << qn),1,&flip_t);
parallel_flip.run();
*/
#else
uint64_t n = qreg.size();
complex_t * s = qreg.get_data().data();
// cm.dump();
__apply_m(0, (1UL << n), qubit, s, 0, (1UL << qubit), m.m);
// sqg_apply(m,qubit,qreg);
#endif // FAST_FLIP
qreg.flip_binary(qubit);
return 0;
}
std::string micro_code()
{
/**
| wait 5
| x180 q0 --> { pulse 9,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] pauli-x(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __pauli_x_gate__;
}
};
/**
* \brief pauli-y :
*
* | 0 -i |
* | i 0 |
*/
class pauli_y : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
pauli_y(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(pauli_y_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.flip_binary(qubit);
return 0;
}
std::string micro_code()
{
/**
| wait 5
| y180 q0 --> { pulse 11,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] pauli-y(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __pauli_y_gate__;
}
};
/**
* \brief pauli-z :
*
* | 1 0 |
* | 0 -1 |
*/
class pauli_z : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
pauli_z(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(pauli_z_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
std::string micro_code()
{
/**
| y180 q0 --> { pulse 11,0,0 }
| wait 4
| x180 q0 --> { pulse 9,0,0 }
*/
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y180__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__x180__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] pauli-z(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __pauli_z_gate__;
}
};
/**
* \brief phase :
*
* | 1 0 |
* | 0 i |
*/
class phase_shift : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
phase_shift(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(phase_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
std::string micro_code()
{
if (qubit > 2) return "# unsupported operation : qubit out of range";
std::stringstream uc;
uc << pulse_lt[qubit][__y90__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__x90__] << "\n";
uc << " wait 4 \n";
uc << pulse_lt[qubit][__ym90__] << "\n";
uc << " wait 4 \n";
return uc.str();
}
void dump()
{
println(" [-] phase(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __phase_gate__;
}
};
/**
* \brief S dag gate
*/
class s_dag_gate : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
s_dag_gate(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(sdag_gate_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
void dump()
{
println(" [-] s_dag_gate(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __sdag_gate__;
}
};
/**
* \brief T gate
*/
class t_gate : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
t_gate(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(t_gate_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
void dump()
{
println(" [-] t_gate(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __t_gate__;
}
};
/**
* \brief T dag gate
*/
class t_dag_gate : public gate
{
private:
uint64_t qubit;
cmatrix_t m;
public:
t_dag_gate(uint64_t qubit) : qubit(qubit)
{
m = build_matrix(tdag_gate_c,2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
return 0;
}
void dump()
{
println(" [-] t_dag_gate(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __tdag_gate__;
}
};
/**
 * global-phase factoring : strip the global phase from a 2x2 matrix
 * and renormalize its columns
 */
void reset_gphase(cmatrix_t& m)
{
double n = m(0,0).norm();
if (n > 10e-9)
{
complex_t p(m(0,0).re/n,m(0,0).im/n);
m(0,0) /= p;
m(0,1) /= p;
m(1,0) /= p;
m(1,1) /= p;
}
else
{
n = m(0,1).norm();
complex_t p(m(0,1).re/n,m(0,1).im/n);
m(0,0) /= p;
m(0,1) /= p;
m(1,0) /= p;
m(1,1) /= p;
}
double n1 = std::sqrt(m(0,0).norm()+m(1,0).norm());
double n2 = std::sqrt(m(0,1).norm()+m(1,1).norm());
m(0,0) /= n1;
m(0,1) /= n2;
m(1,0) /= n1;
m(1,1) /= n2;
}
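// e.g. for m = e^(i*a) * H the first branch divides out the phase of m(0,0),
// recovering H up to the column renormalization performed below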
/**
 *                  | cos(theta/2)              -e^(i*lambda)*sin(theta/2)       |
 * general gate u = |                                                            |
 *                  | e^(i*phi)*sin(theta/2)     e^(i*phi+i*lambda)*cos(theta/2) |
 */
class unitary : public gate
{
private:
uint64_t qubit;
double angle[3];
cmatrix_t m;
public:
unitary(uint64_t qubit, double angle[3]) : qubit(qubit)
{
// angle[0]=theta, angle[1]=lambda, angle[2]=phi : the original code indexed
// angle[1..3], reading one past the 3-entry array, and never copied the
// angles into the member used by dump()/get_angle()
this->angle[0] = angle[0]; this->angle[1] = angle[1]; this->angle[2] = angle[2];
// m.resize(2,2);
m(0,0) = cos(angle[0]/2); m(0,1) = complex_t(-cos(angle[1]/2),-sin(angle[1]/2))*sin(angle[0]/2);
m(1,0) = complex_t(cos(angle[2]/2),sin(angle[2]/2))*sin(angle[0]/2) ; m(1,1) = complex_t(cos((angle[2]/2)+(angle[1]/2)),sin((angle[2]/2)+(angle[1]/2)))*cos(angle[0]/2);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
// qreg.set_binary(qubit,__state_unknown__);
return 0;
}
double get_angle()
{
return *angle;
}
void dump()
{
println(" [-] unitary(qubit=" << qubit << ", angle=" << angle << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __unitary_gate__;
}
};
/**
* \brief rotation-x :
*/
class rx : public gate
{
private:
uint64_t qubit;
double angle;
cmatrix_t m;
public:
rx(uint64_t qubit, double angle) : qubit(qubit), angle(angle)
{
// m.resize(2,2);
m(0,0) = cos(angle/2); m(0,1) = complex_t(0,-sin(angle/2));
m(1,0) = complex_t(0,-sin(angle/2)); m(1,1) = cos(angle/2);
reset_gphase(m);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
// qreg.set_binary(qubit,__state_unknown__);
return 0;
}
void dump()
{
println(" [-] rx(qubit=" << qubit << ", angle=" << angle << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __rx_gate__;
}
};
/**
* \brief rotation-y :
*/
class ry : public gate
{
private:
uint64_t qubit;
double angle;
cmatrix_t m;
public:
ry(uint64_t qubit, double angle) : qubit(qubit), angle(angle)
{
// m.resize(2,2);
m(0,0) = cos(angle/2); m(0,1) = -sin(angle/2);
m(1,0) = sin(angle/2); m(1,1) = cos(angle/2);
// reset_gphase(m);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
//qreg.set_binary(qubit,__state_unknown__);
return 0;
}
void dump()
{
println(" [-] ry(qubit=" << qubit << ", angle=" << angle << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __ry_gate__;
}
};
/**
* \brief rotation-z :
*/
class rz : public gate
{
private:
uint64_t qubit;
double angle;
cmatrix_t m;
public:
rz(uint64_t qubit, double angle) : qubit(qubit), angle(angle)
{
// m.resize(2,2);
m(0,0) = complex_t(cos(-angle/2), sin(-angle/2)); m(0,1) = 0;
m(1,0) = 0; m(1,1) = complex_t(cos(angle/2), sin(angle/2));
reset_gphase(m);
}
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
//qreg.set_binary(qubit,__state_unknown__);
return 0;
}
void dump()
{
println(" [-] rz(qubit=" << qubit << ", angle=" << angle << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
gate_type_t type()
{
return __rz_gate__;
}
};
void __shift(cvector_t& amp, size_t size, size_t bit, complex_t p, size_t offset=0)
{
// println("bit=" << bit);
// println("ctrl=" << ctrl);
complex_t * x = amp.data();
// println(">>>> " << p);
for (size_t i=__bit_set(0,bit); i<(1UL<<size); i += (1UL << (bit+1)))
for (size_t j=0; j<(1UL<<bit); j++)
{
size_t v = i+j+offset;
// amp[v] *= p;
// println(" before mul : " << x[v]);
x[v] *= p;
// println(" after mul : " << x[v]);
// println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")");
}
}
void __shift(complex_t * x, size_t size, size_t bit, complex_t p, size_t offset=0)
{
// println("bit=" << bit);
// println("ctrl=" << ctrl);
for (size_t i=__bit_set(0,bit); i<(1UL<<size); i += (1UL << (bit+1)))
for (size_t j=0; j<(1UL<<bit); j++)
{
size_t v = i+j+offset;
// amp[v] *= p;
x[v] *= p;
// println(" swap(" << std::bitset<16>(v) << "," << std::bitset<16>(__bit_reset(v,trg)) << ")");
}
}
int shift_worker(int cs, int ce, int s, cvector_t * p_amp, size_t bit1, size_t bit2, complex_t p)
{
cvector_t & amp = * p_amp;
// xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
size_t step=(1UL << (bit1+1));
size_t b = cs;
size_t e = ce;
size_t offset = __bit_set(0,bit1);
//for (size_t i=__bit_set(0,bit1); i<(1<<size); i += (1 << (bit1+1)))
//__swap(amp,bit1,bit2,trg,ctrl,i);
for (size_t i=b; i<e; i++)
__shift(amp,bit1,bit2,p,offset+(i*step));
return 0;
}
uint64_t qft_1st_fold_worker(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,n-k);
c2 = __bit_set(bc,n-k);
#ifdef __OP_PREFETCH__
_mm_prefetch((char*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0);
_mm_prefetch((char*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__ //NO
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
#else
// complex_t s; // = 0;
//pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
#endif
}
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
size_t bit1 = j;
size_t step=(1UL << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=is; i<ie; i++)
{
// println("i=" << i*step);
__shift(pr,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
void qft_1st_fold(uint64_t n, uint64_t qubit, kronecker_ui m, cvector_t& v, cvector_t& res)
{
uint64_t k = n-qubit;
uint64_t rows = (1UL << n);
uint64_t z = 0;
//xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res);
//xpu::parallel_for process(z,rows,1,&qf_t);
//process.run();
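// process the 2^n rows in fixed-size batches so the OpenMP loop below can
// distribute whole batches across threads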
static const uint64_t SIZE = 1000;
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int64_t batch = 0; batch <= rows / SIZE; batch++) {
qft_1st_fold_worker(batch*SIZE,std::min<uint64_t>((batch+1)*SIZE,rows),1,n,qubit,m,&v,&res);
}
}
uint64_t qft_nth_fold_worker(uint64_t is, uint64_t ie, uint64_t s, uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t * v, cvector_t * res)
{
uint64_t k = n-qubit;
// println("run : " << is << " .. " << ie);
complex_t * pv = v->data();
complex_t * pr = res->data();
size_t bc, c1, c2;
for (uint64_t r=is; r<ie; ++r)
{
bc = r;
c1 = __bit_reset(bc,n-k);
c2 = __bit_set(bc,n-k);
#ifdef __OP_PREFETCH__
_mm_prefetch((char*)&pv[__bit_reset((bc+1),n-k)],_MM_HINT_T0);
_mm_prefetch((char*)&pv[__bit_set((bc+1),n-k)],_MM_HINT_T0);
#endif // __OP_PREFETCH__
#ifdef __AVX__ //NO
xpu::_mm_cmul_add_pd(pv[c1], pv[c2], m.get(r,c1), m.get(r,c2),pr[r]);
#else
// complex_t s; // = 0;
//pr[r] = pv[c1]*(m->get(r,c1)) + pv[c2]*(m->get(r,c2));
pr[r].xmm = _mm_add_pd((pv[c1]*(m.get(r,c1))).xmm, (pv[c2]*(m.get(r,c2))).xmm);
#endif
}
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
size_t bit1 = j;
size_t step=(1UL << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=is; i<ie; i++)
{
__shift(pr,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
void qft_nth_fold(uint64_t n, uint64_t qubit, kronecker_iui m, cvector_t& v, cvector_t& res)
{
uint64_t k = n-qubit;
uint64_t rows = (1UL << n);
uint64_t z = 0;
//xpu::task qf_t(qft_fold_worker,0,0,0,n,qubit,m,&v,&res);
//xpu::parallel_for process(z,rows,1,&qf_t);
//process.run();
static const uint64_t SIZE = 1000;
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int64_t batch = 0; batch <= rows / SIZE; batch++) {
qft_nth_fold_worker(batch*SIZE,std::min<uint64_t>((batch+1)*SIZE,rows),1,n,qubit,m,&v,&res);
}
}
int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, cvector_t& p_out, kronecker_ui kr, size_t qubit)
{
complex_t * in = p_in.data();
complex_t * out = p_out.data();
cvector_t & amp = p_out;
// xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
size_t b = cs;
size_t e = ce;
rw_process_ui(cs, ce, s, n, qubit, kr, &p_in, &p_out); // H
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
size_t bit1 = j;
size_t step=(1UL << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=b; i<e; i++)
{
println("i=" << i*step);
__shift(amp,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
int qft_worker(int cs, int ce, int s, size_t n, cvector_t& p_in, cvector_t& p_out, kronecker_iui kr, size_t qubit)
{
complex_t * in = p_in.data();
complex_t * out = p_out.data();
cvector_t & amp = p_out;
// xpu::parallel_for fswp(__bit_set(0,b1), (1 << qn), (1 << (b1+1)), &t);
size_t b = cs;
size_t e = ce;
rw_process_iui(cs, ce, s, n, qubit, kr, &p_in, &p_out); // H
// (an early "return 0;" here used to skip the phase-shift pass below)
size_t bit2 = qubit;
for (size_t j=qubit+1; j<n; ++j)
{
complex_t p(cos(QX_PI/(1UL << (j-qubit))), sin(QX_PI/(1UL << (j-qubit))));
size_t bit1 = j;
size_t step=(1UL << (bit1+1));
size_t offset = __bit_set(0,bit1);
for (size_t i=b; i<e; i++)
{
__shift(p_out,bit1,bit2,p,offset+(i*step));
}
}
return 0;
}
/**
* \brief qft
*/
class qft : public gate
{
private:
std::vector<uint64_t> qubit;
cmatrix_t hm;
public:
qft(std::vector<uint64_t> qubit) : qubit(qubit)
{
hm = build_matrix(hadamard_c,2);
}
int64_t apply(qu_register& qreg)
{
size_t n = qreg.size();
size_t s = qreg.states();
cvector_t& in = qreg.get_data();
cvector_t& out = qreg.get_aux();
// kronecker_ui kui(hm,2,(1 << (n-1)));
kronecker_ui kui(hadamard_c,2,(1UL << (n-1)));
qft_1st_fold(n, 0, kui, in, out);
for (size_t i=1; i<n-1; ++i)
{
size_t q = qubit[i];
// kronecker_iui kiui(hm, 2, (1 << (n-q-1)), (1 << (q)));
kronecker_iui kiui(hadamard_c, 2, (1UL << (n-q-1)), (1UL << (q)));
qft_nth_fold(n, 0, kiui, in, out);
}
in.swap(out);
return 0;
#if 0
// 1st fold
qft_worker(0, s, 1, n, in, out, kronecker_ui(m,2,s-2), 0);
return 0;
// ith fold
for (size_t i=1; i<qubit.size(); ++i)
{
size_t q = qubit[i];
kronecker_iui k(m, 2, (1UL << (n-q-1)), (1UL << (q)));
qft_worker(0, qreg.states(), 1, qreg.size(), (qreg.get_data()), (qreg.get_aux()), k, q);
}
// last fold
kronecker_iu k(m,2,(1UL << (n-1)));
sparse_mulmv(n,qubit[n-1],k,qreg.get_data(),qreg.get_aux());
in.swap(out);
return 0;
#endif
}
void dump()
{
print(" [-] qft(");
for (size_t i=0; i<(qubit.size()-1); ++i)
print("q" << qubit[i] << ",");
println("q" << qubit[qubit.size()-1] << ")");
}
std::vector<uint64_t> qubits()
{
return qubit;
}
std::vector<uint64_t> control_qubits()
{
return qubit;
}
std::vector<uint64_t> target_qubits()
{
return qubit;
}
gate_type_t type()
{
return __qft_gate__;
}
};
/**
 * apply a 2x2 matrix to the index ranges selected by a control qubit
 * (used by the controlled phase shifter below)
 */
void __apply_cm(complex_t * state,
complex_t m[2][2],
std::size_t i11, std::size_t i12, std::size_t i13,
std::size_t i21, std::size_t i22, std::size_t i23,
std::size_t i31, std::size_t i32, std::size_t ish )
{
complex_t m00 = m[0][0],
m01 = m[0][1],
m10 = m[1][0],
m11 = m[1][1];
for(std::size_t r1 = i11; r1 < i12; r1 += i13)
{
#ifdef USE_OPENMP
// #pragma omp parallel for
#endif
for(std::size_t r2 = r1 + i21; r2 < r1 + i22; r2 += i23)
{
for(std::size_t ind0 = r2 + i31; ind0 < r2 + i32; ind0++)
{
std::size_t ind1 = ind0 + ish;
complex_t in0 = state[ind0], in1 = state[ind1];
state[ind0] = m00 * in0 + m01 * in1;
state[ind1] = m10 * in0 + m11 * in1;
}
}
}
}
/**
* \brief controlled phase shift by arbitrary phase angle or (2*pi/(2^(k=ctrl-target)))
*/
class ctrl_phase_shift : public gate
{
private:
uint64_t ctrl_qubit;
uint64_t target_qubit;
complex_t z;
complex_t m[2][2];
double phase;
protected:
void build_operator()
{
m[0][0] = complex_t(cos(-phase/2), sin(-phase/2)); m[0][1] = 0.0;
m[1][0] = 0.0; m[1][1] = complex_t(cos(phase/2), sin(phase/2));
double n = m[0][0].norm();
if (n > 10e-9)
{
complex_t p(m[0][0].re/n,m[0][0].im/n);
m[0][0] /= p;
m[0][1] /= p;
m[1][0] /= p;
m[1][1] /= p;
}
else
{
n = m[0][1].norm();
complex_t p(m[0][1].re/n,m[0][1].im/n); // normalize by the off-diagonal entry in this branch
m[0][0] /= p;
m[0][1] /= p;
m[1][0] /= p;
m[1][1] /= p;
}
}
public:
/**
* ctor (q)
*/
ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit) : ctrl_qubit(ctrl_qubit),
target_qubit(target_qubit),
z(0.0, 0.0)
{
phase = 2*QX_PI/(1UL << (ctrl_qubit - target_qubit));
build_operator();
}
/**
* ctor (k)
*/
ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, size_t k) : ctrl_qubit(ctrl_qubit),
target_qubit(target_qubit)
{
phase = 2*QX_PI/(1UL << k);
build_operator();
}
/**
* ctor (p)
*/
ctrl_phase_shift(uint64_t ctrl_qubit, uint64_t target_qubit, double angle) : ctrl_qubit(ctrl_qubit),
target_qubit(target_qubit)
{
phase = angle;
build_operator();
}
int64_t apply(qu_register& qreg)
{
uint64_t n = qreg.size();
complex_t * s = qreg.get_data().data();
size_t c = ctrl_qubit;
size_t t = target_qubit;
if (c > t)
__apply_cm(s,
m,
0UL, (1UL << n), 1UL << (c+1UL),
1UL << c, 1UL << (c+1UL), 1UL << (t+1UL),
0UL, 1UL << t, 1UL << t);
else
__apply_cm(s,
m,
0UL, (1UL << n), 1UL << (t+1UL),
0UL, 1UL << t, 1UL << (c+1UL),
1UL << c, 1UL << (c+1UL), 1UL << t);
return 0;
}
void dump()
{
println(" [-] ctrl_phase_shift(ctrl_qubit=" << ctrl_qubit << ", target_qubit: " << target_qubit << ", phase = (" << z.re << ", i." << z.im << ") )");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
r.push_back(target_qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(target_qubit);
return r;
}
gate_type_t type()
{
return __ctrl_phase_shift_gate__;
}
};
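/*
 * For reference (a sketch of the intended operator): build_operator first
 * forms diag(e^{-i*phase/2}, e^{+i*phase/2}) and then divides out the phase
 * of the first diagonal entry, so the matrix actually applied is
 *
 *   m = | 1        0      |
 *       | 0   e^{i*phase} |
 *
 * which for phase = 2*pi/2^k is the textbook controlled-R_k rotation used
 * in the QFT.
 */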
/**
* \brief swap :
*
* | 1 0 0 0 |
* | 0 0 1 0 |
* | 0 1 0 0 |
* | 0 0 0 1 |
*/
class swap : public gate
{
private:
uint64_t qubit1;
uint64_t qubit2;
// cmatrix_t m;
public:
swap(uint64_t qubit1, uint64_t qubit2) : qubit1(qubit1), qubit2(qubit2)
{
// m = build_matrix(swap_c,4);
}
int64_t apply(qu_register& qreg)
{
cnot(qubit1,qubit2).apply(qreg);
cnot(qubit2,qubit1).apply(qreg);
cnot(qubit1,qubit2).apply(qreg);
return 0;
}
void dump()
{
println(" [-] swap(q1=" << qubit1 << ", q2=" << qubit2 << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit1);
r.push_back(qubit2);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit1);
r.push_back(qubit2);
return r;
}
gate_type_t type()
{
return __swap_gate__;
}
};
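/*
 * The three-CNOT decomposition in apply() is the standard identity
 * SWAP(a,b) = CNOT(a,b) CNOT(b,a) CNOT(a,b). E.g. on basis state |a=1,b=0>:
 * CNOT(a,b) -> |1,1>, CNOT(b,a) -> |0,1>, CNOT(a,b) -> |0,1>, i.e. the two
 * qubit values have been exchanged.
 */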
/**
* \brief cphase
*/
class cphase : public gate
{
private:
uint64_t ctrl_qubit;
uint64_t target_qubit;
public:
cphase(uint64_t ctrl_qubit, uint64_t target_qubit) : ctrl_qubit(ctrl_qubit), target_qubit(target_qubit)
{
}
int64_t apply(qu_register& qreg)
{
hadamard(target_qubit).apply(qreg);
cnot(ctrl_qubit,target_qubit).apply(qreg);
hadamard(target_qubit).apply(qreg);
return 0;
}
void dump()
{
println(" [-] cphase(ctrl_qubit=" << ctrl_qubit << ", target_qubit=" << target_qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
r.push_back(target_qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
r.push_back(ctrl_qubit);
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
r.push_back(target_qubit);
return r;
}
gate_type_t type()
{
return __cphase_gate__;
}
};
/**
* \brief custom matrix gate
*
*/
class custom : public gate
{
private:
// std::vector<uint64_t> qubits;
uint64_t qubit;
cmatrix_t m;
public:
// #ifdef __BUILTIN_LINALG__
// custom(std::vector<uint64_t> qubits, qx::linalg::matrix<complex_t> m) : qubits(qubits), m(m)
// #else
custom(uint64_t qubit, cmatrix_t m) : qubit(qubit), m(m)
// #endif
{
// uint64_t size = 1 << qubits.size();
// if (size != m.size1() || size != m.size2())
// println("[x] error: cutom gate : the matrix size do not match the number of qubits !");
// verify also that the matrix is unitary
// #ifdef __BUILTIN_LINALG__
// cmatrix_t ctr(m.size2(),m.size1());
// qx::linalg::matrix<complex_t> ctr(m.size2(),m.size1());
// for (uint64_t i=0; i<m.size2(); ++i)
// for (uint64_t j=0; j<m.size1(); ++j)
// ctr(i,j) = m(j,i).conj();
// // cmatrix_t mxctr = mxm(m,ctr);
// qx::linalg::matrix<complex_t> mxctr = mxm(m,ctr);
// qx::linalg::identity_matrix<complex_t> id(m.size1());
// #else
// cmatrix_t mxctr = mxm(m,ublas::trans(conj(m)));
// ublas::identity_matrix<complex_t> id(m.size1());
// #endif
// #ifdef __BUILTIN_LINALG__
// if (qx::linalg::equals(mxctr,id))
// #else
// if (equals(mxctr,id))
// #endif
// println("[x] error: custom gate : the specified matrix is not unitary !");
}
/**
* apply
*/
int64_t apply(qu_register& qreg)
{
sqg_apply(m,qubit,qreg);
qreg.set_measurement_prediction(qubit,__state_unknown__);
return 0;
}
/**
* dump
*/
void dump()
{
println(" [-] custom matrix on qubit " << qubit);
// println(" [-] custom(qubits=" << qubits << ", matrix=" << m << ")");
}
/**
* type
*/
gate_type_t type()
{
return __custom_gate__;
}
};
double p1_worker(uint64_t cs, uint64_t ce, uint64_t qubit, cvector_t * p_data)
{
cvector_t &data = * p_data;
double local_p1 = 0;
uint64_t ref = 1UL << qubit;
uint64_t offset = 0;
// We need to calculate the "offset_start" in order to maintain the
// correctness of the index calculation in the parallel region
uint64_t reminder = cs % ref;
uint64_t factor = std::floor((cs - reminder) / ref);
uint64_t offset_start = factor * ref;
offset = offset_start;
/* ******************************************************************************* */
// The following for-loop is a decimal-based representation of the identical binary-
// based for-loop:
// uint64_t size = qreg.size();
// uint64_t n = (1 << size);
// std::bitset<MAX_QB_N> b;
// b.reset();
// b.set(qubit);
// for (uint64_t i = b.to_ulong(); i < n; i=b.to_ulong()) {
// p += data[i].norm();
// b = inc(b);
// b.set(qubit);
// }
/* ******************************************************************************* */
for (uint64_t i = cs; i < ce; ++i) {
if (!(i % ref))
offset = ref + i;
local_p1 += data[i + offset].norm();
}
return local_p1;
}
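/*
 * Worked example of the index mapping above (illustrative): for qubit = 1
 * (ref = 2) and a 3-qubit register (n = 8), the compressed indices
 * i = 0,1,2,3 visit data[i + offset] = data[2], data[3], data[6], data[7],
 * i.e. exactly the four basis states whose qubit-1 bit is set, so local_p1
 * accumulates the probability of measuring |1> on that qubit.
 */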
inline double zero_worker_norm(uint64_t cs, uint64_t ce, cvector_t * p_data)
{
uint64_t num_elts = ce - cs;
uint64_t tile_size = std::min<uint64_t>(num_elts, 32UL);
complex_t * vd = p_data->data();
double local_length = 0.;
#if defined(__AVX__)
__m256d sum = _mm256_set1_pd(0.0);
for (uint64_t i=cs; i<ce; i+=tile_size)
{
for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; j+=2)
{
double * pvd = (double*)&vd[j];
sum = _mm256_add_pd(sum, _mm256_mul_pd(_mm256_load_pd(pvd), _mm256_load_pd(pvd)));
}
}
__m256d r2 = _mm256_hadd_pd(sum, sum);
local_length = _mm_cvtsd_f64(_mm_add_pd(_mm256_extractf128_pd(r2, 1),
_mm256_castpd256_pd128(r2)));
#elif defined(__SSE__)
__m128d sum = _mm_set1_pd(0.0);
for (uint64_t i=cs; i<ce; i+=tile_size)
{
for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; ++j)
{
double * pvd = (double*)&vd[j];
sum = _mm_add_pd(sum, _mm_mul_pd(_mm_load_pd(pvd), _mm_load_pd(pvd)));
}
}
local_length = _mm_cvtsd_f64(_mm_hadd_pd(sum, sum));
#else
cvector_t &data = *p_data; // scalar fallback over the complex amplitudes
for (uint64_t i=cs; i<ce; i+=tile_size)
{
for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; j+=2)
{
local_length += data[j].norm() + data[j+1].norm();
}
}
#endif
return local_length;
}
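/*
 * All three branches above compute the same quantity; a scalar sketch of
 * what the SIMD paths evaluate (re^2 + im^2 summed over the slice, i.e. the
 * squared 2-norm of the amplitudes, assuming complex_t::norm() returns
 * |z|^2 as its use in the measurement code suggests):
 *
 *   double acc = 0.;
 *   for (uint64_t i = cs; i < ce; ++i)
 *      acc += (*p_data)[i].norm();
 *   return acc;
 */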
inline double zero_worker_true(uint64_t cs, uint64_t ce, uint64_t s, /*double * length,*/ uint64_t qubit, /*xpu::lockable * l, */cvector_t * p_data)
{
cvector_t &data = * p_data;
uint64_t pos = 1UL << qubit;
for (uint64_t i=cs; i<ce; i+=2)
{
// ((x) & (1<<(pos)))
if (i & pos)
data[i] = 0.0;
if ((i+1) & pos)
data[i+1] = 0.0;
}
return zero_worker_norm(cs, ce, p_data);
}
inline double zero_worker_false(uint64_t cs, uint64_t ce, uint64_t s, /*double * length,*/ uint64_t qubit, /*xpu::lockable * l, */cvector_t * p_data)
{
cvector_t &data = * p_data;
uint64_t pos = 1UL << qubit;
for (uint64_t i=cs; i<ce; i+=2)
{
// ((x) & (1<<(pos)))
if (!(i & pos)) {
data[i] = 0.0;
}
if (!((i+1) & pos))
data[i+1] = 0.0;
}
return zero_worker_norm(cs, ce, p_data);
}
int renorm_worker(uint64_t cs, uint64_t ce, uint64_t s, double * length, cvector_t * p_data)
{
cvector_t &data = * p_data;
double l = *length;
double l_rec = 1./l;
uint64_t num_elts = ce - cs;
uint64_t tile_size = std::min<uint64_t>(num_elts, 16UL);
complex_t * vd = p_data->data();
#ifdef __AVX512F__
__m512d vl = _mm512_set1_pd(l_rec);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (uint64_t i=cs; i<ce; i+=tile_size)
{
for (uint64_t j=i, end=std::min(ce,tile_size+i); j<end; j+=4)
{
double * pvd = (double*)&vd[j];
_mm512_store_pd(pvd, _mm512_mul_pd(_mm512_load_pd(pvd), vl));
}
}
#elif defined(__AVX__)
__m256d vl = _mm256_set1_pd(l_rec);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int64_t i=cs; i<(int64_t)ce; i+=tile_size)
{
for (uint64_t j=(uint64_t)i, end=std::min(ce,tile_size+(uint64_t)i); j<end; j+=2)
{
double * pvd = (double*)&vd[j];
_mm256_store_pd(pvd, _mm256_mul_pd(_mm256_load_pd(pvd), vl));
}
}
#elif defined(__SSE__)
__m128d vl = _mm_set1_pd(l_rec);
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int64_t i=cs; i<(int64_t)ce; i+=tile_size)
{
for (uint64_t j=(uint64_t)i, end=std::min(ce,tile_size+(uint64_t)i); j<end; ++j)
{
double * pvd = (double*)&vd[j];
_mm_store_pd(pvd, _mm_mul_pd(_mm_load_pd(pvd), vl));
}
}
#else
#ifdef USE_OPENMP
#pragma omp parallel for
#endif
for (int64_t i=cs; i<(int64_t)ce; i+=tile_size)
{
for (uint64_t j=(uint64_t)i, end=std::min(ce,tile_size+(uint64_t)i); j<end; ++j)
{
data[j] *= l_rec;
}
}
#endif
// // Update the remaining elements if there are any
// uint64_t reminder = num_elts % tile_size;
// if (reminder) {
// for (uint64_t i=ce-reminder; i<ce; ++i)
// {
// data[i] *= l_rec;
// }
// }
return 0;
}
/**
* measure
*/
class measure : public gate
{
private:
uint64_t qubit;
bool measure_all;
bool disable_averaging;
public:
measure(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging)
{
}
measure() : qubit(0), measure_all(true), disable_averaging(false)
{
}
int64_t apply(qu_register& qreg)
{
if (measure_all)
{
// qreg.measure();
for (size_t q=0; q<qreg.size(); q++)
qx::measure(q).apply(qreg);
return 0;
}
double f = qreg.rand();
double p = 0;
int64_t value;
uint64_t size = qreg.size();
uint64_t n = (1UL << size);
cvector_t& data = qreg.get_data();
double length = 0;
// Basically, this "if" operator determines what to do if we have more than 64 qubits.
// It also determines whether to invoke parallel or sequential computations. As of now,
// we set parallel execution as the default one.
if (1)//size > 64)
// if (size > 64)
{
// #define PARALLEL_MEASUREMENT
// #ifdef PARALLEL_MEASUREMENT
/*xpu::lockable * l = new xpu::core::os::mutex();
xpu::task p1_worker_t(p1_worker, (uint64_t)0, n, (uint64_t)1, &p, qubit, l, &data);
xpu::parallel_for parallel_p1( (uint64_t)0, n, (uint64_t)1, &p1_worker_t);
parallel_p1.run();*/
static const uint64_t SIZE = 1000;
uint64_t ref = 1UL << qubit;
uint64_t range = (n >> 1);
#ifdef USE_OPENMP
#pragma omp parallel for reduction(+: p)
#endif
for (int64_t batch = 0; batch <= (int64_t)range / SIZE; batch++) {
p += p1_worker(batch*SIZE, std::min<uint64_t>((batch+1)*SIZE, range), qubit, &data);
}
if (f<p) value = 1;
else value = 0;
#ifdef USE_OPENMP
#pragma omp parallel
{
#endif
if (value) {
#ifdef USE_OPENMP
#pragma omp for reduction(+: length)
#endif
for (int64_t batch = 0; batch <= (int64_t)n / SIZE; batch++) {
length += zero_worker_false(batch*SIZE, std::min<uint64_t>((batch+1)*SIZE,n), (uint64_t)1, qubit, &data);
}
}
else {
#ifdef USE_OPENMP
#pragma omp for reduction(+: length)
#endif
for (int64_t batch = 0; batch <= (int64_t)n / SIZE; batch++) {
length += zero_worker_true(batch*SIZE, std::min<uint64_t>((batch+1)*SIZE,n), (uint64_t)1, qubit, &data);
}
}
#ifdef USE_OPENMP
}
#endif
length = std::sqrt(length);
renorm_worker((uint64_t)0, n, (uint64_t)1, &length, &data);
}
else
{
//#else
std::bitset<MAX_QB_N> b;
b.reset();
b.set(qubit);
uint64_t bc = b.to_ulong();
while (bc < n)
{
p += data[bc].norm();
b = inc(b);
b.set(qubit);
bc = b.to_ulong();
}
if (f<p) value = 1;
else value = 0;
if (value) // 1
{ // reset all states where the qubit is 0
for (uint64_t i=0; i<(1UL << size); ++i)
{
if (!__bit_test(i,qubit))
data[i] = 0.0;
}
}
else
{
for (uint64_t i=0; i<(1UL << size); ++i)
{
if (__bit_test(i,qubit))
data[i] = 0.0;
}
}
for (uint64_t k = 0; k < (1UL << size); k++)
length += data[k].norm(); //std::norm(data[k]);
length = std::sqrt(length);
for (uint64_t k = 0; k < (1UL << size); k++)
data[k] /= length;
// #endif // PARALLEL_MEASUREMENT
}
// println(" [>] measured value : " << value);
qreg.set_measurement_prediction(qubit,(value == 1 ? __state_1__ : __state_0__));
qreg.set_measurement(qubit,(value == 1 ? true : false));
//qreg.set_binary(qubit,(value == 1 ? __state_1__ : __state_0__));
if (!disable_averaging)
{
if (qreg.measurement_averaging_enabled)
{
if (value == 1)
{
// println("> exited_states++");
qreg.measurement_averaging[qubit].exited_states++;
}
else
{
// println("> ground_states++");
qreg.measurement_averaging[qubit].ground_states++;
}
}
}
return value;
}
void dump()
{
if (measure_all)
println(" [-] measure(register)");
else
println(" [-] measure(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
if (!measure_all)
r.push_back(qubit);
else // this is a dirty hack, it should be fixed later (unknown qubit number!)
{
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
if (measure_all)
return __measure_reg_gate__;
else
return __measure_gate__;
}
};
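/*
 * Measurement math used above, in brief: for a register state
 * |psi> = sum_i a_i |i>, the probability of reading 1 on qubit q is
 * p1 = sum_{i : bit q of i set} |a_i|^2 (computed by p1_worker). A uniform
 * random f in [0,1) picks the outcome, the amplitudes of the discarded
 * branch are zeroed (zero_worker_*), and the survivors are divided by
 * sqrt(sum of remaining |a_i|^2) (renorm_worker) so the state stays
 * normalized. E.g. for |psi> = (|00> + |01> + |10> + |11>)/2 and q = 0,
 * p1 = 1/4 + 1/4 = 1/2, and outcome 1 leaves (|01> + |11>)/sqrt(2).
 */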
/**
* measure_x
*/
class measure_x : public gate
{
private:
uint64_t qubit;
bool measure_all;
bool disable_averaging;
qx::hadamard hg;
qx::measure mg;
public:
measure_x(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging), hg(qubit), mg(qubit)
{
}
measure_x() : qubit(0), measure_all(true), disable_averaging(false), hg(qubit), mg(qubit)
{
}
int64_t apply(qu_register& qreg)
{
int64_t r = 0;
if (measure_all)
{
for (size_t i=0; i<qreg.size(); ++i)
qx::hadamard(i).apply(qreg);
qreg.measure();
for (size_t i=0; i<qreg.size(); ++i)
qx::hadamard(i).apply(qreg);
return 0;
}
hg.apply(qreg);
r = mg.apply(qreg);
hg.apply(qreg);
return r;
}
void dump()
{
if (measure_all)
println(" [-] measure_x(register)");
else
println(" [-] measure_x(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
if (!measure_all)
r.push_back(qubit);
else // this is a dirty hack, it should be fixed later (unknown qubit number!)
{
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
if (measure_all)
return __measure_x_reg_gate__;
else
return __measure_x_gate__;
}
};
/**
* measure_y
*/
class measure_y : public gate
{
private:
uint64_t qubit;
bool measure_all;
bool disable_averaging;
qx::phase_shift sg;
qx::pauli_z zg;
qx::measure_x mg;
/*
S(qubit);
Z(qubit);
bool b = MeasX(qubit, randint);
S(qubit);
*/
public:
measure_y(uint64_t qubit, bool disable_averaging=false) : qubit(qubit), measure_all(false), disable_averaging(disable_averaging), sg(qubit), zg(qubit), mg(qubit)
{
}
measure_y() : qubit(0), measure_all(true), disable_averaging(false), sg(qubit), zg(qubit), mg()
{
}
int64_t apply(qu_register& qreg)
{
int64_t r = 0;
if (measure_all)
{
for (size_t i=0; i<qreg.size(); ++i)
{
qx::phase_shift(i).apply(qreg);
qx::pauli_z(i).apply(qreg);
}
mg.apply(qreg);
for (size_t i=0; i<qreg.size(); ++i)
qx::phase_shift(i).apply(qreg);
return 0;
}
sg.apply(qreg);
zg.apply(qreg);
r = mg.apply(qreg);
sg.apply(qreg);
return r;
}
void dump()
{
if (measure_all)
println(" [-] measure_y(register)");
else
println(" [-] measure_y(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
if (!measure_all)
r.push_back(qubit);
else // this is a dirty hack, it should be fixed later (unknown qubit number!)
{
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
if (measure_all)
return __measure_y_reg_gate__;
else
return __measure_y_gate__;
}
};
/**
* \brief generic binary controlled gate
*/
class bin_ctrl : public gate
{
private:
// uint64_t bit;
std::vector<size_t> bits;
gate * g;
public:
bin_ctrl(size_t bit, gate * g) : g(g)
{
bits.push_back(bit);
}
bin_ctrl(std::vector<size_t> bit, gate * g) : g(g)
{
for (auto b : bit)
bits.push_back(b);
}
int64_t apply(qu_register& qreg)
{
bool m = true;
for (auto b : bits)
if (!qreg.test(b))
m = false;
if (m)
g->apply(qreg);
return 0;
}
gate * get_gate()
{
return g;
}
std::vector<size_t> get_bits()
{
return bits;
}
void dump()
{
print(" [-] bin_ctrl: \n bit=" << bits[0] << " -> ");
g->dump();
}
std::vector<uint64_t> qubits()
{
return g->qubits();
}
std::vector<uint64_t> control_qubits()
{
return g->control_qubits();
}
std::vector<uint64_t> target_qubits()
{
return g->target_qubits();
}
gate_type_t type()
{
return __bin_ctrl_gate__;
}
};
#define bin_ctrl_pauli_x(b,q) bin_ctrl(b,new pauli_x(q))
#define bin_ctrl_pauli_y(b,q) bin_ctrl(b,new pauli_y(q))
#define bin_ctrl_pauli_z(b,q) bin_ctrl(b,new pauli_z(q))
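/*
 * Usage sketch (illustrative): classically controlled corrections, e.g. in
 * a teleportation-style circuit. The control is a measured classical bit,
 * not a qubit, so a measure must run first:
 *
 *   measure(0).apply(reg);                    // collapse qubit 0
 *   bin_ctrl_pauli_x(0, 2).apply(reg);        // X on qubit 2 iff bit 0 == 1
 *   bin_ctrl(1, new pauli_z(2)).apply(reg);   // Z on qubit 2 iff bit 1 == 1
 */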
/**
* \brief classical binary not gate
*/
class classical_not : public gate
{
private:
uint64_t bit;
public:
classical_not(uint64_t bit) : bit(bit)
{
}
int64_t apply(qu_register& qreg)
{
qreg.flip_measurement(bit);
return 0;
}
uint64_t get_bit()
{
return bit;
}
void dump()
{
// println(" [-] classical not gate: \n bit=" << bit);
println(" [-] not " << bit);
}
std::vector<uint64_t> qubits()
{
return std::vector<uint64_t>();
}
std::vector<uint64_t> control_qubits()
{
return std::vector<uint64_t>();
}
std::vector<uint64_t> target_qubits()
{
return std::vector<uint64_t>();
}
gate_type_t type()
{
return __classical_not_gate__;
}
};
/**
* prepz
*/
class prepz : public gate
{
private:
uint64_t qubit;
public:
prepz(uint64_t qubit) : qubit(qubit)
{
}
int64_t apply(qu_register& qreg)
{
measure(qubit,true).apply(qreg);
bin_ctrl_pauli_x(qubit,qubit).apply(qreg);
// bin_ctrl_pauli_z(qubit,qubit).apply(qreg);
qreg.set_measurement(qubit,false);
return 0;
}
void dump()
{
println(" [-] prepz(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepz_gate__;
}
};
/**
* prepx
*/
class prepx : public gate
{
private:
uint64_t qubit;
hadamard h;
public:
prepx(uint64_t qubit) : qubit(qubit), h(qubit)
{
}
int64_t apply(qu_register& qreg)
{
h.apply(qreg);
measure(qubit,true).apply(qreg);
h.apply(qreg);
bin_ctrl_pauli_z(qubit,qubit).apply(qreg);
qreg.set_measurement(qubit,false);
return 0;
}
void dump()
{
println(" [-] prepx(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepx_gate__;
}
};
/**
* prepy
*/
class prepy : public gate
{
private:
uint64_t qubit;
prepx px;
phase_shift s;
public:
prepy(uint64_t qubit) : qubit(qubit), px(qubit), s(qubit)
{
}
int64_t apply(qu_register& qreg)
{
px.apply(qreg);
s.apply(qreg);
qreg.set_measurement(qubit,false);
return 0;
}
void dump()
{
println(" [-] prepy(qubit=" << qubit << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
r.push_back(qubit);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepy_gate__;
}
};
class lookup_gate_table : public gate
{
private:
std::vector<uint64_t> ctrl_bits;
std::map<uint64_t,gate *> gates;
public:
lookup_gate_table(uint64_t b0)
{
ctrl_bits.push_back(b0);
}
lookup_gate_table(uint64_t b0, uint64_t b1)
{
ctrl_bits.push_back(b0);
ctrl_bits.push_back(b1);
}
lookup_gate_table(uint64_t b0, uint64_t b1, uint64_t b2)
{
ctrl_bits.push_back(b0);
ctrl_bits.push_back(b1);
ctrl_bits.push_back(b2);
}
lookup_gate_table(std::vector<uint64_t> ctrl_bits) : ctrl_bits(ctrl_bits)
{
}
void add_gate(uint64_t cond, gate * g)
{
assert(cond < (1<< ctrl_bits.size()));
gates[cond] = g;
}
int64_t apply(qu_register& qreg)
{
uint64_t k = 0;
for (uint64_t i=0; i<ctrl_bits.size(); i++)
{
//println(qreg.get_binary(i));
if (qreg.test(ctrl_bits[i]))
k = k * 2 + 1;
else
k *= 2;
}
// println("[+] lookup table : cond = " << k);
std::map<uint64_t,gate*>::iterator it = gates.find(k);
if (it != gates.end())
(*it).second->apply(qreg);
return 0;
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
// to do
std::map<uint64_t,gate *>::iterator ig;
for (ig=gates.begin(); ig!=gates.end(); ++ig)
{
std::vector<uint64_t> ri = ig->second->qubits();
r.insert(r.begin(), ri.begin(), ri.end());
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
// to do
std::map<uint64_t,gate *>::iterator ig;
for (ig=gates.begin(); ig!=gates.end(); ++ig)
{
std::vector<uint64_t> ri = ig->second->control_qubits();
if (ri.size())
r.insert(r.begin(), ri.begin(), ri.end());
}
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
// to do
std::map<uint64_t,gate *>::iterator ig;
for (ig=gates.begin(); ig!=gates.end(); ++ig)
{
std::vector<uint64_t> ri = ig->second->target_qubits();
if (ri.size())
r.insert(r.begin(), ri.begin(), ri.end());
}
return r;
}
void dump()
{
println(" [-] lookup gate table : ");
}
gate_type_t type()
{
return __lookup_table__;
}
};
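/*
 * Usage sketch (illustrative): dispatch on two classical bits. The condition
 * index is built MSB-first from the ctrl_bits order, so for bits (b0,b1)
 * cond = b0*2 + b1:
 *
 *   lookup_gate_table t(0, 1);
 *   t.add_gate(1, new pauli_x(2));   // fires when (b0,b1) = (0,1)
 *   t.add_gate(2, new pauli_z(2));   // fires when (b0,b1) = (1,0)
 *   t.apply(reg);                    // applies at most one gate
 */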
/**
* \brief display : debug utility
* display intermediate quantum states of a
* quantum register within a circuit.
*/
class display : public gate
{
private:
bool only_binary;
public:
display(bool only_binary=false) : only_binary(only_binary)
{
}
int64_t apply(qu_register& qreg)
{
qreg.dump(only_binary);
return 0;
}
void dump()
{
println(" [-] display(only_binary=" << only_binary << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
return r;
}
gate_type_t type()
{
if (only_binary)
return __display_binary__;
else
return __display__;
}
};
/**
* parallel gates
*/
class parallel_gates : public gate
{
public:
parallel_gates()
{
}
int64_t apply(qu_register& qreg)
{
for (uint64_t i=0; i<gates.size(); i++)
gates[i]->apply(qreg);
return 0;
}
uint64_t add(gate * g)
{
gates.push_back(g);
return gates.size();
}
std::vector<gate *> get_gates()
{
return gates;
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
for (uint64_t i=0; i<gates.size(); i++)
{
std::vector<uint64_t> q = gates[i]->qubits();
r.insert(r.end(),q.begin(),q.end());
}
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
for (uint64_t i=0; i<gates.size(); i++)
{
std::vector<uint64_t> q = gates[i]->control_qubits();
r.insert(r.end(),q.begin(),q.end());
}
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
for (uint64_t i=0; i<gates.size(); i++)
{
std::vector<uint64_t> q = gates[i]->target_qubits();
r.insert(r.end(),q.begin(),q.end());
}
return r;
}
void dump()
{
println(" [-] parallel_gates (" << gates.size() << " gates) : ");
for (uint64_t i=0; i<gates.size(); i++)
gates[i]->dump();
}
gate_type_t type()
{
return __parallel_gate__;
}
private:
std::vector<gate *> gates; // list of the parallel gates
};
/**
* prepare the qubits into an arbitrary quantum state
*/
class prepare : public gate
{
private:
quantum_state_t * state;
public:
prepare(quantum_state_t * state) : state(state)
{
}
int64_t apply(qu_register& qreg)
{
qreg.reset();
cvector_t& q = qreg.get_data();
double norm = 0;
for (quantum_state_t::iterator i=state->begin(); i != state->end(); ++i)
{
basis_state_t bs = (*i).first;
complex_t c = (*i).second;
// println("bs=" << bs << ", a=" << c);
q[bs] = c;
norm += c.norm(); //std::norm(c);
}
if (std::fabs(norm-1) > QUBIT_ERROR_THRESHOLD)
{
println("[!] warning : the loaded quantum state is not normalized (norm = " << norm << ") !");
println("[!] renormalizing the quantum state...");
qreg.normalize();
println("[!] quantum state renormalized successfully.");
}
for (size_t qi=0; qi<qreg.size(); ++qi)
{
qreg.set_measurement_prediction(qi,__state_unknown__);
//qreg.set_binary(qi,__state_unknown__);
}
return 0;
}
void dump()
{
println(" [-] prepare (quantum_state=" << state << ")");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
// this is a dirty hack, it should be fixed later (unknown qubit number!)
for (int64_t i=0; i<MAX_QB_N; ++i)
r.push_back(i);
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
return qubits();
}
gate_type_t type()
{
return __prepare_gate__;
}
};
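/*
 * Usage sketch (illustrative; it assumes quantum_state_t behaves like a
 * std::vector of (basis_state_t, complex_t) pairs, which is all the loop in
 * apply() actually requires):
 *
 *   quantum_state_t s;
 *   s.push_back(std::make_pair(0, complex_t(0.70710678, 0.)));  // |00>
 *   s.push_back(std::make_pair(3, complex_t(0.70710678, 0.)));  // |11>
 *   prepare(&s).apply(reg);  // loads the Bell state (|00>+|11>)/sqrt(2)
 */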
/**
* \brief print : debug utility
* print arbitrary string
*/
class print_str : public gate
{
private:
std::string str;
public:
print_str(std::string& s) : str(s)
{
}
int64_t apply(qu_register& qreg)
{
println(str);
return 0;
}
void dump()
{
println(" print " << str << "\"");
}
std::vector<uint64_t> qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> control_qubits()
{
std::vector<uint64_t> r;
return r;
}
std::vector<uint64_t> target_qubits()
{
std::vector<uint64_t> r;
return r;
}
gate_type_t type()
{
return __print_str__;
}
};
}
#endif // QX_GATE_H
|
index.c | /*
This source file is part of the Geophysical Fluids Modeling Framework (GAME), which is released under the MIT license.
Github repository: https://github.com/OpenNWP/GAME
*/
/*
This file collects functions that are relevant for the neighbourship relations of the grid cells and edges.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <geos95.h>
#include <netcdf.h>
#include <math.h>
#include "../../src/game_types.h"
#include "include.h"
#define ERRCODE 2
#define ERR(e) {printf("Error: %s\n", nc_strerror(e)); exit(ERRCODE);}
int find_adjacent_vector_indices_h(int from_index[], int to_index[], int adjacent_signs_h[], int adjacent_vector_indices_h[])
{
/*
This function finds the horizontal vectors that are adjacent to a grid cell.
*/
int trouble_detected = 0;
int counter;
#pragma omp parallel for private(trouble_detected, counter)
for (int i = 0; i < NO_OF_SCALARS_H; ++i)
{
counter = 0;
trouble_detected = 0; // private copy: must be reset for every cell
for (int j = 0; j < NO_OF_VECTORS_H; ++j)
{
if (from_index[j] == i || to_index[j] == i)
{
if (from_index[j] == to_index[j])
{
printf("It is from_index == to_index at point %d.\n", j);
exit(1);
}
adjacent_vector_indices_h[6*i + counter] = j;
if (from_index[j] == i)
{
adjacent_signs_h[6*i + counter] = 1;
}
if (to_index[j] == i)
{
adjacent_signs_h[6*i + counter] = -1;
}
++counter;
}
}
if (counter != 6)
{
trouble_detected = 1;
if (counter == 5 && i < NO_OF_PENTAGONS)
{
trouble_detected = 0;
}
}
if (trouble_detected == 1)
{
printf("Trouble detected, place 1.\n");
exit(1);
}
if (i < NO_OF_PENTAGONS)
{
adjacent_vector_indices_h[6*i + 5] = -1;
adjacent_signs_h[6*i + 5] = 0;
}
}
int no_of_edges, double_check, sign_sum_check;
for (int i = 0; i < NO_OF_VECTORS_H; ++i)
{
counter = 0;
sign_sum_check = 0;
for (int j = 0; j < NO_OF_SCALARS_H; ++j)
{
no_of_edges = 6;
if (j < NO_OF_PENTAGONS)
{
no_of_edges = 5;
}
double_check = 0;
for (int k = 0; k < no_of_edges; ++k)
{
if (adjacent_vector_indices_h[6*j + k] == i)
{
++counter;
++double_check;
sign_sum_check += adjacent_signs_h[6*j + k];
}
}
if (double_check > 1)
{
printf("Same vector twice in adjacent_vector_indices_h of same grid cell.\n");
exit(1);
}
}
if (sign_sum_check != 0)
{
printf("Problem with adjacent_signs_h.\n");
}
if (counter != 2)
{
printf("Problem with adjacent_vector_indices_h.\n");
}
}
return 0;
}
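/*
 * Worked example of the conventions above (illustrative): on an icosahedral
 * grid the first NO_OF_PENTAGONS cells are pentagons, so their sixth slot in
 * adjacent_vector_indices_h is padded with -1 and sign 0. For a hexagon cell
 * i with edges e0..e5, adjacent_signs_h[6*i + k] is +1 when the edge vector
 * points away from the cell (from_index == i) and -1 when it points toward
 * it (to_index == i); the second loop then verifies that every edge appears
 * in exactly two cells with opposite signs.
 */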
|
app_main.c | #include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include "bmp_interface.h"
#include <omp.h>
extern int __htc_get_unit_count();
extern int global_radius;
void foo(uint8_t *a) {
#pragma omp target teams num_teams(8)
{
a[omp_get_team_num()] = omp_get_team_num();
}
printf("end of function foo\n");
}
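/*
 * Illustrative note: with num_teams(8), each team's initial thread stores
 * its own team number, so on return a[0..7] == {0,1,...,7}. This is why the
 * second check in app_main below expects sum == 13977 + 28, since
 * 0+1+...+7 == 28.
 */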
int app_main(int argc, char **argv) {
uint32_t bufsize = 1000;
uint32_t bufsize2 = 1000;
// Allocate target temp buffer.
extern void *stencil_cp_alloc(size_t);
uint8_t *unew = (uint8_t *)stencil_cp_alloc(bufsize * sizeof(uint8_t));
printf("unit count is %d\n", __htc_get_unit_count());
int i;
int k;
#pragma omp target
#pragma omp teams distribute parallel for num_threads(17) firstprivate(k) schedule(static,33) num_teams(5)
for (i = 0; i < bufsize; i++) {
k = (int)omp_get_team_num() + bufsize - bufsize2;
// printf("first target team %d thread %d i is %d\n", k,
// (int)omp_get_thread_num(), i);
unew[i] = (omp_get_team_num()+1) * omp_get_thread_num() + k - k;
}
#pragma omp target
#pragma omp teams distribute parallel for num_threads(7) firstprivate(k) schedule(static,33) num_teams(8)
for (i = 0; i < bufsize; i++) {
k = (int)omp_get_team_num();
// printf("second target team %d thread %d i is %d\n", k,
// (int)omp_get_thread_num(), i);
unew[i] += (omp_get_team_num()+1) * omp_get_thread_num();
}
int sum = 0;
for (i = 0; i < bufsize; i++) {
sum += unew[i];
}
printf("sum is %d %s\n", sum, (sum == 13977) ? "PASSED" : "FAILED");
foo(unew);
for (i = 0; i < 8; i++) {
sum += unew[i];
}
printf("sum is %d %s\n", sum, (sum == (13977+28)) ? "PASSED" : "FAILED");
return 0;
}
|
GB_binop__times_fc64.c |
//------------------------------------------------------------------------------
// GB_binop: hard-coded functions for each built-in binary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2022, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated2/ folder, do not edit it
// (it is auto-generated from Generator/*).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_emult.h"
#include "GB_control.h"
#include "GB_ek_slice.h"
#include "GB_dense.h"
#include "GB_atomics.h"
#include "GB_bitmap_assign_methods.h"
#include "GB_binop__include.h"
// C=binop(A,B) is defined by the following types and operators:
// A+B function (eWiseAdd): GB (_AaddB__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_08__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_02__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_04__times_fc64)
// A.*B function (eWiseMult): GB (_AemultB_bitmap__times_fc64)
// A*D function (colscale): GB (_AxD__times_fc64)
// D*A function (rowscale): GB (_DxB__times_fc64)
// C+=B function (dense accum): GB (_Cdense_accumB__times_fc64)
// C+=b function (dense accum): GB (_Cdense_accumb__times_fc64)
// C+=A+B function (dense ewise3): GB (_Cdense_ewise3_accum__times_fc64)
// C=A+B function (dense ewise3): GB (_Cdense_ewise3_noaccum__times_fc64)
// C=scalar+B GB (_bind1st__times_fc64)
// C=scalar+B' GB (_bind1st_tran__times_fc64)
// C=A+scalar GB (_bind2nd__times_fc64)
// C=A'+scalar GB (_bind2nd_tran__times_fc64)
// C type: GxB_FC64_t
// A type: GxB_FC64_t
// A pattern? 0
// B type: GxB_FC64_t
// B pattern? 0
// BinaryOp: cij = GB_FC64_mul (aij, bij)
#define GB_ATYPE \
GxB_FC64_t
#define GB_BTYPE \
GxB_FC64_t
#define GB_CTYPE \
GxB_FC64_t
// true if the types of A and B are identical
#define GB_ATYPE_IS_BTYPE \
1
// true if the types of C and A are identical
#define GB_CTYPE_IS_ATYPE \
1
// true if the types of C and B are identical
#define GB_CTYPE_IS_BTYPE \
1
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA,A_iso) \
GxB_FC64_t aij = GBX (Ax, pA, A_iso)
// true if values of A are not used
#define GB_A_IS_PATTERN \
0
// bij = Bx [pB]
#define GB_GETB(bij,Bx,pB,B_iso) \
GxB_FC64_t bij = GBX (Bx, pB, B_iso)
// true if values of B are not used
#define GB_B_IS_PATTERN \
0
// declare scalar of the same type as C
#define GB_CTYPE_SCALAR(t) \
GxB_FC64_t t
// cij = Ax [pA]
#define GB_COPY_A_TO_C(cij,Ax,pA,A_iso) \
cij = GBX (Ax, pA, A_iso)
// cij = Bx [pB]
#define GB_COPY_B_TO_C(cij,Bx,pB,B_iso) \
cij = GBX (Bx, pB, B_iso)
#define GB_CX(p) Cx [p]
// binary operator
#define GB_BINOP(z,x,y,i,j) \
z = GB_FC64_mul (x, y) ;
// true if the binop must be flipped
#define GB_BINOP_FLIP \
0
// op is second
#define GB_OP_IS_SECOND \
0
// do the numerical phases of GB_add and GB_emult
#define GB_PHASE_2_OF_2
// hard-coded loops can be vectorized
#define GB_PRAGMA_SIMD_VECTORIZE GB_PRAGMA_SIMD
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_TIMES || GxB_NO_FC64 || GxB_NO_TIMES_FC64)
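// Illustrative expansion of the macros above: inside the included templates,
// GB_GETA / GB_GETB / GB_BINOP combine into code of the form
//
//    GxB_FC64_t aij = GBX (Ax, p, A_iso) ;
//    GxB_FC64_t bij = GBX (Bx, p, B_iso) ;
//    Cx [p] = GB_FC64_mul (aij, bij) ;
//
// i.e. an element-wise complex multiply, exactly as spelled out in the
// _bind1st/_bind2nd kernels further below.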
//------------------------------------------------------------------------------
// C += A+B, all 3 matrices dense
//------------------------------------------------------------------------------
// The op must be MIN, MAX, PLUS, MINUS, RMINUS, TIMES, DIV, or RDIV.
void GB (_Cdense_ewise3_accum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_accum_template.c"
}
//------------------------------------------------------------------------------
// C = A+B, all 3 matrices dense
//------------------------------------------------------------------------------
void GB (_Cdense_ewise3_noaccum__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix B,
const int nthreads
)
{
#include "GB_dense_ewise3_noaccum_template.c"
}
//------------------------------------------------------------------------------
// C += B, accumulate a sparse matrix into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix B,
const int64_t *B_ek_slicing, const int B_ntasks, const int B_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
#include "GB_dense_subassign_23_template.c"
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C += b, accumulate a scalar into a dense matrix
//------------------------------------------------------------------------------
GrB_Info GB (_Cdense_accumb__times_fc64)
(
GrB_Matrix C,
const GB_void *p_bwork,
const int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
{
// get the scalar b for C += b, of type GxB_FC64_t
GxB_FC64_t bwork = (*((GxB_FC64_t *) p_bwork)) ;
#include "GB_dense_subassign_22_template.c"
return (GrB_SUCCESS) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = A*D, column scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_AxD__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GrB_Matrix D,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_colscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = D*B, row scale with diagonal D matrix
//------------------------------------------------------------------------------
GrB_Info GB (_DxB__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix D,
const GrB_Matrix B,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *restrict Cx = (GxB_FC64_t *) C->x ;
#include "GB_AxB_rowscale_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseAdd: C=A+B, C<M>=A+B, C<!M>=A+B
//------------------------------------------------------------------------------
GrB_Info GB (_AaddB__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool is_eWiseUnion,
const GB_void *alpha_scalar_in,
const GB_void *beta_scalar_in,
const bool Ch_is_Mh,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GB_WERK_DECLARE (M_ek_slicing, int64_t) ;
GB_WERK_DECLARE (A_ek_slicing, int64_t) ;
GB_WERK_DECLARE (B_ek_slicing, int64_t) ;
GxB_FC64_t alpha_scalar ;
GxB_FC64_t beta_scalar ;
if (is_eWiseUnion)
{
alpha_scalar = (*((GxB_FC64_t *) alpha_scalar_in)) ;
beta_scalar = (*((GxB_FC64_t *) beta_scalar_in )) ;
}
#include "GB_add_template.c"
GB_FREE_WORKSPACE ;
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, or C<M!>=A.*B where C is sparse/hyper
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_08__times_fc64)
(
GrB_Matrix C,
const int C_sparsity,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict C_to_M,
const int64_t *restrict C_to_A,
const int64_t *restrict C_to_B,
const GB_task_struct *restrict TaskList,
const int C_ntasks,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_08_meta.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<#> = A.*B when A is sparse/hyper and B is bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_02__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const bool flipxy,
const int64_t *restrict Cp_kfirst,
const int64_t *A_ek_slicing, const int A_ntasks, const int A_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#if GB_BINOP_FLIP
// The operator is not commutative, and does not have a flipped
// variant. For example z=atan2(y,x).
if (flipxy)
{
// use fmult(y,x)
#undef GB_FLIPPED
#define GB_FLIPPED 1
#include "GB_emult_02_template.c"
}
else
{
// use fmult(x,y)
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
}
#else
// No need to handle the flip: the operator is either commutative, or
// has been handled by changing z=div(y,x) to z=rdiv(x,y) for example.
#undef GB_FLIPPED
#define GB_FLIPPED 0
#include "GB_emult_02_template.c"
#endif
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C<M> = A.*B, M sparse/hyper, A and B bitmap/full
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_04__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix M,
const bool Mask_struct,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *restrict Cp_kfirst,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_emult_04_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// eWiseMult: C=A.*B, C<M>=A.*B, C<!M>=A.*B where C is bitmap
//------------------------------------------------------------------------------
GrB_Info GB (_AemultB_bitmap__times_fc64)
(
GrB_Matrix C,
const int ewise_method,
const GrB_Matrix M,
const bool Mask_struct,
const bool Mask_comp,
const GrB_Matrix A,
const GrB_Matrix B,
const int64_t *M_ek_slicing, const int M_ntasks, const int M_nthreads,
const int C_nthreads,
GB_Context Context
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_bitmap_emult_template.c"
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (x,Bx): apply a binary operator to a matrix with scalar bind1st
//------------------------------------------------------------------------------
GrB_Info GB (_bind1st__times_fc64)
(
GB_void *Cx_output, // Cx and Bx may be aliased
const GB_void *x_input,
const GB_void *Bx_input,
const int8_t *restrict Bb,
int64_t bnz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t x = (*((GxB_FC64_t *) x_input)) ;
GxB_FC64_t *Bx = (GxB_FC64_t *) Bx_input ;
int64_t p ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < bnz ; p++)
{
if (!GBB (Bb, p)) continue ;
GxB_FC64_t bij = GBX (Bx, p, false) ;
Cx [p] = GB_FC64_mul (x, bij) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// Cx = op (Ax,y): apply a binary operator to a matrix with scalar bind2nd
//------------------------------------------------------------------------------
GrB_Info GB (_bind2nd__times_fc64)
(
GB_void *Cx_output, // Cx and Ax may be aliased
const GB_void *Ax_input,
const GB_void *y_input,
const int8_t *restrict Ab,
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
GxB_FC64_t *Cx = (GxB_FC64_t *) Cx_output ;
GxB_FC64_t *Ax = (GxB_FC64_t *) Ax_input ;
GxB_FC64_t y = (*((GxB_FC64_t *) y_input)) ;
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!GBB (Ab, p)) continue ;
GxB_FC64_t aij = GBX (Ax, p, false) ;
Cx [p] = GB_FC64_mul (aij, y) ;
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (x, A'): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (x, aij), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (x, aij) ; \
}
GrB_Info GB (_bind1st_tran__times_fc64)
(
GrB_Matrix C,
const GB_void *x_input,
const GrB_Matrix A,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
// GB_unop_transpose.c uses GB_ATYPE, but A is
// the 2nd input to binary operator z=f(x,y).
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t x = (*((const GxB_FC64_t *) x_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
#undef GB_ATYPE
#define GB_ATYPE \
GxB_FC64_t
}
//------------------------------------------------------------------------------
// C = op (A', y): transpose and apply a binary operator
//------------------------------------------------------------------------------
// cij = op (aij, y), no typecasting (in spite of the macro name)
#undef GB_CAST_OP
#define GB_CAST_OP(pC,pA) \
{ \
GxB_FC64_t aij = GBX (Ax, pA, false) ; \
Cx [pC] = GB_FC64_mul (aij, y) ; \
}
GrB_Info GB (_bind2nd_tran__times_fc64)
(
GrB_Matrix C,
const GrB_Matrix A,
const GB_void *y_input,
int64_t *restrict *Workspaces,
const int64_t *restrict A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
GxB_FC64_t y = (*((const GxB_FC64_t *) y_input)) ;
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
GB_unop__log2_fp32_fp32.c | //------------------------------------------------------------------------------
// GB_unop: hard-coded functions for each built-in unary operator
//------------------------------------------------------------------------------
// SuiteSparse:GraphBLAS, Timothy A. Davis, (c) 2017-2021, All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
//------------------------------------------------------------------------------
// If this file is in the Generated/ folder, do not edit it (auto-generated).
#include "GB.h"
#ifndef GBCOMPACT
#include "GB_control.h"
#include "GB_atomics.h"
#include "GB_unop__include.h"
// C=unop(A) is defined by the following types and operators:
// op(A) function: GB_unop_apply__log2_fp32_fp32
// op(A') function: GB_unop_tran__log2_fp32_fp32
// C type: float
// A type: float
// cast: float cij = aij
// unaryop: cij = log2f (aij)
#define GB_ATYPE \
float
#define GB_CTYPE \
float
// aij = Ax [pA]
#define GB_GETA(aij,Ax,pA) \
float aij = Ax [pA]
#define GB_CX(p) Cx [p]
// unary operator
#define GB_OP(z, x) \
z = log2f (x) ;
// casting
#define GB_CAST(z, aij) \
float z = aij ;
// cij = op (aij)
#define GB_CAST_OP(pC,pA) \
{ \
/* aij = Ax [pA] */ \
float aij = Ax [pA] ; \
/* Cx [pC] = op (cast (aij)) */ \
float z = aij ; \
Cx [pC] = log2f (z) ; \
}
// true if operator is the identity op with no typecasting
#define GB_OP_IS_IDENTITY_WITH_NO_TYPECAST \
0
// disable this operator and use the generic case if these conditions hold
#define GB_DISABLE \
(GxB_NO_LOG2 || GxB_NO_FP32)
//------------------------------------------------------------------------------
// Cx = op (cast (Ax)): apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_apply__log2_fp32_fp32
(
float *Cx, // Cx and Ax may be aliased
const float *Ax,
const int8_t *GB_RESTRICT Ab, // A->b if A is bitmap
int64_t anz,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
int64_t p ;
if (Ab == NULL)
{
#if ( GB_OP_IS_IDENTITY_WITH_NO_TYPECAST )
GB_memcpy (Cx, Ax, anz * sizeof (float), nthreads) ;
#else
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log2f (z) ;
}
#endif
}
else
{
// bitmap case, no transpose; A->b already memcpy'd into C->b
#pragma omp parallel for num_threads(nthreads) schedule(static)
for (p = 0 ; p < anz ; p++)
{
if (!Ab [p]) continue ;
float aij = Ax [p] ;
float z = aij ;
Cx [p] = log2f (z) ;
}
}
return (GrB_SUCCESS) ;
#endif
}
//------------------------------------------------------------------------------
// C = op (cast (A')): transpose, typecast, and apply a unary operator
//------------------------------------------------------------------------------
GrB_Info GB_unop_tran__log2_fp32_fp32
(
GrB_Matrix C,
const GrB_Matrix A,
int64_t *GB_RESTRICT *Workspaces,
const int64_t *GB_RESTRICT A_slice,
int nworkspaces,
int nthreads
)
{
#if GB_DISABLE
return (GrB_NO_VALUE) ;
#else
#include "GB_unop_transpose.c"
return (GrB_SUCCESS) ;
#endif
}
#endif
|
hypre_merge_sort.c | /******************************************************************************
* Copyright 1998-2019 Lawrence Livermore National Security, LLC and other
* HYPRE Project Developers. See the top-level COPYRIGHT file for details.
*
* SPDX-License-Identifier: (Apache-2.0 OR MIT)
******************************************************************************/
#include "_hypre_utilities.h"
#include "hypre_hopscotch_hash.h"
#include "../seq_mv/HYPRE_seq_mv.h"
//#define DBG_MERGE_SORT
#ifdef DBG_MERGE_SORT
#include <assert.h>
#include <algorithm>
#include <unordered_map>
#endif
#define SWAP(T, a, b) do { T tmp = a; a = b; b = tmp; } while (0)
/* union of two sorted (in ascending order) array arr1 and arr2 into arr3
* Assumption: no duplicates in arr1 and arr2
* arr3 should have enough space on entry
* map1 and map2 map arr1 and arr2 to arr3 */
void hypre_union2(HYPRE_Int n1, HYPRE_BigInt *arr1, HYPRE_Int n2, HYPRE_BigInt *arr2, HYPRE_Int *n3, HYPRE_BigInt *arr3,
HYPRE_Int *map1, HYPRE_Int *map2)
{
HYPRE_Int i = 0, j = 0, k = 0;
while (i < n1 && j < n2)
{
if (arr1[i] < arr2[j])
{
if (map1) { map1[i] = k; }
arr3[k++] = arr1[i++];
}
else if (arr1[i] > arr2[j])
{
if (map2) { map2[j] = k; }
arr3[k++] = arr2[j++];
}
else /* == */
{
if (map1) { map1[i] = k; }
if (map2) { map2[j] = k; }
arr3[k++] = arr1[i++];
j++;
}
}
while (i < n1)
{
if (map1) { map1[i] = k; }
arr3[k++] = arr1[i++];
}
while (j < n2)
{
if (map2) { map2[j] = k; }
arr3[k++] = arr2[j++];
}
*n3 = k;
}
static void hypre_merge(HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2, HYPRE_Int *out)
{
for ( ; first1 != last1; ++out)
{
if (first2 == last2)
{
for ( ; first1 != last1; ++first1, ++out)
{
*out = *first1;
}
return;
}
if (*first2 < *first1)
{
*out = *first2;
++first2;
}
else
{
*out = *first1;
++first1;
}
}
for ( ; first2 != last2; ++first2, ++out)
{
*out = *first2;
}
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
static void hypre_big_merge(HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2, HYPRE_BigInt *out)
{
for ( ; first1 != last1; ++out)
{
if (first2 == last2)
{
for ( ; first1 != last1; ++first1, ++out)
{
*out = *first1;
}
return;
}
if (*first2 < *first1)
{
*out = *first2;
++first2;
}
else
{
*out = *first1;
++first1;
}
}
for ( ; first2 != last2; ++first2, ++out)
{
*out = *first2;
}
}
#endif
static void kth_element_(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_Int *a1, HYPRE_Int *a2,
HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
while (1)
{
HYPRE_Int i = (left + right)/2; // right < k -> i < k
HYPRE_Int j = k - i - 1;
#ifdef DBG_MERGE_SORT
assert(left <= right && right <= k);
assert(i < k); // i == k implies left == right == k that can never happen
assert(j >= 0 && j < n2);
#endif
if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
{
*out1 = i; *out2 = j + 1;
return;
}
else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
{
*out1 = i + 1; *out2 = j;
return;
}
else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1])
{
// search in left half of a1
right = i - 1;
}
else
{
// search in right half of a1
left = i + 1;
}
}
}
/**
* Partition the input so that
* a1[0:*out1) and a2[0:*out2) contain the smallest k elements
*/
static void kth_element(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_Int *a1, HYPRE_Int *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
// either of the inputs is empty
if (n1 == 0)
{
*out1 = 0; *out2 = k;
return;
}
if (n2 == 0)
{
*out1 = k; *out2 = 0;
return;
}
if (k >= n1 + n2)
{
*out1 = n1; *out2 = n2;
return;
}
// one is greater than the other
if (k < n1 && a1[k] <= a2[0])
{
*out1 = k; *out2 = 0;
return;
}
if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
{
*out1 = n1; *out2 = k - n1;
return;
}
if (k < n2 && a2[k] <= a1[0])
{
*out1 = 0; *out2 = k;
return;
}
if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
{
*out1 = k - n2; *out2 = n2;
return;
}
// now k > 0
// faster to do binary search on the shorter sequence
if (n1 > n2)
{
SWAP(HYPRE_Int, n1, n2);
SWAP(HYPRE_Int *, a1, a2);
SWAP(HYPRE_Int *, out1, out2);
}
if (k < (n1 + n2)/2)
{
kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
}
else
{
// when k is big, faster to find (n1 + n2 - k)th biggest element
HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
HYPRE_Int new_k = k - offset1 - offset2;
HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
kth_element_(out1, out2, a1 + offset1, a2 + offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);
*out1 += offset1;
*out2 += offset2;
}
#ifdef DBG_MERGE_SORT
assert(*out1 + *out2 == k);
#endif
}
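/*
 * Worked example (illustrative): a1 = {1, 4, 7}, a2 = {2, 3, 9}, k = 4.
 * The four smallest elements are {1, 2, 3, 4}, so the routine returns
 * *out1 = 2 (a1[0:2) = {1, 4}) and *out2 = 2 (a2[0:2) = {2, 3}); as the
 * DBG_MERGE_SORT assertion states, *out1 + *out2 == k always holds.
 */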
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
static void big_kth_element_(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_BigInt *a1, HYPRE_BigInt *a2,
HYPRE_Int left, HYPRE_Int right, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
while (1)
{
HYPRE_Int i = (left + right)/2; // right < k -> i < k
HYPRE_Int j = k - i - 1;
#ifdef DBG_MERGE_SORT
assert(left <= right && right <= k);
assert(i < k); // i == k implies left == right == k that can never happen
assert(j >= 0 && j < n2);
#endif
if ((j == -1 || a1[i] >= a2[j]) && (j == n2 - 1 || a1[i] <= a2[j + 1]))
{
*out1 = i; *out2 = j + 1;
return;
}
else if (j >= 0 && a2[j] >= a1[i] && (i == n1 - 1 || a2[j] <= a1[i + 1]))
{
*out1 = i + 1; *out2 = j;
return;
}
else if (a1[i] > a2[j] && j != n2 - 1 && a1[i] > a2[j+1])
{
// search in left half of a1
right = i - 1;
}
else
{
// search in right half of a1
left = i + 1;
}
}
}
/**
* Partition the input so that
* a1[0:*out1) and a2[0:*out2) contain the smallest k elements
*/
static void big_kth_element(
HYPRE_Int *out1, HYPRE_Int *out2,
HYPRE_BigInt *a1, HYPRE_BigInt *a2, HYPRE_Int n1, HYPRE_Int n2, HYPRE_Int k)
{
// either of the inputs is empty
if (n1 == 0)
{
*out1 = 0; *out2 = k;
return;
}
if (n2 == 0)
{
*out1 = k; *out2 = 0;
return;
}
if (k >= n1 + n2)
{
*out1 = n1; *out2 = n2;
return;
}
// one is greater than the other
if (k < n1 && a1[k] <= a2[0])
{
*out1 = k; *out2 = 0;
return;
}
if (k - n1 >= 0 && a2[k - n1] >= a1[n1 - 1])
{
*out1 = n1; *out2 = k - n1;
return;
}
if (k < n2 && a2[k] <= a1[0])
{
*out1 = 0; *out2 = k;
return;
}
if (k - n2 >= 0 && a1[k - n2] >= a2[n2 - 1])
{
*out1 = k - n2; *out2 = n2;
return;
}
// now k > 0
// faster to do binary search on the shorter sequence
if (n1 > n2)
{
SWAP(HYPRE_Int, n1, n2);
SWAP(HYPRE_BigInt *, a1, a2);
SWAP(HYPRE_Int *, out1, out2);
}
if (k < (n1 + n2)/2)
{
big_kth_element_(out1, out2, a1, a2, 0, hypre_min(n1 - 1, k), n1, n2, k);
}
else
{
// when k is big, faster to find (n1 + n2 - k)th biggest element
HYPRE_Int offset1 = hypre_max(k - n2, 0), offset2 = hypre_max(k - n1, 0);
HYPRE_Int new_k = k - offset1 - offset2;
HYPRE_Int new_n1 = hypre_min(n1 - offset1, new_k + 1);
HYPRE_Int new_n2 = hypre_min(n2 - offset2, new_k + 1);
big_kth_element_(out1, out2, a1 + (HYPRE_BigInt)offset1, a2 + (HYPRE_BigInt)offset2, 0, new_n1 - 1, new_n1, new_n2, new_k);
*out1 += offset1;
*out2 += offset2;
}
#ifdef DBG_MERGE_SORT
assert(*out1 + *out2 == k);
#endif
}
#endif
/**
* @param num_threads number of threads that participate in this merge
* @param my_thread_num thread id (zero-based) among the threads that participate in this merge
*/
static void hypre_parallel_merge(
HYPRE_Int *first1, HYPRE_Int *last1, HYPRE_Int *first2, HYPRE_Int *last2,
HYPRE_Int *out,
HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
HYPRE_Int n1 = last1 - first1;
HYPRE_Int n2 = last2 - first2;
HYPRE_Int n = n1 + n2;
HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);
#ifdef DBG_MERGE_SORT
assert(std::is_sorted(first1, last1));
assert(std::is_sorted(first2, last2));
#endif
HYPRE_Int begin1, begin2, end1, end2;
kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);
while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
begin1--; begin2++;
}
while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
end1--; end2++;
}
#ifdef DBG_MERGE_SORT
assert(begin1 <= end1);
assert(begin2 <= end2);
#endif
hypre_merge(
first1 + begin1, first1 + end1,
first2 + begin2, first2 + end2,
out + begin1 + begin2);
#ifdef DBG_MERGE_SORT
assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
/**
* @param num_threads number of threads that participate in this merge
* @param my_thread_num thread id (zero-based) among the threads that participate in this merge
*/
static void hypre_big_parallel_merge(
HYPRE_BigInt *first1, HYPRE_BigInt *last1, HYPRE_BigInt *first2, HYPRE_BigInt *last2,
HYPRE_BigInt *out,
HYPRE_Int num_threads, HYPRE_Int my_thread_num)
{
HYPRE_Int n1 = (HYPRE_Int)(last1 - first1);
HYPRE_Int n2 = (HYPRE_Int)(last2 - first2);
HYPRE_Int n = n1 + n2;
HYPRE_Int n_per_thread = (n + num_threads - 1)/num_threads;
HYPRE_Int begin_rank = hypre_min(n_per_thread*my_thread_num, n);
HYPRE_Int end_rank = hypre_min(begin_rank + n_per_thread, n);
#ifdef DBG_MERGE_SORT
assert(std::is_sorted(first1, last1));
assert(std::is_sorted(first2, last2));
#endif
HYPRE_Int begin1, begin2, end1, end2;
big_kth_element(&begin1, &begin2, first1, first2, n1, n2, begin_rank);
big_kth_element(&end1, &end2, first1, first2, n1, n2, end_rank);
while (begin1 > end1 && begin1 > 0 && begin2 < n2 && first1[begin1 - 1] == first2[begin2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
begin1--; begin2++;
}
while (begin2 > end2 && end1 > 0 && end2 < n2 && first1[end1 - 1] == first2[end2])
{
#ifdef DBG_MERGE_SORT
printf("%s:%d\n", __FILE__, __LINE__);
#endif
end1--; end2++;
}
#ifdef DBG_MERGE_SORT
assert(begin1 <= end1);
assert(begin2 <= end2);
#endif
hypre_big_merge(
first1 + (HYPRE_BigInt)begin1, first1 + (HYPRE_BigInt)end1,
first2 + (HYPRE_BigInt)begin2, first2 + (HYPRE_BigInt)end2,
out + (HYPRE_BigInt)(begin1 + begin2));
#ifdef DBG_MERGE_SORT
assert(std::is_sorted(out + begin1 + begin2, out + end1 + end2));
#endif
}
#endif
void hypre_merge_sort(HYPRE_Int *in, HYPRE_Int *temp, HYPRE_Int len, HYPRE_Int **out)
{
if (0 == len) return;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
#ifdef DBG_MERGE_SORT
HYPRE_Int *dbg_buf = new HYPRE_Int[len];
std::copy(in, in + len, dbg_buf);
std::sort(dbg_buf, dbg_buf + len);
#endif
// HYPRE_Int thread_private_len[hypre_NumThreads()];
// HYPRE_Int out_len = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads = hypre_NumActiveThreads();
HYPRE_Int my_thread_num = hypre_GetThreadNum();
// thread-private sort
HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);
hypre_qsort0(in, i_begin, i_end - 1);
// merge sorted sequences
HYPRE_Int in_group_size;
HYPRE_Int *in_buf = in;
HYPRE_Int *out_buf = temp;
for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
// merge 2 in-groups into 1 out-group
HYPRE_Int out_group_size = in_group_size*2;
HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
// HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
HYPRE_Int id_in_group = my_thread_num%out_group_size;
HYPRE_Int num_threads_in_group =
hypre_min(group_leader + out_group_size, num_threads) - group_leader;
HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);
hypre_parallel_merge(
in_buf + in_group1_begin, in_buf + in_group1_end,
in_buf + in_group2_begin, in_buf + in_group2_end,
out_buf + in_group1_begin,
num_threads_in_group,
id_in_group);
HYPRE_Int *temp = in_buf;
in_buf = out_buf;
out_buf = temp;
}
*out = in_buf;
} /* omp parallel */
#ifdef DBG_MERGE_SORT
assert(std::equal(*out, *out + len, dbg_buf));
delete[] dbg_buf;
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
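/*
 * Hypothetical usage sketch (not part of the original file): hypre_merge_sort
 * ping-pongs between its two buffers, so the sorted result may end up in
 * either `data` or `temp`; `*out` reports which one. The caller owns both
 * buffers.
 */
static void example_hypre_merge_sort_usage(HYPRE_Int *data, HYPRE_Int n)
{
HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, n, HYPRE_MEMORY_HOST);
HYPRE_Int *sorted, i;
hypre_merge_sort(data, temp, n, &sorted); /* sorted == data or sorted == temp */
if (sorted != data)
{
for (i = 0; i < n; i++) { data[i] = sorted[i]; } /* copy back if the caller wants the result in `data` */
}
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}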
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
void hypre_sort_and_create_inverse_map(
HYPRE_Int *in, HYPRE_Int len, HYPRE_Int **out, hypre_UnorderedIntMap *inverse_map)
{
if (len == 0)
{
return;
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
HYPRE_Int *temp = hypre_TAlloc(HYPRE_Int, len, HYPRE_MEMORY_HOST);
hypre_merge_sort(in, temp, len, out);
hypre_UnorderedIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
HYPRE_Int i;
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < len; i++)
{
HYPRE_Int old = hypre_UnorderedIntMapPutIfAbsent(inverse_map, (*out)[i], i);
assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %d\n", i, (*out)[i]);
assert(false);
}
#endif
}
#ifdef DBG_MERGE_SORT
std::unordered_map<HYPRE_Int, HYPRE_Int> inverse_map2(len);
for (HYPRE_Int i = 0; i < len; ++i) {
inverse_map2[(*out)[i]] = i;
if (hypre_UnorderedIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %d\n", i, (*out)[i]);
assert(false);
}
}
assert(hypre_UnorderedIntMapSize(inverse_map) == len);
#endif
if (*out == in)
{
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
else
{
hypre_TFree(in, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
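/*
 * Post-condition sketch (not part of the original file): after
 * hypre_sort_and_create_inverse_map(in, len, &out, &map),
 *   out[i] is sorted ascending, and
 *   hypre_UnorderedIntMapGet(&map, out[i]) == i for 0 <= i < len;
 * the function takes ownership of `in` and frees whichever of the two
 * buffers (the input or the internal temp) is not returned through `out`.
 */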
#ifdef HYPRE_CONCURRENT_HOPSCOTCH
void hypre_big_merge_sort(HYPRE_BigInt *in, HYPRE_BigInt *temp, HYPRE_Int len, HYPRE_BigInt **out)
{
if (0 == len) return;
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
#ifdef DBG_MERGE_SORT
HYPRE_BigInt *dbg_buf = new HYPRE_BigInt[len];
std::copy(in, in + len, dbg_buf);
std::sort(dbg_buf, dbg_buf + len);
#endif
// HYPRE_Int thread_private_len[hypre_NumThreads()];
// HYPRE_Int out_len = 0;
#ifdef HYPRE_USING_OPENMP
#pragma omp parallel
#endif
{
HYPRE_Int num_threads = hypre_NumActiveThreads();
HYPRE_Int my_thread_num = hypre_GetThreadNum();
// thread-private sort
HYPRE_Int i_per_thread = (len + num_threads - 1)/num_threads;
HYPRE_Int i_begin = hypre_min(i_per_thread*my_thread_num, len);
HYPRE_Int i_end = hypre_min(i_begin + i_per_thread, len);
hypre_BigQsort0(in, i_begin, i_end - 1);
// merge sorted sequences
HYPRE_Int in_group_size;
HYPRE_BigInt *in_buf = in;
HYPRE_BigInt *out_buf = temp;
for (in_group_size = 1; in_group_size < num_threads; in_group_size *= 2)
{
#ifdef HYPRE_USING_OPENMP
#pragma omp barrier
#endif
// merge 2 in-groups into 1 out-group
HYPRE_Int out_group_size = in_group_size*2;
HYPRE_Int group_leader = my_thread_num/out_group_size*out_group_size;
// HYPRE_Int group_sub_leader = hypre_min(group_leader + in_group_size, num_threads - 1);
HYPRE_Int id_in_group = my_thread_num%out_group_size;
HYPRE_Int num_threads_in_group =
hypre_min(group_leader + out_group_size, num_threads) - group_leader;
HYPRE_Int in_group1_begin = hypre_min(i_per_thread*group_leader, len);
HYPRE_Int in_group1_end = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_begin = hypre_min(in_group1_begin + i_per_thread*in_group_size, len);
HYPRE_Int in_group2_end = hypre_min(in_group2_begin + i_per_thread*in_group_size, len);
hypre_big_parallel_merge(
in_buf + (HYPRE_BigInt)in_group1_begin, in_buf + (HYPRE_BigInt)in_group1_end,
in_buf + (HYPRE_BigInt)in_group2_begin, in_buf + (HYPRE_BigInt)in_group2_end,
out_buf + (HYPRE_BigInt)in_group1_begin,
num_threads_in_group,
id_in_group);
HYPRE_BigInt *temp = in_buf;
in_buf = out_buf;
out_buf = temp;
}
*out = in_buf;
} /* omp parallel */
#ifdef DBG_MERGE_SORT
assert(std::equal(*out, *out + len, dbg_buf));
delete[] dbg_buf;
#endif
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
void hypre_big_sort_and_create_inverse_map(
HYPRE_BigInt *in, HYPRE_Int len, HYPRE_BigInt **out, hypre_UnorderedBigIntMap *inverse_map)
{
if (len == 0)
{
return;
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] -= hypre_MPI_Wtime();
#endif
HYPRE_BigInt *temp = hypre_TAlloc(HYPRE_BigInt, len, HYPRE_MEMORY_HOST);
hypre_big_merge_sort(in, temp, len, out);
hypre_UnorderedBigIntMapCreate(inverse_map, 2*len, 16*hypre_NumThreads());
HYPRE_Int i;
#pragma omp parallel for HYPRE_SMP_SCHEDULE
for (i = 0; i < len; i++)
{
HYPRE_Int old = hypre_UnorderedBigIntMapPutIfAbsent(inverse_map, (*out)[i], i);
assert(old == HYPRE_HOPSCOTCH_HASH_EMPTY);
#ifdef DBG_MERGE_SORT
if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %lld\n", i, (long long)(*out)[i]);
assert(false);
}
#endif
}
#ifdef DBG_MERGE_SORT
std::unordered_map<HYPRE_BigInt, HYPRE_Int> inverse_map2(len);
for (HYPRE_Int i = 0; i < len; ++i) {
inverse_map2[(*out)[i]] = i;
if (hypre_UnorderedBigIntMapGet(inverse_map, (*out)[i]) != i)
{
fprintf(stderr, "%d %lld\n", i, (long long)(*out)[i]);
assert(false);
}
}
assert(hypre_UnorderedBigIntMapSize(inverse_map) == len);
#endif
if (*out == in)
{
hypre_TFree(temp, HYPRE_MEMORY_HOST);
}
else
{
hypre_TFree(in, HYPRE_MEMORY_HOST);
}
#ifdef HYPRE_PROFILE
hypre_profile_times[HYPRE_TIMER_ID_MERGE] += hypre_MPI_Wtime();
#endif
}
#endif
#endif
/* vim: set tabstop=8 softtabstop=3 sw=3 expandtab: */
|
concat_ref.c | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* License); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*
* Copyright (c) 2021, OPEN AI LAB
* Author: jjzeng@openailab.com
*/
#include "concat_param.h"
#include "graph/tensor.h"
#include "graph/node.h"
#include "graph/graph.h"
#include "module/module.h"
#include "operator/op.h"
#include "device/cpu/cpu_node.h"
#include "device/cpu/cpu_graph.h"
#include "device/cpu/cpu_module.h"
#include "utility/float.h"
#include "utility/sys_port.h"
#include "utility/log.h"
#include <math.h>
#include <string.h>
struct shape_dim
{
int dim[4];
float scale;
int zero;
};
struct concat_op_param
{
struct shape_dim* input_shape;
int input_counts;
int input_dim;
struct shape_dim output_shape;
int output_dim;
int axis;
float out_scale;
void** input_data;
};
static int ref_concat_fp32(const float** in_data, float* out_data, const struct concat_op_param* param)
{
int axis = param->axis;
int concat_dim = 0;
for (int ii = 0; ii < param->input_counts; ++ii)
{
concat_dim += param->input_shape[ii].dim[axis];
}
if (concat_dim != param->output_shape.dim[axis])
{
TLOG_ERR("concant dimensions[%d] is not same output[%d]\n", concat_dim, param->output_shape.dim[axis]);
return -1;
}
int out_size, in_size;
out_size = 1;
for (int ii = 0; ii < axis; ++ii)
{
out_size *= param->output_shape.dim[ii];
}
in_size = 1;
for (int ii = axis + 1; ii < param->output_dim; ++ii)
{
in_size *= param->input_shape[0].dim[ii];
}
float* output_ptr = out_data;
for (int k = 0; k < out_size; ++k)
{
// #pragma omp parallel for num_threads(num_thread)
for (int j = 0; j < param->input_counts; ++j)
{
int cp_size = param->input_shape[j].dim[axis] * in_size;
memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(float));
output_ptr += cp_size;
}
}
return 0;
}
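/*
 * Worked example (not part of the original file): concatenating two NCHW
 * tensors of shapes [1,2,4,4] and [1,3,4,4] along axis = 1 gives
 *   out_size = 1   (product of dims before the axis)
 *   in_size  = 16  (product of dims after the axis)
 * so each outer iteration copies 2*16 floats from input 0 followed by
 * 3*16 floats from input 1 into the [1,5,4,4] output.
 */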
static int ref_concat_fp16(const fp16_t** in_data, fp16_t* out_data, const struct concat_op_param* param)
{
int axis = param->axis;
int concat_dim = 0;
for(int ii = 0; ii < param->input_counts; ++ii)
{
concat_dim += param->input_shape[ii].dim[axis];
}
if(concat_dim != param->output_shape.dim[axis])
{
TLOG_ERR("concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
return -1;
}
int out_size, in_size;
out_size = 1;
for(int ii = 0; ii < axis; ++ii)
{
out_size *= param->output_shape.dim[ii];
}
in_size = 1;
for(int ii = axis + 1; ii < param->output_dim; ++ii)
{
in_size *= param->input_shape[0].dim[ii];
}
fp16_t* output_ptr = out_data;
for(int k = 0; k < out_size; ++k)
{
for(int j = 0; j < param->input_counts; ++j)
{
int cp_size = param->input_shape[j].dim[axis] * in_size;
memcpy(output_ptr, in_data[j] + k * cp_size, cp_size * sizeof(fp16_t));
output_ptr += cp_size;
}
}
return 0;
}
static int ref_concat_uint8(const uint8_t** in_data, uint8_t* out_data, const struct concat_op_param* param)
{
int axis = param->axis;
int concat_dim = 0;
for (int ii = 0; ii < param->input_counts; ++ii)
{
concat_dim += param->input_shape[ii].dim[axis];
}
if (concat_dim != param->output_shape.dim[axis])
{
TLOG_ERR("concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
return -1;
}
int outer_size, in_size;
outer_size = 1;
for (int ii = 0; ii < axis; ++ii)
{
outer_size *= param->output_shape.dim[ii];
}
in_size = 1;
for (int ii = axis + 1; ii < param->output_dim; ++ii)
{
in_size *= param->output_shape.dim[ii];
}
int output_size = 1;
for (int ii = 0; ii < param->output_dim; ++ii)
{
output_size *= param->output_shape.dim[ii];
}
uint8_t* output_ptr = out_data;
float out_scale = param->output_shape.scale;
uint8_t out_zero = param->output_shape.zero;
for (int k = 0; k < outer_size; ++k)
{
for (int j = 0; j < param->input_counts; ++j)
{
int cp_size = param->input_shape[j].dim[axis] * in_size;
float scale = param->input_shape[j].scale;
uint8_t input_zero = param->input_shape[j].zero;
const uint8_t* input_ptr = ( const uint8_t* )(in_data[j] + k * cp_size);
if (scale == out_scale && input_zero == out_zero)
{
memcpy(output_ptr, input_ptr, cp_size);
}
else
{
float t_scale = scale / out_scale;
for (int ii = 0; ii < cp_size; ++ii)
{
output_ptr[ii] = round((input_ptr[ii] - input_zero) * t_scale) + out_zero;
}
}
output_ptr += cp_size;
}
}
return 0;
}
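/*
 * Worked requantization example (not part of the original file): with input
 * scale 0.1, zero point 0 and output scale 0.2, zero point 128, we get
 * t_scale = 0.1 / 0.2 = 0.5, so an input code of 40 (real value 4.0) becomes
 * round((40 - 0) * 0.5) + 128 = 148, which decodes back to (148 - 128) * 0.2 = 4.0.
 */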
static int ref_concat_int8(const int8_t** in_data, int8_t* out_data, const struct concat_op_param* param)
{
int axis = param->axis;
int concat_dim = 0;
for (int ii = 0; ii < param->input_counts; ++ii)
{
concat_dim += param->input_shape[ii].dim[axis];
}
if (concat_dim != param->output_shape.dim[axis])
{
TLOG_ERR("concat dimensions is not same output: ( %d -- %d )\n", concat_dim, param->output_shape.dim[axis]);
return -1;
}
int outer_size, in_size;
outer_size = 1;
for (int ii = 0; ii < axis; ++ii)
{
outer_size *= param->output_shape.dim[ii];
}
in_size = 1;
for (int ii = axis + 1; ii < param->output_dim; ++ii)
{
in_size *= param->output_shape.dim[ii];
}
int output_size = 1;
for (int ii = 0; ii < param->output_dim; ++ii)
{
output_size *= param->output_shape.dim[ii];
}
int8_t* output_ptr = out_data;
float output_scale = param->output_shape.scale;
for (int k = 0; k < outer_size; ++k)
{
for (int j = 0; j < param->input_counts; ++j)
{
int cp_size = param->input_shape[j].dim[axis] * in_size;
float input_scale = param->input_shape[j].scale;
const int8_t* input_ptr = ( const int8_t* )(in_data[j] + k * cp_size);
if (input_scale == output_scale)
{
memcpy(output_ptr, input_ptr, cp_size);
}
else
{
float requant_scale = input_scale / output_scale;
for (int ii = 0; ii < cp_size; ++ii)
{
int data_i32 = (int)roundf((float )input_ptr[ii] * requant_scale);
if (data_i32 > 127)
data_i32 = 127;
else if (data_i32 < -127)
data_i32 = -127;
output_ptr[ii] = (int8_t)data_i32;
}
}
output_ptr += cp_size;
}
}
return 0;
}
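/*
 * Worked example (not part of the original file): with input scale 0.5 and
 * output scale 0.25, requant_scale = 2.0, so code 60 maps to 120 while code
 * 70 would map to 140 and is saturated to the symmetric int8 limit of 127.
 */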
static int init_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct concat_op_param* concat_op_param = ( struct concat_op_param* )sys_malloc(sizeof(struct concat_op_param));
concat_op_param->axis = 0;
concat_op_param->input_counts = 1;
concat_op_param->input_dim = 1;
concat_op_param->input_shape = NULL;
concat_op_param->out_scale = 0.1f;
concat_op_param->output_dim = 1;
exec_node->ops_priv = concat_op_param;
return 0;
}
static int release_node(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
sys_free(exec_node->ops_priv);
return 0;
}
static int prerun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* output_tensor;
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
struct concat_param* concat_param = ( struct concat_param* )ir_node->op.param_mem;
concat_op_param->axis = concat_param->axis;
concat_op_param->input_counts = ir_node->input_num;
concat_op_param->input_shape = ( struct shape_dim* )sys_malloc(sizeof(struct shape_dim) * ir_node->input_num);
concat_op_param->output_dim = output_tensor->dim_num;
for (int ii = 0; ii < output_tensor->dim_num; ii++)
{
concat_op_param->output_shape.dim[ii] = output_tensor->dims[ii];
concat_op_param->output_shape.scale = output_tensor->scale;
concat_op_param->output_shape.zero = output_tensor->zero_point;
}
concat_op_param->input_data = ( void* )sys_malloc(sizeof(void*) * ir_node->input_num);
return 0;
}
static int run(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct node* ir_node = exec_node->ir_node;
struct graph* ir_graph = ir_node->graph;
struct tensor* input_tensor;
struct tensor* output_tensor;
output_tensor = get_ir_graph_tensor(ir_graph, ir_node->output_tensors[0]);
struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
void* out_data = output_tensor->data;
for (int i = 0; i < ir_node->input_num; i++)
{
input_tensor = get_ir_graph_tensor(ir_graph, ir_node->input_tensors[i]);
int number = input_tensor->dim_num;
for (int j = 0; j < number; j++)
{
concat_op_param->input_shape[i].dim[j] = input_tensor->dims[j];
concat_op_param->input_shape[i].scale = input_tensor->scale;
concat_op_param->input_shape[i].zero = input_tensor->zero_point;
}
concat_op_param->input_data[i] = input_tensor->data;
}
int ret = -1;
if (input_tensor->data_type == TENGINE_DT_FP32)
ret = ref_concat_fp32(( const float** )concat_op_param->input_data, out_data, concat_op_param);
else if (input_tensor->data_type == TENGINE_DT_FP16)
ret = ref_concat_fp16(( const fp16_t** )concat_op_param->input_data, out_data, concat_op_param);
else if (input_tensor->data_type == TENGINE_DT_UINT8)
ret = ref_concat_uint8(( const uint8_t** )concat_op_param->input_data, out_data, concat_op_param);
else if (input_tensor->data_type == TENGINE_DT_INT8)
ret = ref_concat_int8(( const int8_t** )concat_op_param->input_data, out_data, concat_op_param);
else
TLOG_ERR("Input data type %d not to be supported.\n", input_tensor->data_type);
return ret;
}
static int postrun(struct node_ops* node_ops, struct exec_node* exec_node, struct exec_graph* exec_graph)
{
struct concat_op_param* concat_op_param = ( struct concat_op_param* )exec_node->ops_priv;
sys_free(concat_op_param->input_shape);
sys_free(concat_op_param->input_data);
return 0;
}
static int score(struct node_ops* node_ops, struct exec_graph* exec_graph, struct node* exec_node)
{
return OPS_SCORE_CANDO;
}
static struct node_ops hcl_node_ops = {.prerun = prerun,
.run = run,
.reshape = NULL,
.postrun = postrun,
.init_node = init_node,
.release_node = release_node,
.score = score};
int register_concat_ref_op()
{
return register_builtin_node_ops(OP_CONCAT, &hcl_node_ops);
}
int unregister_concat_ref_op()
{
return unregister_builtin_node_ops(OP_CONCAT, &hcl_node_ops);
}
|
8414.c | /* POLYBENCH/GPU-OPENMP
*
* This file is a part of the Polybench/GPU-OpenMP suite
*
* Contact:
* William Killian <killian@udel.edu>
*
* Copyright 2013, The University of Delaware
*/
#include <stdio.h>
#include <unistd.h>
#include <string.h>
#include <math.h>
/* Include polybench common header. */
#include <polybench.h>
/* Include benchmark-specific header. */
/* Default data type is double, default size is 4000. */
#include "3mm.h"
/* Array initialization. */
static
void init_array(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nk; j++)
A[i][j] = ((DATA_TYPE) i*j) / ni;
for (i = 0; i < nk; i++)
for (j = 0; j < nj; j++)
B[i][j] = ((DATA_TYPE) i*(j+1)) / nj;
for (i = 0; i < nj; i++)
for (j = 0; j < nm; j++)
C[i][j] = ((DATA_TYPE) i*(j+3)) / nl;
for (i = 0; i < nm; i++)
for (j = 0; j < nl; j++)
D[i][j] = ((DATA_TYPE) i*(j+2)) / nk;
}
/* DCE code. Must scan the entire live-out data.
Can be used also to check the correctness of the output. */
static
void print_array(int ni, int nl,
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j;
for (i = 0; i < ni; i++)
for (j = 0; j < nl; j++) {
fprintf (stderr, DATA_PRINTF_MODIFIER, G[i][j]);
if ((i * ni + j) % 20 == 0) fprintf (stderr, "\n");
}
fprintf (stderr, "\n");
}
/* Main computational kernel. The whole function will be timed,
including the call and return. */
static
void kernel_3mm(int ni, int nj, int nk, int nl, int nm,
DATA_TYPE POLYBENCH_2D(E,NI,NJ,ni,nj),
DATA_TYPE POLYBENCH_2D(A,NI,NK,ni,nk),
DATA_TYPE POLYBENCH_2D(B,NK,NJ,nk,nj),
DATA_TYPE POLYBENCH_2D(F,NJ,NL,nj,nl),
DATA_TYPE POLYBENCH_2D(C,NJ,NM,nj,nm),
DATA_TYPE POLYBENCH_2D(D,NM,NL,nm,nl),
DATA_TYPE POLYBENCH_2D(G,NI,NL,ni,nl))
{
int i, j, k;
#pragma scop
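/* #P11 below is presumably an autotuning placeholder for the thread count;
   it must be replaced with an integer constant before this file compiles. */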
#pragma omp parallel private (i, j, k) num_threads(#P11)
{
/* E := A*B */
#pragma omp target teams distribute
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute
for (j = 0; j < _PB_NJ; j++)
{
E[i][j] = 0;
for (k = 0; k < _PB_NK; ++k)
E[i][j] += A[i][k] * B[k][j];
}
}
/* F := C*D */
#pragma omp target teams distribute
for (i = 0; i < _PB_NJ; i++)
{
#pragma omp target teams distribute
for (j = 0; j < _PB_NL; j++)
{
F[i][j] = 0;
for (k = 0; k < _PB_NM; ++k)
F[i][j] += C[i][k] * D[k][j];
}
}
/* G := E*F */
#pragma omp target teams distribute
for (i = 0; i < _PB_NI; i++)
{
#pragma omp target teams distribute
for (j = 0; j < _PB_NL; j++)
{
G[i][j] = 0;
for (k = 0; k < _PB_NJ; ++k)
G[i][j] += E[i][k] * F[k][j];
}
}
}
#pragma endscop
}
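/* In effect the kernel evaluates G = (A*B)*(C*D) through the temporaries
   E and F, i.e. three dense matrix-matrix products. */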
int main(int argc, char** argv)
{
/* Retrieve problem size. */
int ni = NI;
int nj = NJ;
int nk = NK;
int nl = NL;
int nm = NM;
/* Variable declaration/allocation. */
POLYBENCH_2D_ARRAY_DECL(E, DATA_TYPE, NI, NJ, ni, nj);
POLYBENCH_2D_ARRAY_DECL(A, DATA_TYPE, NI, NK, ni, nk);
POLYBENCH_2D_ARRAY_DECL(B, DATA_TYPE, NK, NJ, nk, nj);
POLYBENCH_2D_ARRAY_DECL(F, DATA_TYPE, NJ, NL, nj, nl);
POLYBENCH_2D_ARRAY_DECL(C, DATA_TYPE, NJ, NM, nj, nm);
POLYBENCH_2D_ARRAY_DECL(D, DATA_TYPE, NM, NL, nm, nl);
POLYBENCH_2D_ARRAY_DECL(G, DATA_TYPE, NI, NL, ni, nl);
/* Initialize array(s). */
init_array (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D));
/* Start timer. */
polybench_start_instruments;
/* Run kernel. */
kernel_3mm (ni, nj, nk, nl, nm,
POLYBENCH_ARRAY(E),
POLYBENCH_ARRAY(A),
POLYBENCH_ARRAY(B),
POLYBENCH_ARRAY(F),
POLYBENCH_ARRAY(C),
POLYBENCH_ARRAY(D),
POLYBENCH_ARRAY(G));
/* Stop and print timer. */
polybench_stop_instruments;
polybench_print_instruments;
/* Prevent dead-code elimination. All live-out data must be printed
by the function call in argument. */
polybench_prevent_dce(print_array(ni, nl, POLYBENCH_ARRAY(G)));
/* Be clean. */
POLYBENCH_FREE_ARRAY(E);
POLYBENCH_FREE_ARRAY(A);
POLYBENCH_FREE_ARRAY(B);
POLYBENCH_FREE_ARRAY(F);
POLYBENCH_FREE_ARRAY(C);
POLYBENCH_FREE_ARRAY(D);
POLYBENCH_FREE_ARRAY(G);
return 0;
}
|
main.c | # include <stdio.h>
# include <omp.h>
# include "sim.h"
# include "ga.h"
# include "gradient.h"
# include "utils.h"
# include <stdlib.h>
# include <math.h>
/*
* Function saving the best individual's results to a file
*
* @param population whole population
* @param bestindividual position of the best individual
*/
void save_bestind(Genome * population, int bestindividual){
FILE *fp;
double * ic;
ic = (double *) malloc(CoreModelDIM * sizeof(double));
genotype_to_phenotype(population + bestindividual, ic);
if ((fp = fopen("bestindividual.txt", "w")) == NULL) {
printf("Could not open file");
free(ic);
return;
}
fprintf(fp, "fitness: %.16f\nE: %.16f\nI_1: %.16f\nA: %.16f\n", population[bestindividual].fitness, ic[1], ic[2], ic[3]);
fprintf(fp, "beta: %.16f\nphi: %.16f\nepsilon_i: %.16f\nepsilon_Y: %.16f\nsigma: %.16f\ngamma_1: %.16f\ngamma_2: %.16f\nkappa: %.16f\np: %.16f\nalpha: %.16f\ndelta: %.16f",
population[bestindividual].c2[0],
population[bestindividual].c2[1],
population[bestindividual].c2[2],
population[bestindividual].c2[3],
population[bestindividual].c2[4],
population[bestindividual].c2[5],
population[bestindividual].c2[6],
population[bestindividual].c2[7],
population[bestindividual].c2[8],
population[bestindividual].c2[9],
population[bestindividual].c2[10]);
store_trajectory(ic, population[bestindividual].c2, fp);
fclose(fp);
free(ic);
}
/*
* Function printing out the genome as a single CSV line
*/
void printf_genome(Genome * g) {
printf("%.8f,%d,%d,%d", g->fitness, g->c1[0], g->c1[1], g->c1[2]);
printf("%.16f,%.16f,%.16f,%.16f,%.16f,%.16f,%.16f,%.16f,%.16f,%.16f,%.16f\n",
g->c2[0],
g->c2[1],
g->c2[2],
g->c2[3],
g->c2[4],
g->c2[5],
g->c2[6],
g->c2[7],
g->c2[8],
g->c2[9],
g->c2[10]);
}
///////////////////////////////////////////////////////////////////////////////////////
// Main Function //////////////////////////////////////////////////////////////////////
///////////////////////////////////////////////////////////////////////////////////////
int main(int argc, char ** argv) {
//set up of the genetic algorithm
int individuals = 200;
if(argc > 1) individuals = atoi(argv[1]);
int maxiter = 20000; // TODO: allow setting this at runtime
if(argc > 2) maxiter = atoi(argv[2]);
printf("Initializing with %d individuals and %d maxiter\n", individuals, maxiter);
int iter = 0;
// parameters for the next generation calculation
int number_elitism = 2;
int number_selection = 40;
int number_crossover = 70;
int number_migration = individuals - number_elitism - number_selection - number_crossover;
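// the four counts partition the population: with the defaults above,
// 2 elitism + 40 selection + 70 crossover + 88 migration = 200 individuals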
int best_individual;
double scale_factor = 0.1;
// parameters for the extinctions, similar to the next-generation parameters but applied while recovering from an extinction
int cooldown = 200;
unsigned int extinction_period = 500;
int number_survivors = 2;
int extinc_migration = 0;
int extinc_selection = 50;
int extinc_cross = individuals - number_survivors - extinc_migration - extinc_selection;
Genome * population;
Genome * temp_population;
temp_population = (Genome *) malloc(individuals * sizeof(Genome));
//fitness used
fitness_func ff;
ff = fitness_uniform;
// some auxiliary variables
int ek = 0;
int recovery = cooldown + 1 ;
double fitness_temp=1.f;
float epsilon = 1.0;
//random initializer
init_rng();
printf("Generating initial population\n");
population = generate_population(individuals);
int i;
printf("Entering genetic algorithm\n");
while (iter < maxiter) {
// fitness calculates the fitness of every guy in the population
#pragma omp parallel for
for (i = 0; i < individuals; i++) {// skip re-simulating repeated genes (fitness already computed) to speed up the run
if (population[i].fitness < 0) compute_fitness(population + i, ff); // TODO: parallel
}
// every 1000 generations the leading individuals are refined with a deterministic optimisation
if (iter % 1000 == 0) {
#pragma omp parallel for
for (i = 0; i < individuals && i < 100; i ++)
if (population[i].fitness > 0) optimise_parameters(population + i, ff);
}
//extinction routines
if (recovery < cooldown) {
best_individual = next_generation(population, temp_population,
number_survivors, extinc_selection, extinc_cross, extinc_migration,
0.3, scale_factor * (1 - ((double)iter) / ((double)maxiter)));
recovery++;
fitness_temp=population[best_individual].fitness;
} else {
int rdn=random_int(extinction_period);
if (rdn < ek) {
recovery=0;
change_seed();
ek = extinction( ek, population, temp_population, individuals, number_survivors);
printf("An extinction has occurred\n");
} else {
//if no extinction happens a normal next generation is done
best_individual = next_generation(population, temp_population,
number_elitism, number_selection, number_crossover, number_migration,
0.5, scale_factor * (1 - ((double)iter) / ((double)maxiter)));
//if the fitness is not improving the chances of extinction rise
if (fabs(population[best_individual].fitness - fitness_temp) < epsilon) ek++;
fitness_temp=population[best_individual].fitness;
}
}
if (maxiter >= 100 && iter % (maxiter/100) == 0)
printf("Generation %d with fitness %.8f\n", iter, population[best_individual].fitness);
// exchange pointers of parents and children populations
Genome * tmp = population;
population = temp_population;
temp_population = tmp;
++iter;
}
printf_genome(temp_population + best_individual);
save_bestind(temp_population, best_individual);
printf("Exited genetic algorithm\n");
printf("Fitness reached of %f, and total iterations of %d\n", temp_population[best_individual].fitness, iter);
free_rng();
}
|
DRB025-simdtruedep-var-yes.c | /*
Copyright (c) 2017, Lawrence Livermore National Security, LLC.
Produced at the Lawrence Livermore National Laboratory
Written by Chunhua Liao, Pei-Hung Lin, Joshua Asplund,
Markus Schordan, and Ian Karlin
(email: liao6@llnl.gov, lin32@llnl.gov, asplund1@llnl.gov,
schordan1@llnl.gov, karlin1@llnl.gov)
LLNL-CODE-732144
All rights reserved.
This file is part of DataRaceBench. For details, see
https://github.com/LLNL/dataracebench. Please also see the LICENSE file
for our additional BSD notice.
Redistribution and use in source and binary forms, with
or without modification, are permitted provided that the following
conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the disclaimer below.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the disclaimer (as noted below)
in the documentation and/or other materials provided with the
distribution.
* Neither the name of the LLNS/LLNL nor the names of its contributors
may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL LAWRENCE LIVERMORE NATIONAL
SECURITY, LLC, THE U.S. DEPARTMENT OF ENERGY OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
THE POSSIBILITY OF SUCH DAMAGE.
*/
/*
This one has race condition due to true dependence.
But data races happen at instruction level, not thread level.
Data race pair: a[i+1]@68:5 vs. a[i]@68:12
*/
#include "omprace.h"
#include <omp.h>
#include <stdlib.h>
int main(int argc, char* argv[])
{
omprace_init();
int i;
int len=100;
if (argc>1)
len = atoi(argv[1]);
int a[len], b[len];
for (i=0;i<len;i++)
{
a[i]=i;
b[i]=i+1;
}
#pragma omp simd
for (i=0;i<len-1;i++)
a[i+1]=a[i]*b[i];
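/* Illustrative trace (hypothetical, len = 4): run sequentially, a[0] = 0
propagates and a becomes {0, 0, 0, 0}; a vector lane that instead reads a
stale a[i] (e.g. a[2] = 1*2 = 2 using the old a[1] = 1) produces a
different result, which is the loop-carried true dependence described in
the header comment. */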
omprace_fini();
return 0;
}
|